github.com/artpar/rclone@v1.67.3/backend/mailru/mailru.go (about)

     1  // Package mailru provides an interface to the Mail.ru Cloud storage system.
     2  package mailru
     3  
     4  import (
     5  	"bytes"
     6  	"context"
     7  	"errors"
     8  	"fmt"
     9  	gohash "hash"
    10  	"io"
    11  	"path"
    12  	"path/filepath"
    13  	"sort"
    14  	"strconv"
    15  	"strings"
    16  	"sync"
    17  	"time"
    18  
    19  	"encoding/hex"
    20  	"encoding/json"
    21  	"net/http"
    22  	"net/url"
    23  
    24  	"github.com/artpar/rclone/backend/mailru/api"
    25  	"github.com/artpar/rclone/backend/mailru/mrhash"
    26  
    27  	"github.com/artpar/rclone/fs"
    28  	"github.com/artpar/rclone/fs/config"
    29  	"github.com/artpar/rclone/fs/config/configmap"
    30  	"github.com/artpar/rclone/fs/config/configstruct"
    31  	"github.com/artpar/rclone/fs/config/obscure"
    32  	"github.com/artpar/rclone/fs/fserrors"
    33  	"github.com/artpar/rclone/fs/fshttp"
    34  	"github.com/artpar/rclone/fs/hash"
    35  	"github.com/artpar/rclone/fs/object"
    36  	"github.com/artpar/rclone/fs/operations"
    37  
    38  	"github.com/artpar/rclone/lib/encoder"
    39  	"github.com/artpar/rclone/lib/oauthutil"
    40  	"github.com/artpar/rclone/lib/pacer"
    41  	"github.com/artpar/rclone/lib/readers"
    42  	"github.com/artpar/rclone/lib/rest"
    43  
    44  	"golang.org/x/oauth2"
    45  )
    46  
    47  // Global constants
// Global constants
const (
	// Pacer bounds for API call retry backoff (exponential).
	minSleepPacer   = 10 * time.Millisecond
	maxSleepPacer   = 2 * time.Second
	decayConstPacer = 2          // bigger for slower decay, exponential
	metaExpirySec   = 20 * 60    // meta server expiration time
	serverExpirySec = 3 * 60     // download server expiration time
	shardExpirySec  = 30 * 60    // upload server expiration time
	maxServerLocks  = 4          // maximum number of locks per single download server
	maxInt32        = 2147483647 // used as limit in directory list request
	speedupMinSize  = 512        // speedup is not optimal if data is smaller than average packet
)
    59  
    60  // Global errors
// Global errors
var (
	// Sentinel errors returned by directory and naming operations.
	ErrorDirAlreadyExists   = errors.New("directory already exists")
	ErrorDirSourceNotExists = errors.New("directory source does not exist")
	ErrorInvalidName        = errors.New("invalid characters in object name")

	// MrHashType is the hash.Type for Mailru, registered in init().
	MrHashType hash.Type
)
    69  
    70  // Description of how to authorize
// Description of how to authorize.
// Note: the same URL serves both the auth and token endpoints, and the
// token is passed in URL parameters rather than in a header.
var oauthConfig = &oauth2.Config{
	ClientID:     api.OAuthClientID,
	ClientSecret: "",
	Endpoint: oauth2.Endpoint{
		AuthURL:   api.OAuthURL,
		TokenURL:  api.OAuthURL,
		AuthStyle: oauth2.AuthStyleInParams,
	},
}
    80  
    81  // Register with Fs
// Register with Fs
func init() {
	// Register the proprietary Mailru content hash (40 hex digits) so
	// transfers can be verified against it.
	MrHashType = hash.RegisterHash("mailru", "MailruHash", 40, mrhash.New)
	fs.Register(&fs.RegInfo{
		Name:        "mailru",
		Description: "Mail.ru Cloud",
		NewFs:       NewFs,
		// Backend-specific options are appended to the shared OAuth
		// options so standard token settings stay available.
		Options: append(oauthutil.SharedOptions, []fs.Option{{
			Name:      "user",
			Help:      "User name (usually email).",
			Required:  true,
			Sensitive: true,
		}, {
			Name: "pass",
			Help: `Password.

This must be an app password - rclone will not work with your normal
password. See the Configuration section in the docs for how to make an
app password.
`,
			Required:   true,
			IsPassword: true,
		}, {
			Name:     "speedup_enable",
			Default:  true,
			Advanced: false,
			Help: `Skip full upload if there is another file with same data hash.

This feature is called "speedup" or "put by hash". It is especially efficient
in case of generally available files like popular books, video or audio clips,
because files are searched by hash in all accounts of all mailru users.
It is meaningless and ineffective if source file is unique or encrypted.
Please note that rclone may need local memory and disk space to calculate
content hash in advance and decide whether full upload is required.
Also, if rclone does not know file size in advance (e.g. in case of
streaming or partial uploads), it will not even try this optimization.`,
			Examples: []fs.OptionExample{{
				Value: "true",
				Help:  "Enable",
			}, {
				Value: "false",
				Help:  "Disable",
			}},
		}, {
			Name:     "speedup_file_patterns",
			Default:  "*.mkv,*.avi,*.mp4,*.mp3,*.zip,*.gz,*.rar,*.pdf",
			Advanced: true,
			Help: `Comma separated list of file name patterns eligible for speedup (put by hash).

Patterns are case insensitive and can contain '*' or '?' meta characters.`,
			Examples: []fs.OptionExample{{
				Value: "",
				Help:  "Empty list completely disables speedup (put by hash).",
			}, {
				Value: "*",
				Help:  "All files will be attempted for speedup.",
			}, {
				Value: "*.mkv,*.avi,*.mp4,*.mp3",
				Help:  "Only common audio/video files will be tried for put by hash.",
			}, {
				Value: "*.zip,*.gz,*.rar,*.pdf",
				Help:  "Only common archives or PDF books will be tried for speedup.",
			}},
		}, {
			Name:     "speedup_max_disk",
			Default:  fs.SizeSuffix(3 * 1024 * 1024 * 1024),
			Advanced: true,
			Help: `This option allows you to disable speedup (put by hash) for large files.

Reason is that preliminary hashing can exhaust your RAM or disk space.`,
			Examples: []fs.OptionExample{{
				Value: "0",
				Help:  "Completely disable speedup (put by hash).",
			}, {
				Value: "1G",
				Help:  "Files larger than 1Gb will be uploaded directly.",
			}, {
				Value: "3G",
				Help:  "Choose this option if you have less than 3Gb free on local disk.",
			}},
		}, {
			Name:     "speedup_max_memory",
			Default:  fs.SizeSuffix(32 * 1024 * 1024),
			Advanced: true,
			Help:     `Files larger than the size given below will always be hashed on disk.`,
			Examples: []fs.OptionExample{{
				Value: "0",
				Help:  "Preliminary hashing will always be done in a temporary disk location.",
			}, {
				Value: "32M",
				Help:  "Do not dedicate more than 32Mb RAM for preliminary hashing.",
			}, {
				Value: "256M",
				Help:  "You have at most 256Mb RAM free for hash calculations.",
			}},
		}, {
			Name:     "check_hash",
			Default:  true,
			Advanced: true,
			Help:     "What should copy do if file checksum is mismatched or invalid.",
			Examples: []fs.OptionExample{{
				Value: "true",
				Help:  "Fail with error.",
			}, {
				Value: "false",
				Help:  "Ignore and continue.",
			}},
		}, {
			Name:     "user_agent",
			Default:  "",
			Advanced: true,
			Hide:     fs.OptionHideBoth,
			Help: `HTTP user agent used internally by client.

Defaults to "rclone/VERSION" or "--user-agent" provided on command line.`,
		}, {
			Name:     "quirks",
			Default:  "",
			Advanced: true,
			Hide:     fs.OptionHideBoth,
			Help: `Comma separated list of internal maintenance flags.

This option must not be used by an ordinary user. It is intended only to
facilitate remote troubleshooting of backend issues. Strict meaning of
flags is not documented and not guaranteed to persist between releases.
Quirks will be removed when the backend grows stable.
Supported quirks: atomicmkdir binlist unknowndirs`,
		}, {
			Name:     config.ConfigEncoding,
			Help:     config.ConfigEncodingHelp,
			Advanced: true,
			// Encode invalid UTF-8 bytes as json doesn't handle them properly.
			Default: (encoder.Display |
				encoder.EncodeWin | // :?"*<>|
				encoder.EncodeBackSlash |
				encoder.EncodeInvalidUtf8),
		}}...),
	})
}
   220  
   221  // Options defines the configuration for this backend
// Options defines the configuration for this backend
type Options struct {
	Username        string               `config:"user"`                  // account name, usually an email
	Password        string               `config:"pass"`                  // app password (stored obscured)
	UserAgent       string               `config:"user_agent"`            // override for the HTTP User-Agent header
	CheckHash       bool                 `config:"check_hash"`            // fail on checksum mismatch instead of ignoring
	SpeedupEnable   bool                 `config:"speedup_enable"`        // enable "put by hash" upload shortcut
	SpeedupPatterns string               `config:"speedup_file_patterns"` // comma-separated glob patterns eligible for speedup
	SpeedupMaxDisk  fs.SizeSuffix        `config:"speedup_max_disk"`      // skip speedup for files larger than this
	SpeedupMaxMem   fs.SizeSuffix        `config:"speedup_max_memory"`    // hash on disk above this size
	Quirks          string               `config:"quirks"`                // internal maintenance flags, see quirks type
	Enc             encoder.MultiEncoder `config:"encoding"`              // file name encoding rules
}
   234  
   235  // retryErrorCodes is a slice of error codes that we will retry
// retryErrorCodes is a slice of error codes that we will retry
// (checked by shouldRetry via fserrors.ShouldRetryHTTP).
var retryErrorCodes = []int{
	429, // Too Many Requests.
	500, // Internal Server Error
	502, // Bad Gateway
	503, // Service Unavailable
	504, // Gateway Timeout
	509, // Bandwidth Limit Exceeded
}
   244  
   245  // shouldRetry returns a boolean as to whether this response and err
   246  // deserve to be retried. It returns the err as a convenience.
   247  // Retries password authorization (once) in a special case of access denied.
   248  func shouldRetry(ctx context.Context, res *http.Response, err error, f *Fs, opts *rest.Opts) (bool, error) {
   249  	if fserrors.ContextError(ctx, &err) {
   250  		return false, err
   251  	}
   252  	if res != nil && res.StatusCode == 403 && f.opt.Password != "" && !f.passFailed {
   253  		reAuthErr := f.reAuthorize(opts, err)
   254  		return reAuthErr == nil, err // return an original error
   255  	}
   256  	return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(res, retryErrorCodes), err
   257  }
   258  
   259  // errorHandler parses a non 2xx error response into an error
   260  func errorHandler(res *http.Response) (err error) {
   261  	data, err := rest.ReadBody(res)
   262  	if err != nil {
   263  		return err
   264  	}
   265  	fileError := &api.FileErrorResponse{}
   266  	err = json.NewDecoder(bytes.NewReader(data)).Decode(fileError)
   267  	if err == nil {
   268  		fileError.Message = fileError.Body.Home.Error
   269  		return fileError
   270  	}
   271  	serverError := &api.ServerErrorResponse{}
   272  	err = json.NewDecoder(bytes.NewReader(data)).Decode(serverError)
   273  	if err == nil {
   274  		return serverError
   275  	}
   276  	serverError.Message = string(data)
   277  	if serverError.Message == "" || strings.HasPrefix(serverError.Message, "{") {
   278  		// Replace empty or JSON response with a human-readable text.
   279  		serverError.Message = res.Status
   280  	}
   281  	serverError.Status = res.StatusCode
   282  	return serverError
   283  }
   284  
   285  // Fs represents a remote mail.ru
// Fs represents a remote mail.ru.
// Mutable server/auth state is guarded by the per-concern mutexes below;
// the remaining fields are set once in NewFs and read-only afterwards.
type Fs struct {
	name         string
	root         string             // root path
	opt          Options            // parsed options
	ci           *fs.ConfigInfo     // global config
	speedupGlobs []string           // list of file name patterns eligible for speedup
	speedupAny   bool               // true if all file names are eligible for speedup
	features     *fs.Features       // optional features
	srv          *rest.Client       // REST API client
	cli          *http.Client       // underlying HTTP client (for authorize)
	m            configmap.Mapper   // config reader (for authorize)
	source       oauth2.TokenSource // OAuth token refresher
	pacer        *fs.Pacer          // pacer for API calls
	metaMu       sync.Mutex         // lock for meta server switcher
	metaURL      string             // URL of meta server
	metaExpiry   time.Time          // time to refresh meta server
	shardMu      sync.Mutex         // lock for upload shard switcher
	shardURL     string             // URL of upload shard
	shardExpiry  time.Time          // time to refresh upload shard
	fileServers  serverPool         // file server dispatcher
	authMu       sync.Mutex         // mutex for authorize()
	passFailed   bool               // true if authorize() failed after 403, prevents re-auth loops
	quirks       quirks             // internal maintenance flags
}
   310  
   311  // NewFs constructs an Fs from the path, container:path
// NewFs constructs an Fs from the path, container:path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
	// fs.Debugf(nil, ">>> NewFs %q %q", name, root)

	// Parse config into Options struct
	opt := new(Options)
	err := configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}
	// The password is stored obscured in the config file.
	if opt.Password != "" {
		opt.Password = obscure.MustReveal(opt.Password)
	}

	// Trailing slash signals us to optimize out one file check
	rootIsDir := strings.HasSuffix(root, "/")
	// However the f.root string should not have leading or trailing slashes
	root = strings.Trim(root, "/")

	ci := fs.GetConfig(ctx)
	f := &Fs{
		name: name,
		root: root,
		opt:  *opt,
		ci:   ci,
		m:    m,
	}

	// Pre-compile the speedup glob patterns and maintenance quirks.
	if err := f.parseSpeedupPatterns(opt.SpeedupPatterns); err != nil {
		return nil, err
	}
	f.quirks.parseQuirks(opt.Quirks)

	f.pacer = fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleepPacer), pacer.MaxSleep(maxSleepPacer), pacer.DecayConstant(decayConstPacer)))

	f.features = (&fs.Features{
		CaseInsensitive:         true,
		CanHaveEmptyDirectories: true,
		// Can copy/move across mailru configs (almost, thus true here), but
		// only when they share common account (this is checked in Copy/Move).
		ServerSideAcrossConfigs: true,
	}).Fill(ctx, f)

	// Override few config settings and create a client
	newCtx, clientConfig := fs.AddConfig(ctx)
	if opt.UserAgent != "" {
		clientConfig.UserAgent = opt.UserAgent
	}
	clientConfig.NoGzip = true // Mimic official client, skip sending "Accept-Encoding: gzip"
	f.cli = fshttp.NewClient(newCtx)

	f.srv = rest.NewClient(f.cli)
	f.srv.SetRoot(api.APIServerURL)
	f.srv.SetHeader("Accept", "*/*") // Send "Accept: */*" with every request like official client
	f.srv.SetErrorHandler(errorHandler)

	// Obtain (or refresh) the OAuth token before the first API call.
	if err = f.authorize(ctx, false); err != nil {
		return nil, err
	}

	f.fileServers = serverPool{
		pool:      make(pendingServerMap),
		fs:        f,
		path:      "/d",
		expirySec: serverExpirySec,
	}

	// If the remote path did not end in "/" it may point at a file:
	// probe it and, if so, re-root the Fs at the parent directory and
	// return fs.ErrorIsFile so rclone filters on the file name.
	if !rootIsDir {
		_, dirSize, err := f.readItemMetaData(ctx, f.root)
		rootIsDir = (dirSize >= 0)
		// Ignore non-existing item and other errors
		if err == nil && !rootIsDir {
			root = path.Dir(f.root)
			if root == "." {
				root = ""
			}
			f.root = root
			// Return fs that points to the parent and signal rclone to do filtering
			return f, fs.ErrorIsFile
		}
	}

	return f, nil
}
   395  
   396  // Internal maintenance flags (to be removed when the backend matures).
   397  // Primarily intended to facilitate remote support and troubleshooting.
// Internal maintenance flags (to be removed when the backend matures).
// Primarily intended to facilitate remote support and troubleshooting.
// See parseQuirks for the meaning of each flag.
type quirks struct {
	binlist     bool // use the recursive "bin" protocol for List
	atomicmkdir bool // fail Mkdir if the directory already exists
	unknowndirs bool // treat unknown resource types as folders
}
   403  
   404  func (q *quirks) parseQuirks(option string) {
   405  	for _, flag := range strings.Split(option, ",") {
   406  		switch strings.ToLower(strings.TrimSpace(flag)) {
   407  		case "binlist":
   408  			// The official client sometimes uses a so called "bin" protocol,
   409  			// implemented in the listBin file system method below. This method
   410  			// is generally faster than non-recursive listM1 but results in
   411  			// sporadic deserialization failures if total size of tree data
   412  			// approaches 8Kb (?). The recursive method is normally disabled.
   413  			// This quirk can be used to enable it for further investigation.
   414  			// Remove this quirk when the "bin" protocol support is complete.
   415  			q.binlist = true
   416  		case "atomicmkdir":
   417  			// At the moment rclone requires Mkdir to return success if the
   418  			// directory already exists. However, such programs as borgbackup
   419  			// use mkdir as a locking primitive and depend on its atomicity.
   420  			// Remove this quirk when the above issue is investigated.
   421  			q.atomicmkdir = true
   422  		case "unknowndirs":
   423  			// Accepts unknown resource types as folders.
   424  			q.unknowndirs = true
   425  		default:
   426  			// Ignore unknown flags
   427  		}
   428  	}
   429  }
   430  
   431  // Note: authorize() is not safe for concurrent access as it updates token source
// authorize obtains a valid OAuth token, either from the config file or
// by performing the password credentials flow, and installs a token
// source on f for later refreshes.
// Note: authorize() is not safe for concurrent access as it updates token source
func (f *Fs) authorize(ctx context.Context, force bool) (err error) {
	var t *oauth2.Token
	// Try the cached token first unless a fresh one was demanded.
	if !force {
		t, err = oauthutil.GetToken(f.name, f.m)
	}

	// Fall back to the username/password flow when there is no usable token.
	if err != nil || !tokenIsValid(t) {
		fs.Infof(f, "Valid token not found, authorizing.")
		ctx := oauthutil.Context(ctx, f.cli)
		t, err = oauthConfig.PasswordCredentialsToken(ctx, f.opt.Username, f.opt.Password)
	}
	if err == nil && !tokenIsValid(t) {
		err = errors.New("invalid token")
	}
	if err != nil {
		return fmt.Errorf("failed to authorize: %w", err)
	}

	// Persist the (possibly new) token back to the config.
	if err = oauthutil.PutToken(f.name, f.m, t, false); err != nil {
		return err
	}

	// Mailru API server expects access token not in the request header but
	// in the URL query string, so we must use a bare token source rather than
	// client provided by oauthutil.
	//
	// WARNING: direct use of the returned token source triggers a bug in the
	// `(*token != *ts.token)` comparison in oauthutil.TokenSource.Token()
	// crashing with panic `comparing uncomparable type map[string]interface{}`
	// As a workaround, mimic oauth2.NewClient() wrapping token source in
	// oauth2.ReuseTokenSource
	_, ts, err := oauthutil.NewClientWithBaseClient(ctx, f.name, f.m, oauthConfig, f.cli)
	if err == nil {
		f.source = oauth2.ReuseTokenSource(nil, ts)
	}
	return err
}
   469  
   470  func tokenIsValid(t *oauth2.Token) bool {
   471  	return t.Valid() && t.RefreshToken != "" && t.Type() == "Bearer"
   472  }
   473  
   474  // reAuthorize is called after getting 403 (access denied) from the server.
   475  // It handles the case when user has changed password since a previous
   476  // rclone invocation and obtains a new access token, if needed.
// reAuthorize is called after getting 403 (access denied) from the server.
// It handles the case when user has changed password since a previous
// rclone invocation and obtains a new access token, if needed.
// On success it also refreshes the token parameter in opts (when present)
// so the failed request can be retried with the new token.
func (f *Fs) reAuthorize(opts *rest.Opts, origErr error) error {
	// lock and recheck the flag to ensure authorize() is attempted only once
	f.authMu.Lock()
	defer f.authMu.Unlock()
	if f.passFailed {
		return origErr
	}
	ctx := context.Background() // Note: reAuthorize is called by ShouldRetry, no context!

	fs.Debugf(f, "re-authorize with new password")
	if err := f.authorize(ctx, true); err != nil {
		// Remember the failure so concurrent/later 403s don't retry forever.
		f.passFailed = true
		return err
	}

	// obtain new token, if needed
	// (the request may carry it as either "token" or "access_token")
	tokenParameter := ""
	if opts != nil && opts.Parameters.Get("token") != "" {
		tokenParameter = "token"
	}
	if opts != nil && opts.Parameters.Get("access_token") != "" {
		tokenParameter = "access_token"
	}
	if tokenParameter != "" {
		token, err := f.accessToken()
		if err != nil {
			f.passFailed = true
			return err
		}
		opts.Parameters.Set(tokenParameter, token)
	}

	return nil
}
   511  
   512  // accessToken() returns OAuth token and possibly refreshes it
   513  func (f *Fs) accessToken() (string, error) {
   514  	token, err := f.source.Token()
   515  	if err != nil {
   516  		return "", fmt.Errorf("cannot refresh access token: %w", err)
   517  	}
   518  	return token.AccessToken, nil
   519  }
   520  
   521  // absPath converts root-relative remote to absolute home path
   522  func (f *Fs) absPath(remote string) string {
   523  	return path.Join("/", f.root, remote)
   524  }
   525  
   526  // relPath converts absolute home path to root-relative remote
   527  // Note that f.root can not have leading and trailing slashes
   528  func (f *Fs) relPath(absPath string) (string, error) {
   529  	target := strings.Trim(absPath, "/")
   530  	if f.root == "" {
   531  		return target, nil
   532  	}
   533  	if target == f.root {
   534  		return "", nil
   535  	}
   536  	if strings.HasPrefix(target+"/", f.root+"/") {
   537  		return target[len(f.root)+1:], nil
   538  	}
   539  	return "", fmt.Errorf("path %q should be under %q", absPath, f.root)
   540  }
   541  
   542  // metaServer returns URL of current meta server
// metaServer returns URL of current meta server.
// The URL is cached under metaMu and refreshed from the dispatcher when
// it is older than metaExpirySec.
func (f *Fs) metaServer(ctx context.Context) (string, error) {
	f.metaMu.Lock()
	defer f.metaMu.Unlock()

	// Serve from cache while it is still fresh.
	if f.metaURL != "" && time.Now().Before(f.metaExpiry) {
		return f.metaURL, nil
	}

	opts := rest.Opts{
		RootURL: api.DispatchServerURL,
		Method:  "GET",
		Path:    "/m",
	}

	var (
		res *http.Response
		url string
		err error
	)
	// The dispatcher replies with a plain-text line whose first word is
	// the meta server URL.
	err = f.pacer.Call(func() (bool, error) {
		res, err = f.srv.Call(ctx, &opts)
		if err == nil {
			url, err = readBodyWord(res)
		}
		return fserrors.ShouldRetry(err), err
	})
	if err != nil {
		closeBody(res)
		return "", err
	}
	f.metaURL = url
	f.metaExpiry = time.Now().Add(metaExpirySec * time.Second)
	fs.Debugf(f, "new meta server: %s", f.metaURL)
	return f.metaURL, nil
}
   578  
   579  // readBodyWord reads the single line response to completion
   580  // and extracts the first word from the first line.
   581  func readBodyWord(res *http.Response) (word string, err error) {
   582  	var body []byte
   583  	body, err = rest.ReadBody(res)
   584  	if err == nil {
   585  		line := strings.Trim(string(body), " \r\n")
   586  		word = strings.Split(line, " ")[0]
   587  	}
   588  	if word == "" {
   589  		return "", errors.New("empty reply from dispatcher")
   590  	}
   591  	return word, nil
   592  }
   593  
   594  // readItemMetaData returns a file/directory info at given full path
   595  // If it can't be found it fails with fs.ErrorObjectNotFound
   596  // For the return value `dirSize` please see Fs.itemToEntry()
// readItemMetaData returns a file/directory info at given full path
// If it can't be found it fails with fs.ErrorObjectNotFound
// For the return value `dirSize` please see Fs.itemToEntry()
func (f *Fs) readItemMetaData(ctx context.Context, path string) (entry fs.DirEntry, dirSize int, err error) {
	token, err := f.accessToken()
	if err != nil {
		return nil, -1, err
	}

	// The API wants the access token in the query string, not a header.
	opts := rest.Opts{
		Method: "GET",
		Path:   "/api/m1/file",
		Parameters: url.Values{
			"access_token": {token},
			"home":         {f.opt.Enc.FromStandardPath(path)},
			"offset":       {"0"},
			"limit":        {strconv.Itoa(maxInt32)},
		},
	}

	var info api.ItemInfoResponse
	err = f.pacer.Call(func() (bool, error) {
		res, err := f.srv.CallJSON(ctx, &opts, nil, &info)
		return shouldRetry(ctx, res, err, f, &opts)
	})

	if err != nil {
		// Map API "not found" style errors onto the standard sentinel.
		if apiErr, ok := err.(*api.FileErrorResponse); ok {
			switch apiErr.Status {
			case 404:
				err = fs.ErrorObjectNotFound
			case 400:
				// The server reports 400 for some invalid/unknown paths.
				fs.Debugf(f, "object %q status %d (%s)", path, apiErr.Status, apiErr.Message)
				err = fs.ErrorObjectNotFound
			}
		}
		return
	}

	entry, dirSize, err = f.itemToDirEntry(ctx, &info.Body)
	return
}
   636  
   637  // itemToEntry converts API item to rclone directory entry
   638  // The dirSize return value is:
   639  //
   640  //	<0 - for a file or in case of error
   641  //	=0 - for an empty directory
   642  //	>0 - for a non-empty directory
   643  func (f *Fs) itemToDirEntry(ctx context.Context, item *api.ListItem) (entry fs.DirEntry, dirSize int, err error) {
   644  	remote, err := f.relPath(f.opt.Enc.ToStandardPath(item.Home))
   645  	if err != nil {
   646  		return nil, -1, err
   647  	}
   648  
   649  	modTime := time.Unix(int64(item.Mtime), 0)
   650  
   651  	isDir, err := f.isDir(item.Kind, remote)
   652  	if err != nil {
   653  		return nil, -1, err
   654  	}
   655  	if isDir {
   656  		dir := fs.NewDir(remote, modTime).SetSize(item.Size)
   657  		return dir, item.Count.Files + item.Count.Folders, nil
   658  	}
   659  
   660  	binHash, err := mrhash.DecodeString(item.Hash)
   661  	if err != nil {
   662  		return nil, -1, err
   663  	}
   664  	file := &Object{
   665  		fs:          f,
   666  		remote:      remote,
   667  		hasMetaData: true,
   668  		size:        item.Size,
   669  		mrHash:      binHash,
   670  		modTime:     modTime,
   671  	}
   672  	return file, -1, nil
   673  }
   674  
   675  // isDir returns true for directories, false for files
   676  func (f *Fs) isDir(kind, path string) (bool, error) {
   677  	switch kind {
   678  	case "":
   679  		return false, errors.New("empty resource type")
   680  	case "file":
   681  		return false, nil
   682  	case "folder":
   683  		// fall thru
   684  	case "camera-upload", "mounted", "shared":
   685  		fs.Debugf(f, "[%s]: folder has type %q", path, kind)
   686  	default:
   687  		if !f.quirks.unknowndirs {
   688  			return false, fmt.Errorf("unknown resource type %q", kind)
   689  		}
   690  		fs.Errorf(f, "[%s]: folder has unknown type %q", path, kind)
   691  	}
   692  	return true, nil
   693  }
   694  
   695  // List the objects and directories in dir into entries.
   696  // The entries can be returned in any order but should be for a complete directory.
   697  // dir should be "" to list the root, and should not have trailing slashes.
   698  // This should return ErrDirNotFound if the directory isn't found.
   699  func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
   700  	// fs.Debugf(f, ">>> List: %q", dir)
   701  
   702  	if f.quirks.binlist {
   703  		entries, err = f.listBin(ctx, f.absPath(dir), 1)
   704  	} else {
   705  		entries, err = f.listM1(ctx, f.absPath(dir), 0, maxInt32)
   706  	}
   707  
   708  	if err == nil && f.ci.LogLevel >= fs.LogLevelDebug {
   709  		names := []string{}
   710  		for _, entry := range entries {
   711  			names = append(names, entry.Remote())
   712  		}
   713  		sort.Strings(names)
   714  		// fs.Debugf(f, "List(%q): %v", dir, names)
   715  	}
   716  
   717  	return
   718  }
   719  
   720  // list using protocol "m1"
// listM1 lists a single directory using protocol "m1".
// offset/limit select a window of the directory contents.
func (f *Fs) listM1(ctx context.Context, dirPath string, offset int, limit int) (entries fs.DirEntries, err error) {
	token, err := f.accessToken()
	if err != nil {
		return nil, err
	}

	// Token and paging go in the query string...
	params := url.Values{}
	params.Set("access_token", token)
	params.Set("offset", strconv.Itoa(offset))
	params.Set("limit", strconv.Itoa(limit))

	// ...while the directory path goes form-encoded in the body.
	data := url.Values{}
	data.Set("home", f.opt.Enc.FromStandardPath(dirPath))

	opts := rest.Opts{
		Method:      "POST",
		Path:        "/api/m1/folder",
		Parameters:  params,
		Body:        strings.NewReader(data.Encode()),
		ContentType: api.BinContentType,
	}

	var (
		info api.FolderInfoResponse
		res  *http.Response
	)
	err = f.pacer.Call(func() (bool, error) {
		res, err = f.srv.CallJSON(ctx, &opts, nil, &info)
		return shouldRetry(ctx, res, err, f, &opts)
	})

	if err != nil {
		// Map API 404 onto the standard "directory not found" sentinel.
		apiErr, ok := err.(*api.FileErrorResponse)
		if ok && apiErr.Status == 404 {
			return nil, fs.ErrorDirNotFound
		}
		return nil, err
	}

	// The listed path must itself be a directory.
	isDir, err := f.isDir(info.Body.Kind, dirPath)
	if err != nil {
		return nil, err
	}
	if !isDir {
		return nil, fs.ErrorIsFile
	}

	// Convert each item, skipping (with a debug note) any that fail.
	for _, item := range info.Body.List {
		entry, _, err := f.itemToDirEntry(ctx, &item)
		if err == nil {
			entries = append(entries, entry)
		} else {
			fs.Debugf(f, "Excluding path %q from list: %v", item.Home, err)
		}
	}
	return entries, nil
}
   778  
   779  // list using protocol "bin"
   780  func (f *Fs) listBin(ctx context.Context, dirPath string, depth int) (entries fs.DirEntries, err error) {
   781  	options := api.ListOptDefaults
   782  
   783  	req := api.NewBinWriter()
   784  	req.WritePu16(api.OperationFolderList)
   785  	req.WriteString(f.opt.Enc.FromStandardPath(dirPath))
   786  	req.WritePu32(int64(depth))
   787  	req.WritePu32(int64(options))
   788  	req.WritePu32(0)
   789  
   790  	token, err := f.accessToken()
   791  	if err != nil {
   792  		return nil, err
   793  	}
   794  	metaURL, err := f.metaServer(ctx)
   795  	if err != nil {
   796  		return nil, err
   797  	}
   798  
   799  	opts := rest.Opts{
   800  		Method:  "POST",
   801  		RootURL: metaURL,
   802  		Parameters: url.Values{
   803  			"client_id": {api.OAuthClientID},
   804  			"token":     {token},
   805  		},
   806  		ContentType: api.BinContentType,
   807  		Body:        req.Reader(),
   808  	}
   809  
   810  	var res *http.Response
   811  	err = f.pacer.Call(func() (bool, error) {
   812  		res, err = f.srv.Call(ctx, &opts)
   813  		return shouldRetry(ctx, res, err, f, &opts)
   814  	})
   815  	if err != nil {
   816  		closeBody(res)
   817  		return nil, err
   818  	}
   819  
   820  	r := api.NewBinReader(res.Body)
   821  	defer closeBody(res)
   822  
   823  	// read status
   824  	switch status := r.ReadByteAsInt(); status {
   825  	case api.ListResultOK:
   826  		// go on...
   827  	case api.ListResultNotExists:
   828  		return nil, fs.ErrorDirNotFound
   829  	default:
   830  		return nil, fmt.Errorf("directory list error %d", status)
   831  	}
   832  
   833  	t := &treeState{
   834  		f:       f,
   835  		r:       r,
   836  		options: options,
   837  		rootDir: parentDir(dirPath),
   838  		lastDir: "",
   839  		level:   0,
   840  	}
   841  	t.currDir = t.rootDir
   842  
   843  	// read revision
   844  	if err := t.revision.Read(r); err != nil {
   845  		return nil, err
   846  	}
   847  
   848  	// read space
   849  	if (options & api.ListOptTotalSpace) != 0 {
   850  		t.totalSpace = int64(r.ReadULong())
   851  	}
   852  	if (options & api.ListOptUsedSpace) != 0 {
   853  		t.usedSpace = int64(r.ReadULong())
   854  	}
   855  
   856  	t.fingerprint = r.ReadBytesByLength()
   857  
   858  	// deserialize
   859  	for {
   860  		entry, err := t.NextRecord()
   861  		if err != nil {
   862  			break
   863  		}
   864  		if entry != nil {
   865  			entries = append(entries, entry)
   866  		}
   867  	}
   868  	if err != nil && err != fs.ErrorListAborted {
   869  		fs.Debugf(f, "listBin failed at offset %d: %v", r.Count(), err)
   870  		return nil, err
   871  	}
   872  	return entries, nil
   873  }
   874  
// NextRecord parses one record from the binary listing stream.
//
// A nil entry with a nil error means the record carried only cursor
// bookkeeping (enter/leave folder, skip block) or described an item
// outside the requested level. fs.ErrorListAborted signals the normal
// end of the stream (api.ListParseDone).
func (t *treeState) NextRecord() (fs.DirEntry, error) {
	r := t.r
	parseOp := r.ReadByteAsShort()
	if r.Error() != nil {
		return nil, r.Error()
	}

	switch parseOp {
	case api.ListParseDone:
		// Normal end of the listing stream.
		return nil, fs.ErrorListAborted
	case api.ListParsePin:
		// Descend into the most recently parsed folder.
		if t.lastDir == "" {
			return nil, errors.New("last folder is null")
		}
		t.currDir = t.lastDir
		t.level++
		return nil, nil
	case api.ListParsePinUpper:
		// Ascend to the parent folder.
		if t.currDir == t.rootDir {
			return nil, nil
		}
		if t.level <= 0 {
			return nil, errors.New("no parent folder")
		}
		t.currDir = parentDir(t.currDir)
		t.level--
		return nil, nil
	case api.ListParseUnknown15:
		// Opaque record: skip the announced number of value pairs.
		skip := int(r.ReadPu32())
		for i := 0; i < skip; i++ {
			r.ReadPu32()
			r.ReadPu32()
		}
		return nil, nil
	case api.ListParseReadItem:
		// get item (see below)
	default:
		return nil, fmt.Errorf("unknown parse operation %d", parseOp)
	}

	// get item
	head := r.ReadIntSpl()
	itemType := head & 3
	if (head & 4096) != 0 {
		// NOTE(review): meaning of this node ID is unknown upstream ("dunno").
		t.dunnoNodeID = r.ReadNBytes(api.DunnoNodeIDLength)
	}
	name := t.f.opt.Enc.FromStandardPath(string(r.ReadBytesByLength()))
	t.dunno1 = int(r.ReadULong())
	t.dunno2 = 0
	t.dunno3 = 0

	if r.Error() != nil {
		return nil, r.Error()
	}

	var (
		modTime time.Time
		size    int64
		binHash []byte
		dirSize int64
		isDir   = true
	)

	// Per-type payload; the read sequence must match the wire format.
	switch itemType {
	case api.ListItemMountPoint:
		t.treeID = r.ReadNBytes(api.TreeIDLength)
		t.dunno2 = int(r.ReadULong())
		t.dunno3 = int(r.ReadULong())
	case api.ListItemFolder:
		t.dunno2 = int(r.ReadULong())
	case api.ListItemSharedFolder:
		t.dunno2 = int(r.ReadULong())
		t.treeID = r.ReadNBytes(api.TreeIDLength)
	case api.ListItemFile:
		isDir = false
		modTime = r.ReadDate()
		size = int64(r.ReadULong())
		binHash = r.ReadNBytes(mrhash.Size)
	default:
		return nil, fmt.Errorf("unknown item type %d", itemType)
	}

	if isDir {
		t.lastDir = path.Join(t.currDir, name)
		// Optional trailing fields are only present when the listing
		// was requested with the corresponding options.
		if (t.options & api.ListOptDelete) != 0 {
			t.dunnoDel1 = int(r.ReadPu32())
			t.dunnoDel2 = int(r.ReadPu32())
		}
		if (t.options & api.ListOptFolderSize) != 0 {
			dirSize = int64(r.ReadULong())
		}
	}

	if r.Error() != nil {
		return nil, r.Error()
	}

	if t.f.ci.LogLevel >= fs.LogLevelDebug {
		ctime, _ := modTime.MarshalJSON()
		fs.Debugf(t.f, "binDir %d.%d %q %q (%d) %s", t.level, itemType, t.currDir, name, size, ctime)
	}

	// Only emit entries for direct children of the listed directory.
	if t.level != 1 {
		// TODO: implement recursion and ListR
		// Note: recursion is broken because maximum buffer size is 8K
		return nil, nil
	}

	remote, err := t.f.relPath(path.Join(t.currDir, name))
	if err != nil {
		return nil, err
	}
	if isDir {
		return fs.NewDir(remote, modTime).SetSize(dirSize), nil
	}
	obj := &Object{
		fs:          t.f,
		remote:      remote,
		hasMetaData: true,
		size:        size,
		mrHash:      binHash,
		modTime:     modTime,
	}
	return obj, nil
}
  1000  
// treeState keeps the cursor state while deserializing a binary
// folder listing (see listBin and NextRecord).
type treeState struct {
	f           *Fs
	r           *api.BinReader // source stream of listing records
	options     int            // api.ListOpt* flags the listing was requested with
	rootDir     string         // absolute parent of the listed directory
	currDir     string         // directory the cursor is currently in
	lastDir     string         // most recently parsed folder (target of ListParsePin)
	level       int            // current depth relative to rootDir
	revision    treeRevision
	totalSpace  int64
	usedSpace   int64
	fingerprint []byte
	// The dunno* fields hold wire values whose meaning is unknown upstream;
	// they are read to keep the stream position correct.
	dunno1      int
	dunno2      int
	dunno3      int
	dunnoDel1   int
	dunnoDel2   int
	dunnoNodeID []byte
	treeID      []byte
}
  1021  
// treeRevision is the directory revision header of a binary listing.
// Which fields are populated depends on the version byte (see Read).
type treeRevision struct {
	ver       int16
	treeID    []byte
	treeIDNew []byte
	bgn       uint64
	bgnNew    uint64
}
  1029  
// Read deserializes a revision header from the stream.
// The leading version byte selects which fields follow on the wire;
// an unknown version is an error. Any stream-level read error is
// reported via data.Error().
func (rev *treeRevision) Read(data *api.BinReader) error {
	rev.ver = data.ReadByteAsShort()
	switch rev.ver {
	case 0:
		// Revision()
	case 1, 2:
		rev.treeID = data.ReadNBytes(api.TreeIDLength)
		rev.bgn = data.ReadULong()
	case 3, 4:
		rev.treeID = data.ReadNBytes(api.TreeIDLength)
		rev.bgn = data.ReadULong()
		rev.treeIDNew = data.ReadNBytes(api.TreeIDLength)
		rev.bgnNew = data.ReadULong()
	case 5:
		rev.treeID = data.ReadNBytes(api.TreeIDLength)
		rev.bgn = data.ReadULong()
		rev.treeIDNew = data.ReadNBytes(api.TreeIDLength)
	default:
		return fmt.Errorf("unknown directory revision %d", rev.ver)
	}
	return data.Error()
}
  1052  
  1053  // CreateDir makes a directory (parent must exist)
  1054  func (f *Fs) CreateDir(ctx context.Context, path string) error {
  1055  	// fs.Debugf(f, ">>> CreateDir %q", path)
  1056  
  1057  	req := api.NewBinWriter()
  1058  	req.WritePu16(api.OperationCreateFolder)
  1059  	req.WritePu16(0) // revision
  1060  	req.WriteString(f.opt.Enc.FromStandardPath(path))
  1061  	req.WritePu32(0)
  1062  
  1063  	token, err := f.accessToken()
  1064  	if err != nil {
  1065  		return err
  1066  	}
  1067  	metaURL, err := f.metaServer(ctx)
  1068  	if err != nil {
  1069  		return err
  1070  	}
  1071  
  1072  	opts := rest.Opts{
  1073  		Method:  "POST",
  1074  		RootURL: metaURL,
  1075  		Parameters: url.Values{
  1076  			"client_id": {api.OAuthClientID},
  1077  			"token":     {token},
  1078  		},
  1079  		ContentType: api.BinContentType,
  1080  		Body:        req.Reader(),
  1081  	}
  1082  
  1083  	var res *http.Response
  1084  	err = f.pacer.Call(func() (bool, error) {
  1085  		res, err = f.srv.Call(ctx, &opts)
  1086  		return shouldRetry(ctx, res, err, f, &opts)
  1087  	})
  1088  	if err != nil {
  1089  		closeBody(res)
  1090  		return err
  1091  	}
  1092  
  1093  	reply := api.NewBinReader(res.Body)
  1094  	defer closeBody(res)
  1095  
  1096  	switch status := reply.ReadByteAsInt(); status {
  1097  	case api.MkdirResultOK:
  1098  		return nil
  1099  	case api.MkdirResultAlreadyExists, api.MkdirResultExistsDifferentCase:
  1100  		return ErrorDirAlreadyExists
  1101  	case api.MkdirResultSourceNotExists:
  1102  		return ErrorDirSourceNotExists
  1103  	case api.MkdirResultInvalidName:
  1104  		return ErrorInvalidName
  1105  	default:
  1106  		return fmt.Errorf("mkdir error %d", status)
  1107  	}
  1108  }
  1109  
  1110  // Mkdir creates the container (and its parents) if it doesn't exist.
  1111  // Normally it ignores the ErrorDirAlreadyExist, as required by rclone tests.
  1112  // Nevertheless, such programs as borgbackup or restic use mkdir as a locking
  1113  // primitive and depend on its atomicity, i.e. mkdir should fail if directory
  1114  // already exists. As a workaround, users can add string "atomicmkdir" in the
  1115  // hidden `quirks` parameter or in the `--mailru-quirks` command-line option.
  1116  func (f *Fs) Mkdir(ctx context.Context, dir string) error {
  1117  	// fs.Debugf(f, ">>> Mkdir %q", dir)
  1118  	err := f.mkDirs(ctx, f.absPath(dir))
  1119  	if err == ErrorDirAlreadyExists && !f.quirks.atomicmkdir {
  1120  		return nil
  1121  	}
  1122  	return err
  1123  }
  1124  
  1125  // mkDirs creates container and its parents by absolute path,
  1126  // fails with ErrorDirAlreadyExists if it already exists.
  1127  func (f *Fs) mkDirs(ctx context.Context, path string) error {
  1128  	if path == "/" || path == "" {
  1129  		return nil
  1130  	}
  1131  	switch err := f.CreateDir(ctx, path); err {
  1132  	case nil:
  1133  		return nil
  1134  	case ErrorDirSourceNotExists:
  1135  		fs.Debugf(f, "mkDirs by part %q", path)
  1136  		// fall thru...
  1137  	default:
  1138  		return err
  1139  	}
  1140  	parts := strings.Split(strings.Trim(path, "/"), "/")
  1141  	path = ""
  1142  	for _, part := range parts {
  1143  		if part == "" {
  1144  			continue
  1145  		}
  1146  		path += "/" + part
  1147  		switch err := f.CreateDir(ctx, path); err {
  1148  		case nil, ErrorDirAlreadyExists:
  1149  			continue
  1150  		default:
  1151  			return err
  1152  		}
  1153  	}
  1154  	return nil
  1155  }
  1156  
  1157  func parentDir(absPath string) string {
  1158  	parent := path.Dir(strings.TrimRight(absPath, "/"))
  1159  	if parent == "." {
  1160  		parent = ""
  1161  	}
  1162  	return parent
  1163  }
  1164  
  1165  // mkParentDirs creates parent containers by absolute path,
  1166  // ignores the ErrorDirAlreadyExists
  1167  func (f *Fs) mkParentDirs(ctx context.Context, path string) error {
  1168  	err := f.mkDirs(ctx, parentDir(path))
  1169  	if err == ErrorDirAlreadyExists {
  1170  		return nil
  1171  	}
  1172  	return err
  1173  }
  1174  
  1175  // Rmdir deletes a directory.
  1176  // Returns an error if it isn't empty.
  1177  func (f *Fs) Rmdir(ctx context.Context, dir string) error {
  1178  	// fs.Debugf(f, ">>> Rmdir %q", dir)
  1179  	return f.purgeWithCheck(ctx, dir, true, "rmdir")
  1180  }
  1181  
  1182  // Purge deletes all the files in the directory
  1183  // Optional interface: Only implement this if you have a way of deleting
  1184  // all the files quicker than just running Remove() on the result of List()
  1185  func (f *Fs) Purge(ctx context.Context, dir string) error {
  1186  	// fs.Debugf(f, ">>> Purge")
  1187  	return f.purgeWithCheck(ctx, dir, false, "purge")
  1188  }
  1189  
  1190  // purgeWithCheck() removes the root directory.
  1191  // Refuses if `check` is set and directory has anything in.
  1192  func (f *Fs) purgeWithCheck(ctx context.Context, dir string, check bool, opName string) error {
  1193  	path := f.absPath(dir)
  1194  	if path == "/" || path == "" {
  1195  		// Mailru will not allow to purge root space returning status 400
  1196  		return fs.ErrorNotDeletingDirs
  1197  	}
  1198  
  1199  	_, dirSize, err := f.readItemMetaData(ctx, path)
  1200  	if err != nil {
  1201  		return fmt.Errorf("%s failed: %w", opName, err)
  1202  	}
  1203  	if check && dirSize > 0 {
  1204  		return fs.ErrorDirectoryNotEmpty
  1205  	}
  1206  	return f.delete(ctx, path, false)
  1207  }
  1208  
  1209  func (f *Fs) delete(ctx context.Context, path string, hardDelete bool) error {
  1210  	token, err := f.accessToken()
  1211  	if err != nil {
  1212  		return err
  1213  	}
  1214  
  1215  	data := url.Values{"home": {f.opt.Enc.FromStandardPath(path)}}
  1216  	opts := rest.Opts{
  1217  		Method: "POST",
  1218  		Path:   "/api/m1/file/remove",
  1219  		Parameters: url.Values{
  1220  			"access_token": {token},
  1221  		},
  1222  		Body:        strings.NewReader(data.Encode()),
  1223  		ContentType: api.BinContentType,
  1224  	}
  1225  
  1226  	var response api.GenericResponse
  1227  	err = f.pacer.Call(func() (bool, error) {
  1228  		res, err := f.srv.CallJSON(ctx, &opts, nil, &response)
  1229  		return shouldRetry(ctx, res, err, f, &opts)
  1230  	})
  1231  
  1232  	switch {
  1233  	case err != nil:
  1234  		return err
  1235  	case response.Status == 200:
  1236  		return nil
  1237  	default:
  1238  		return fmt.Errorf("delete failed with code %d", response.Status)
  1239  	}
  1240  }
  1241  
  1242  // Copy src to this remote using server-side copy operations.
  1243  // This is stored with the remote path given.
  1244  // It returns the destination Object and a possible error.
  1245  // Will only be called if src.Fs().Name() == f.Name()
  1246  // If it isn't possible then return fs.ErrorCantCopy
  1247  func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
  1248  	// fs.Debugf(f, ">>> Copy %q %q", src.Remote(), remote)
  1249  
  1250  	srcObj, ok := src.(*Object)
  1251  	if !ok {
  1252  		fs.Debugf(src, "Can't copy - not same remote type")
  1253  		return nil, fs.ErrorCantCopy
  1254  	}
  1255  	if srcObj.fs.opt.Username != f.opt.Username {
  1256  		// Can copy across mailru configs only if they share common account
  1257  		fs.Debugf(src, "Can't copy - not same account")
  1258  		return nil, fs.ErrorCantCopy
  1259  	}
  1260  
  1261  	srcPath := srcObj.absPath()
  1262  	dstPath := f.absPath(remote)
  1263  	overwrite := false
  1264  	// fs.Debugf(f, "copy %q -> %q\n", srcPath, dstPath)
  1265  
  1266  	err := f.mkParentDirs(ctx, dstPath)
  1267  	if err != nil {
  1268  		return nil, err
  1269  	}
  1270  
  1271  	data := url.Values{}
  1272  	data.Set("home", f.opt.Enc.FromStandardPath(srcPath))
  1273  	data.Set("folder", f.opt.Enc.FromStandardPath(parentDir(dstPath)))
  1274  	data.Set("email", f.opt.Username)
  1275  	data.Set("x-email", f.opt.Username)
  1276  
  1277  	if overwrite {
  1278  		data.Set("conflict", "rewrite")
  1279  	} else {
  1280  		data.Set("conflict", "rename")
  1281  	}
  1282  
  1283  	token, err := f.accessToken()
  1284  	if err != nil {
  1285  		return nil, err
  1286  	}
  1287  
  1288  	opts := rest.Opts{
  1289  		Method: "POST",
  1290  		Path:   "/api/m1/file/copy",
  1291  		Parameters: url.Values{
  1292  			"access_token": {token},
  1293  		},
  1294  		Body:        strings.NewReader(data.Encode()),
  1295  		ContentType: api.BinContentType,
  1296  	}
  1297  
  1298  	var response api.GenericBodyResponse
  1299  	err = f.pacer.Call(func() (bool, error) {
  1300  		res, err := f.srv.CallJSON(ctx, &opts, nil, &response)
  1301  		return shouldRetry(ctx, res, err, f, &opts)
  1302  	})
  1303  
  1304  	if err != nil {
  1305  		return nil, fmt.Errorf("couldn't copy file: %w", err)
  1306  	}
  1307  	if response.Status != 200 {
  1308  		return nil, fmt.Errorf("copy failed with code %d", response.Status)
  1309  	}
  1310  
  1311  	tmpPath := f.opt.Enc.ToStandardPath(response.Body)
  1312  	if tmpPath != dstPath {
  1313  		// fs.Debugf(f, "rename temporary file %q -> %q\n", tmpPath, dstPath)
  1314  		err = f.moveItemBin(ctx, tmpPath, dstPath, "rename temporary file")
  1315  		if err != nil {
  1316  			_ = f.delete(ctx, tmpPath, false) // ignore error
  1317  			return nil, err
  1318  		}
  1319  	}
  1320  
  1321  	// fix modification time at destination
  1322  	dstObj := &Object{
  1323  		fs:     f,
  1324  		remote: remote,
  1325  	}
  1326  	err = dstObj.readMetaData(ctx, true)
  1327  	if err == nil && dstObj.modTime != srcObj.modTime {
  1328  		dstObj.modTime = srcObj.modTime
  1329  		err = dstObj.addFileMetaData(ctx, true)
  1330  	}
  1331  	if err != nil {
  1332  		dstObj = nil
  1333  	}
  1334  	return dstObj, err
  1335  }
  1336  
  1337  // Move src to this remote using server-side move operations.
  1338  // This is stored with the remote path given.
  1339  // It returns the destination Object and a possible error.
  1340  // Will only be called if src.Fs().Name() == f.Name()
  1341  // If it isn't possible then return fs.ErrorCantMove
  1342  func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
  1343  	// fs.Debugf(f, ">>> Move %q %q", src.Remote(), remote)
  1344  
  1345  	srcObj, ok := src.(*Object)
  1346  	if !ok {
  1347  		fs.Debugf(src, "Can't move - not same remote type")
  1348  		return nil, fs.ErrorCantMove
  1349  	}
  1350  	if srcObj.fs.opt.Username != f.opt.Username {
  1351  		// Can move across mailru configs only if they share common account
  1352  		fs.Debugf(src, "Can't move - not same account")
  1353  		return nil, fs.ErrorCantMove
  1354  	}
  1355  
  1356  	srcPath := srcObj.absPath()
  1357  	dstPath := f.absPath(remote)
  1358  
  1359  	err := f.mkParentDirs(ctx, dstPath)
  1360  	if err != nil {
  1361  		return nil, err
  1362  	}
  1363  
  1364  	err = f.moveItemBin(ctx, srcPath, dstPath, "move file")
  1365  	if err != nil {
  1366  		return nil, err
  1367  	}
  1368  
  1369  	return f.NewObject(ctx, remote)
  1370  }
  1371  
  1372  // move/rename an object using BIN protocol
  1373  func (f *Fs) moveItemBin(ctx context.Context, srcPath, dstPath, opName string) error {
  1374  	token, err := f.accessToken()
  1375  	if err != nil {
  1376  		return err
  1377  	}
  1378  	metaURL, err := f.metaServer(ctx)
  1379  	if err != nil {
  1380  		return err
  1381  	}
  1382  
  1383  	req := api.NewBinWriter()
  1384  	req.WritePu16(api.OperationRename)
  1385  	req.WritePu32(0) // old revision
  1386  	req.WriteString(f.opt.Enc.FromStandardPath(srcPath))
  1387  	req.WritePu32(0) // new revision
  1388  	req.WriteString(f.opt.Enc.FromStandardPath(dstPath))
  1389  	req.WritePu32(0) // dunno
  1390  
  1391  	opts := rest.Opts{
  1392  		Method:  "POST",
  1393  		RootURL: metaURL,
  1394  		Parameters: url.Values{
  1395  			"client_id": {api.OAuthClientID},
  1396  			"token":     {token},
  1397  		},
  1398  		ContentType: api.BinContentType,
  1399  		Body:        req.Reader(),
  1400  	}
  1401  
  1402  	var res *http.Response
  1403  	err = f.pacer.Call(func() (bool, error) {
  1404  		res, err = f.srv.Call(ctx, &opts)
  1405  		return shouldRetry(ctx, res, err, f, &opts)
  1406  	})
  1407  	if err != nil {
  1408  		closeBody(res)
  1409  		return err
  1410  	}
  1411  
  1412  	reply := api.NewBinReader(res.Body)
  1413  	defer closeBody(res)
  1414  
  1415  	switch status := reply.ReadByteAsInt(); status {
  1416  	case api.MoveResultOK:
  1417  		return nil
  1418  	default:
  1419  		return fmt.Errorf("%s failed with error %d", opName, status)
  1420  	}
  1421  }
  1422  
  1423  // DirMove moves src, srcRemote to this remote at dstRemote
  1424  // using server-side move operations.
  1425  // Will only be called if src.Fs().Name() == f.Name()
  1426  // If it isn't possible then return fs.ErrorCantDirMove
  1427  // If destination exists then return fs.ErrorDirExists
  1428  func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
  1429  	// fs.Debugf(f, ">>> DirMove %q %q", srcRemote, dstRemote)
  1430  
  1431  	srcFs, ok := src.(*Fs)
  1432  	if !ok {
  1433  		fs.Debugf(srcFs, "Can't move directory - not same remote type")
  1434  		return fs.ErrorCantDirMove
  1435  	}
  1436  	if srcFs.opt.Username != f.opt.Username {
  1437  		// Can move across mailru configs only if they share common account
  1438  		fs.Debugf(src, "Can't move - not same account")
  1439  		return fs.ErrorCantDirMove
  1440  	}
  1441  	srcPath := srcFs.absPath(srcRemote)
  1442  	dstPath := f.absPath(dstRemote)
  1443  	// fs.Debugf(srcFs, "DirMove [%s]%q --> [%s]%q\n", srcRemote, srcPath, dstRemote, dstPath)
  1444  
  1445  	// Refuse to move to or from the root
  1446  	if len(srcPath) <= len(srcFs.root) || len(dstPath) <= len(f.root) {
  1447  		fs.Debugf(src, "DirMove error: Can't move root")
  1448  		return errors.New("can't move root directory")
  1449  	}
  1450  
  1451  	err := f.mkParentDirs(ctx, dstPath)
  1452  	if err != nil {
  1453  		return err
  1454  	}
  1455  
  1456  	_, _, err = f.readItemMetaData(ctx, dstPath)
  1457  	switch err {
  1458  	case fs.ErrorObjectNotFound:
  1459  		// OK!
  1460  	case nil:
  1461  		return fs.ErrorDirExists
  1462  	default:
  1463  		return err
  1464  	}
  1465  
  1466  	return f.moveItemBin(ctx, srcPath, dstPath, "directory move")
  1467  }
  1468  
  1469  // PublicLink generates a public link to the remote path (usually readable by anyone)
  1470  func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (link string, err error) {
  1471  	// fs.Debugf(f, ">>> PublicLink %q", remote)
  1472  
  1473  	token, err := f.accessToken()
  1474  	if err != nil {
  1475  		return "", err
  1476  	}
  1477  
  1478  	data := url.Values{}
  1479  	data.Set("home", f.opt.Enc.FromStandardPath(f.absPath(remote)))
  1480  	data.Set("email", f.opt.Username)
  1481  	data.Set("x-email", f.opt.Username)
  1482  
  1483  	opts := rest.Opts{
  1484  		Method: "POST",
  1485  		Path:   "/api/m1/file/publish",
  1486  		Parameters: url.Values{
  1487  			"access_token": {token},
  1488  		},
  1489  		Body:        strings.NewReader(data.Encode()),
  1490  		ContentType: api.BinContentType,
  1491  	}
  1492  
  1493  	var response api.GenericBodyResponse
  1494  	err = f.pacer.Call(func() (bool, error) {
  1495  		res, err := f.srv.CallJSON(ctx, &opts, nil, &response)
  1496  		return shouldRetry(ctx, res, err, f, &opts)
  1497  	})
  1498  
  1499  	if err == nil && response.Body != "" {
  1500  		return api.PublicLinkURL + response.Body, nil
  1501  	}
  1502  	if err == nil {
  1503  		return "", errors.New("server returned empty link")
  1504  	}
  1505  	if apiErr, ok := err.(*api.FileErrorResponse); ok && apiErr.Status == 404 {
  1506  		return "", fs.ErrorObjectNotFound
  1507  	}
  1508  	return "", err
  1509  }
  1510  
  1511  // CleanUp permanently deletes all trashed files/folders
  1512  func (f *Fs) CleanUp(ctx context.Context) error {
  1513  	// fs.Debugf(f, ">>> CleanUp")
  1514  
  1515  	token, err := f.accessToken()
  1516  	if err != nil {
  1517  		return err
  1518  	}
  1519  
  1520  	data := url.Values{
  1521  		"email":   {f.opt.Username},
  1522  		"x-email": {f.opt.Username},
  1523  	}
  1524  	opts := rest.Opts{
  1525  		Method: "POST",
  1526  		Path:   "/api/m1/trashbin/empty",
  1527  		Parameters: url.Values{
  1528  			"access_token": {token},
  1529  		},
  1530  		Body:        strings.NewReader(data.Encode()),
  1531  		ContentType: api.BinContentType,
  1532  	}
  1533  
  1534  	var response api.CleanupResponse
  1535  	err = f.pacer.Call(func() (bool, error) {
  1536  		res, err := f.srv.CallJSON(ctx, &opts, nil, &response)
  1537  		return shouldRetry(ctx, res, err, f, &opts)
  1538  	})
  1539  	if err != nil {
  1540  		return err
  1541  	}
  1542  
  1543  	switch response.StatusStr {
  1544  	case "200":
  1545  		return nil
  1546  	default:
  1547  		return fmt.Errorf("cleanup failed (%s)", response.StatusStr)
  1548  	}
  1549  }
  1550  
  1551  // About gets quota information
  1552  func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
  1553  	// fs.Debugf(f, ">>> About")
  1554  
  1555  	token, err := f.accessToken()
  1556  	if err != nil {
  1557  		return nil, err
  1558  	}
  1559  	opts := rest.Opts{
  1560  		Method: "GET",
  1561  		Path:   "/api/m1/user",
  1562  		Parameters: url.Values{
  1563  			"access_token": {token},
  1564  		},
  1565  	}
  1566  
  1567  	var info api.UserInfoResponse
  1568  	err = f.pacer.Call(func() (bool, error) {
  1569  		res, err := f.srv.CallJSON(ctx, &opts, nil, &info)
  1570  		return shouldRetry(ctx, res, err, f, &opts)
  1571  	})
  1572  	if err != nil {
  1573  		return nil, err
  1574  	}
  1575  
  1576  	total := info.Body.Cloud.Space.BytesTotal
  1577  	used := info.Body.Cloud.Space.BytesUsed
  1578  
  1579  	usage := &fs.Usage{
  1580  		Total: fs.NewUsageValue(total),
  1581  		Used:  fs.NewUsageValue(used),
  1582  		Free:  fs.NewUsageValue(total - used),
  1583  	}
  1584  	return usage, nil
  1585  }
  1586  
  1587  // Put the object
  1588  // Copy the reader in to the new object which is returned
  1589  // The new object may have been created if an error is returned
  1590  func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
  1591  	o := &Object{
  1592  		fs:      f,
  1593  		remote:  src.Remote(),
  1594  		size:    src.Size(),
  1595  		modTime: src.ModTime(ctx),
  1596  	}
  1597  	// fs.Debugf(f, ">>> Put: %q %d '%v'", o.remote, o.size, o.modTime)
  1598  	return o, o.Update(ctx, in, src, options...)
  1599  }
  1600  
// Update an existing object
// Copy the reader into the object updating modTime and size
// The new object may have been created if an error is returned
//
// The upload strategy is tiered: first try "speedup" (register the file
// server-side from a known mrhash without transferring data), sourcing
// the hash from the source object, the local file, an in-memory buffer,
// or a spool file; fall back to a normal data upload otherwise.
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	wrapIn := in
	size := src.Size()
	if size < 0 {
		return errors.New("mailru does not support streaming uploads")
	}

	err := o.fs.mkParentDirs(ctx, o.absPath())
	if err != nil {
		return err
	}

	var (
		fileBuf  []byte // whole file kept in memory (small files / memory speedup)
		fileHash []byte // mrhash we expect the data to have
		newHash  []byte // mrhash reported/derived after upload
		slowHash bool   // source hash is expensive to compute
		localSrc bool   // source is a local filesystem object
	)
	if srcObj := fs.UnWrapObjectInfo(src); srcObj != nil {
		srcFeatures := srcObj.Fs().Features()
		slowHash = srcFeatures.SlowHash
		localSrc = srcFeatures.IsLocal
	}

	// Try speedup if it's globally enabled but skip extra post
	// request if file is small and fits in the metadata request
	trySpeedup := o.fs.opt.SpeedupEnable && size > mrhash.Size

	// Try to get the hash if it's instant
	if trySpeedup && !slowHash {
		if srcHash, err := src.Hash(ctx, MrHashType); err == nil && srcHash != "" {
			fileHash, _ = mrhash.DecodeString(srcHash)
		}
		if fileHash != nil {
			if o.putByHash(ctx, fileHash, src, "source") {
				return nil
			}
			trySpeedup = false // speedup failed, force upload
		}
	}

	// Need to calculate hash, check whether file is still eligible for speedup
	trySpeedup = trySpeedup && o.fs.eligibleForSpeedup(o.Remote(), size, options...)

	// Attempt to put by hash if file is local and eligible
	if trySpeedup && localSrc {
		if srcHash, err := src.Hash(ctx, MrHashType); err == nil && srcHash != "" {
			fileHash, _ = mrhash.DecodeString(srcHash)
		}
		if fileHash != nil && o.putByHash(ctx, fileHash, src, "localfs") {
			return nil
		}
		// If local file hashing has failed, it's pointless to try anymore
		trySpeedup = false
	}

	// Attempt to put by calculating hash in memory
	if trySpeedup && size <= int64(o.fs.opt.SpeedupMaxMem) {
		fileBuf, err = io.ReadAll(in)
		if err != nil {
			return err
		}
		fileHash = mrhash.Sum(fileBuf)
		if o.putByHash(ctx, fileHash, src, "memory") {
			return nil
		}
		// `in` has been consumed; replay the data from the buffer below
		wrapIn = bytes.NewReader(fileBuf)
		trySpeedup = false // speedup failed, force upload
	}

	// Attempt to put by hash using a spool file
	if trySpeedup {
		tmpFs, err := fs.TemporaryLocalFs(ctx)
		if err != nil {
			// Best effort: fall through to a plain upload
			fs.Infof(tmpFs, "Failed to create spool FS: %v", err)
		} else {
			defer func() {
				if err := operations.Purge(ctx, tmpFs, ""); err != nil {
					fs.Infof(tmpFs, "Failed to cleanup spool FS: %v", err)
				}
			}()

			spoolFile, mrHash, err := makeTempFile(ctx, tmpFs, wrapIn, src)
			if err != nil {
				return fmt.Errorf("failed to create spool file: %w", err)
			}
			if o.putByHash(ctx, mrHash, src, "spool") {
				// If put by hash is successful, ignore transitive error
				return nil
			}
			// Re-read the data from the spool file for the real upload
			if wrapIn, err = spoolFile.Open(ctx); err != nil {
				return err
			}
			fileHash = mrHash
		}
	}

	// Upload object data
	if size <= mrhash.Size {
		// Optimize upload: skip extra request if data fits in the hash buffer.
		if fileBuf == nil {
			fileBuf, err = io.ReadAll(wrapIn)
		}
		if fileHash == nil && err == nil {
			fileHash = mrhash.Sum(fileBuf)
		}
		newHash = fileHash
	} else {
		var hasher gohash.Hash
		if fileHash == nil {
			// Calculate hash in transit
			hasher = mrhash.New()
			wrapIn = io.TeeReader(wrapIn, hasher)
		}
		newHash, err = o.upload(ctx, wrapIn, size, options...)
		if fileHash == nil && err == nil {
			fileHash = hasher.Sum(nil)
		}
	}
	if err != nil {
		return err
	}

	// Verify the server-reported hash against the expected one
	if !bytes.Equal(fileHash, newHash) {
		if o.fs.opt.CheckHash {
			return mrhash.ErrorInvalidHash
		}
		fs.Infof(o, "hash mismatch on upload: expected %x received %x", fileHash, newHash)
	}
	o.mrHash = newHash
	o.size = size
	o.modTime = src.ModTime(ctx)
	return o.addFileMetaData(ctx, true)
}
  1739  
  1740  // eligibleForSpeedup checks whether file is eligible for speedup method (put by hash)
  1741  func (f *Fs) eligibleForSpeedup(remote string, size int64, options ...fs.OpenOption) bool {
  1742  	if !f.opt.SpeedupEnable {
  1743  		return false
  1744  	}
  1745  	if size <= mrhash.Size || size < speedupMinSize || size >= int64(f.opt.SpeedupMaxDisk) {
  1746  		return false
  1747  	}
  1748  	_, _, partial := getTransferRange(size, options...)
  1749  	if partial {
  1750  		return false
  1751  	}
  1752  	if f.speedupAny {
  1753  		return true
  1754  	}
  1755  	if f.speedupGlobs == nil {
  1756  		return false
  1757  	}
  1758  	nameLower := strings.ToLower(strings.TrimSpace(path.Base(remote)))
  1759  	for _, pattern := range f.speedupGlobs {
  1760  		if matches, _ := filepath.Match(pattern, nameLower); matches {
  1761  			return true
  1762  		}
  1763  	}
  1764  	return false
  1765  }
  1766  
  1767  // parseSpeedupPatterns converts pattern string into list of unique glob patterns
  1768  func (f *Fs) parseSpeedupPatterns(patternString string) (err error) {
  1769  	f.speedupGlobs = nil
  1770  	f.speedupAny = false
  1771  	uniqueValidPatterns := make(map[string]interface{})
  1772  
  1773  	for _, pattern := range strings.Split(patternString, ",") {
  1774  		pattern = strings.ToLower(strings.TrimSpace(pattern))
  1775  		if pattern == "" {
  1776  			continue
  1777  		}
  1778  		if pattern == "*" {
  1779  			f.speedupAny = true
  1780  		}
  1781  		if _, err := filepath.Match(pattern, ""); err != nil {
  1782  			return fmt.Errorf("invalid file name pattern %q", pattern)
  1783  		}
  1784  		uniqueValidPatterns[pattern] = nil
  1785  	}
  1786  	for pattern := range uniqueValidPatterns {
  1787  		f.speedupGlobs = append(f.speedupGlobs, pattern)
  1788  	}
  1789  	return nil
  1790  }
  1791  
  1792  // putByHash is a thin wrapper around addFileMetaData
  1793  func (o *Object) putByHash(ctx context.Context, mrHash []byte, info fs.ObjectInfo, method string) bool {
  1794  	oNew := new(Object)
  1795  	*oNew = *o
  1796  	oNew.mrHash = mrHash
  1797  	oNew.size = info.Size()
  1798  	oNew.modTime = info.ModTime(ctx)
  1799  	if err := oNew.addFileMetaData(ctx, true); err != nil {
  1800  		fs.Debugf(o, "Cannot put by hash from %s, performing upload", method)
  1801  		return false
  1802  	}
  1803  	*o = *oNew
  1804  	fs.Debugf(o, "File has been put by hash from %s", method)
  1805  	return true
  1806  }
  1807  
  1808  func makeTempFile(ctx context.Context, tmpFs fs.Fs, wrapIn io.Reader, src fs.ObjectInfo) (spoolFile fs.Object, mrHash []byte, err error) {
  1809  	// Local temporary file system must support SHA1
  1810  	hashType := hash.SHA1
  1811  
  1812  	// Calculate Mailru and spool verification hashes in transit
  1813  	hashSet := hash.NewHashSet(MrHashType, hashType)
  1814  	hasher, err := hash.NewMultiHasherTypes(hashSet)
  1815  	if err != nil {
  1816  		return nil, nil, err
  1817  	}
  1818  	wrapIn = io.TeeReader(wrapIn, hasher)
  1819  
  1820  	// Copy stream into spool file
  1821  	tmpInfo := object.NewStaticObjectInfo(src.Remote(), src.ModTime(ctx), src.Size(), false, nil, nil)
  1822  	hashOption := &fs.HashesOption{Hashes: hashSet}
  1823  	if spoolFile, err = tmpFs.Put(ctx, wrapIn, tmpInfo, hashOption); err != nil {
  1824  		return nil, nil, err
  1825  	}
  1826  
  1827  	// Validate spool file
  1828  	sums := hasher.Sums()
  1829  	checkSum := sums[hashType]
  1830  	fileSum, err := spoolFile.Hash(ctx, hashType)
  1831  	if spoolFile.Size() != src.Size() || err != nil || checkSum == "" || fileSum != checkSum {
  1832  		return nil, nil, mrhash.ErrorInvalidHash
  1833  	}
  1834  
  1835  	mrHash, err = mrhash.DecodeString(sums[MrHashType])
  1836  	return
  1837  }
  1838  
  1839  func (o *Object) upload(ctx context.Context, in io.Reader, size int64, options ...fs.OpenOption) ([]byte, error) {
  1840  	token, err := o.fs.accessToken()
  1841  	if err != nil {
  1842  		return nil, err
  1843  	}
  1844  	shardURL, err := o.fs.uploadShard(ctx)
  1845  	if err != nil {
  1846  		return nil, err
  1847  	}
  1848  
  1849  	opts := rest.Opts{
  1850  		Method:        "PUT",
  1851  		RootURL:       shardURL,
  1852  		Body:          in,
  1853  		Options:       options,
  1854  		ContentLength: &size,
  1855  		Parameters: url.Values{
  1856  			"client_id": {api.OAuthClientID},
  1857  			"token":     {token},
  1858  		},
  1859  		ExtraHeaders: map[string]string{
  1860  			"Accept": "*/*",
  1861  		},
  1862  	}
  1863  
  1864  	var (
  1865  		res     *http.Response
  1866  		strHash string
  1867  	)
  1868  	err = o.fs.pacer.Call(func() (bool, error) {
  1869  		res, err = o.fs.srv.Call(ctx, &opts)
  1870  		if err == nil {
  1871  			strHash, err = readBodyWord(res)
  1872  		}
  1873  		return fserrors.ShouldRetry(err), err
  1874  	})
  1875  	if err != nil {
  1876  		closeBody(res)
  1877  		return nil, err
  1878  	}
  1879  
  1880  	switch res.StatusCode {
  1881  	case 200, 201:
  1882  		return mrhash.DecodeString(strHash)
  1883  	default:
  1884  		return nil, fmt.Errorf("upload failed with code %s (%d)", res.Status, res.StatusCode)
  1885  	}
  1886  }
  1887  
  1888  func (f *Fs) uploadShard(ctx context.Context) (string, error) {
  1889  	f.shardMu.Lock()
  1890  	defer f.shardMu.Unlock()
  1891  
  1892  	if f.shardURL != "" && time.Now().Before(f.shardExpiry) {
  1893  		return f.shardURL, nil
  1894  	}
  1895  
  1896  	opts := rest.Opts{
  1897  		RootURL: api.DispatchServerURL,
  1898  		Method:  "GET",
  1899  		Path:    "/u",
  1900  	}
  1901  
  1902  	var (
  1903  		res *http.Response
  1904  		url string
  1905  		err error
  1906  	)
  1907  	err = f.pacer.Call(func() (bool, error) {
  1908  		res, err = f.srv.Call(ctx, &opts)
  1909  		if err == nil {
  1910  			url, err = readBodyWord(res)
  1911  		}
  1912  		return fserrors.ShouldRetry(err), err
  1913  	})
  1914  	if err != nil {
  1915  		closeBody(res)
  1916  		return "", err
  1917  	}
  1918  
  1919  	f.shardURL = url
  1920  	f.shardExpiry = time.Now().Add(shardExpirySec * time.Second)
  1921  	fs.Debugf(f, "new upload shard: %s", f.shardURL)
  1922  
  1923  	return f.shardURL, nil
  1924  }
  1925  
// Object describes a mailru object
//
// Metadata fields are populated lazily by readMetaData; hasMetaData
// records whether they are currently valid.
type Object struct {
	fs          *Fs       // what this object is part of
	remote      string    // The remote path (relative to the Fs root)
	hasMetaData bool      // whether info below has been set
	size        int64     // Bytes in the object
	modTime     time.Time // Modified time of the object
	mrHash      []byte    // Mail.ru flavored SHA1 hash of the object
}
  1935  
  1936  // NewObject finds an Object at the remote.
  1937  // If object can't be found it fails with fs.ErrorObjectNotFound
  1938  func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
  1939  	// fs.Debugf(f, ">>> NewObject %q", remote)
  1940  	o := &Object{
  1941  		fs:     f,
  1942  		remote: remote,
  1943  	}
  1944  	err := o.readMetaData(ctx, true)
  1945  	if err != nil {
  1946  		return nil, err
  1947  	}
  1948  	return o, nil
  1949  }
  1950  
// absPath converts the root-relative remote path of this object to an
// absolute home path via the parent Fs.
func (o *Object) absPath() string {
	return o.fs.absPath(o.remote)
}
  1955  
  1956  // Object.readMetaData reads and fills a file info
  1957  // If object can't be found it fails with fs.ErrorObjectNotFound
  1958  func (o *Object) readMetaData(ctx context.Context, force bool) error {
  1959  	if o.hasMetaData && !force {
  1960  		return nil
  1961  	}
  1962  	entry, dirSize, err := o.fs.readItemMetaData(ctx, o.absPath())
  1963  	if err != nil {
  1964  		return err
  1965  	}
  1966  	newObj, ok := entry.(*Object)
  1967  	if !ok || dirSize >= 0 {
  1968  		return fs.ErrorIsDir
  1969  	}
  1970  	if newObj.remote != o.remote {
  1971  		return fmt.Errorf("file %q path has changed to %q", o.remote, newObj.remote)
  1972  	}
  1973  	o.hasMetaData = true
  1974  	o.size = newObj.size
  1975  	o.modTime = newObj.modTime
  1976  	o.mrHash = newObj.mrHash
  1977  	return nil
  1978  }
  1979  
// Fs returns the parent Fs this object belongs to.
func (o *Object) Fs() fs.Info {
	return o.fs
}
  1984  
  1985  // Return a string version
  1986  func (o *Object) String() string {
  1987  	if o == nil {
  1988  		return "<nil>"
  1989  	}
  1990  	//return fmt.Sprintf("[%s]%q", o.fs.root, o.remote)
  1991  	return o.remote
  1992  }
  1993  
// Remote returns the remote path of the object, relative to the Fs root.
func (o *Object) Remote() string {
	return o.remote
}
  1998  
  1999  // ModTime returns the modification time of the object
  2000  // It attempts to read the objects mtime and if that isn't present the
  2001  // LastModified returned in the http headers
  2002  func (o *Object) ModTime(ctx context.Context) time.Time {
  2003  	err := o.readMetaData(ctx, false)
  2004  	if err != nil {
  2005  		fs.Errorf(o, "%v", err)
  2006  	}
  2007  	return o.modTime
  2008  }
  2009  
  2010  // Size returns the size of an object in bytes
  2011  func (o *Object) Size() int64 {
  2012  	ctx := context.Background() // Note: Object.Size does not pass context!
  2013  	err := o.readMetaData(ctx, false)
  2014  	if err != nil {
  2015  		fs.Errorf(o, "%v", err)
  2016  	}
  2017  	return o.size
  2018  }
  2019  
  2020  // Hash returns the MD5 or SHA1 sum of an object
  2021  // returning a lowercase hex string
  2022  func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
  2023  	if t == MrHashType {
  2024  		return hex.EncodeToString(o.mrHash), nil
  2025  	}
  2026  	return "", hash.ErrUnsupported
  2027  }
  2028  
// Storable returns whether this object is storable (always true).
func (o *Object) Storable() bool {
	return true
}
  2033  
  2034  // SetModTime sets the modification time of the local fs object
  2035  //
  2036  // Commits the datastore
  2037  func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
  2038  	// fs.Debugf(o, ">>> SetModTime [%v]", modTime)
  2039  	o.modTime = modTime
  2040  	return o.addFileMetaData(ctx, true)
  2041  }
  2042  
// addFileMetaData registers the file (path, size, mtime and Mailru hash)
// with the metadata server using the binary protocol. With overwrite set
// an existing entry is replaced unconditionally; otherwise the server is
// asked to keep unchanged files and add with rename on conflict.
func (o *Object) addFileMetaData(ctx context.Context, overwrite bool) error {
	// The hash must be complete before the file can be registered.
	if len(o.mrHash) != mrhash.Size {
		return mrhash.ErrorInvalidHash
	}
	token, err := o.fs.accessToken()
	if err != nil {
		return err
	}
	metaURL, err := o.fs.metaServer(ctx)
	if err != nil {
		return err
	}

	// Build the binary AddFile request; the field order is dictated by
	// the protocol and must not change.
	req := api.NewBinWriter()
	req.WritePu16(api.OperationAddFile)
	req.WritePu16(0) // revision
	req.WriteString(o.fs.opt.Enc.FromStandardPath(o.absPath()))
	req.WritePu64(o.size)
	req.WriteP64(o.modTime.Unix())
	req.WritePu32(0)
	req.Write(o.mrHash)

	if overwrite {
		// overwrite
		req.WritePu32(1)
	} else {
		// don't add if not changed, add with rename if changed
		// NOTE(review): 55 is a protocol conflict-mode constant; meaning
		// taken from the comment above, not independently verified.
		req.WritePu32(55)
		req.Write(o.mrHash)
		req.WritePu64(o.size)
	}

	opts := rest.Opts{
		Method:  "POST",
		RootURL: metaURL,
		Parameters: url.Values{
			"client_id": {api.OAuthClientID},
			"token":     {token},
		},
		ContentType: api.BinContentType,
		Body:        req.Reader(),
	}

	var res *http.Response
	err = o.fs.pacer.Call(func() (bool, error) {
		res, err = o.fs.srv.Call(ctx, &opts)
		return shouldRetry(ctx, res, err, o.fs, &opts)
	})
	if err != nil {
		closeBody(res)
		return err
	}

	reply := api.NewBinReader(res.Body)
	defer closeBody(res)

	// The first reply byte is the add-file status code.
	switch status := reply.ReadByteAsInt(); status {
	case api.AddResultOK, api.AddResultNotModified, api.AddResultDunno04, api.AddResultDunno09:
		return nil
	case api.AddResultInvalidName:
		return ErrorInvalidName
	default:
		return fmt.Errorf("add file error %d", status)
	}
}
  2108  
// Remove an object
func (o *Object) Remove(ctx context.Context) error {
	// fs.Debugf(o, ">>> Remove")
	// NOTE(review): the meaning of the final false flag is defined by
	// Fs.delete (not visible here) - confirm before changing.
	return o.fs.delete(ctx, o.absPath(), false)
}
  2114  
  2115  // getTransferRange detects partial transfers and calculates start/end offsets into file
  2116  func getTransferRange(size int64, options ...fs.OpenOption) (start int64, end int64, partial bool) {
  2117  	var offset, limit int64 = 0, -1
  2118  
  2119  	for _, option := range options {
  2120  		switch opt := option.(type) {
  2121  		case *fs.SeekOption:
  2122  			offset = opt.Offset
  2123  		case *fs.RangeOption:
  2124  			offset, limit = opt.Decode(size)
  2125  		default:
  2126  			if option.Mandatory() {
  2127  				fs.Errorf(nil, "Unsupported mandatory option: %v", option)
  2128  			}
  2129  		}
  2130  	}
  2131  	if limit < 0 {
  2132  		limit = size - offset
  2133  	}
  2134  	end = offset + limit
  2135  	if end > size {
  2136  		end = size
  2137  	}
  2138  	partial = !(offset == 0 && end == size)
  2139  	return offset, end, partial
  2140  }
  2141  
// Open an object for read and download its content
//
// Seek/Range options are honored; if the server ignores the Range header
// and returns full content, the requested window is re-created locally by
// discarding the head and limiting the tail of the stream.
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
	// fs.Debugf(o, ">>> Open")

	token, err := o.fs.accessToken()
	if err != nil {
		return nil, err
	}

	// Translate Seek/Range options into absolute start/end offsets.
	start, end, partialRequest := getTransferRange(o.size, options...)

	headers := map[string]string{
		"Accept":       "*/*",
		"Content-Type": "application/octet-stream",
	}
	if partialRequest {
		// HTTP ranges are inclusive, hence end-1.
		rangeStr := fmt.Sprintf("bytes=%d-%d", start, end-1)
		headers["Range"] = rangeStr
		// headers["Content-Range"] = rangeStr
		headers["Accept-Ranges"] = "bytes"
	}

	// TODO: set custom timeouts
	opts := rest.Opts{
		Method:  "GET",
		Options: options,
		Path:    url.PathEscape(strings.TrimLeft(o.fs.opt.Enc.FromStandardPath(o.absPath()), "/")),
		Parameters: url.Values{
			"client_id": {api.OAuthClientID},
			"token":     {token},
		},
		ExtraHeaders: headers,
	}

	var res *http.Response
	server := ""
	err = o.fs.pacer.Call(func() (bool, error) {
		// Ask the dispatcher for a download server, preferring one other
		// than the server used by the previous (retried) attempt.
		server, err = o.fs.fileServers.Dispatch(ctx, server)
		if err != nil {
			return false, err
		}
		opts.RootURL = server
		res, err = o.fs.srv.Call(ctx, &opts)
		return shouldRetry(ctx, res, err, o.fs, &opts)
	})
	if err != nil {
		if res != nil && res.Body != nil {
			closeBody(res)
		}
		return nil, err
	}

	// Server should respond with Status 206 and Content-Range header to a range
	// request. Status 200 (and no Content-Range) means a full-content response.
	partialResponse := res.StatusCode == 206

	var (
		hasher     gohash.Hash
		wrapStream io.ReadCloser
	)
	if !partialResponse {
		// Cannot check hash of partial download
		hasher = mrhash.New()
	}
	// endHandler frees the download server slot and verifies the hash
	// (when available) once the stream ends or is closed.
	wrapStream = &endHandler{
		ctx:    ctx,
		stream: res.Body,
		hasher: hasher,
		o:      o,
		server: server,
	}
	if partialRequest && !partialResponse {
		fs.Debugf(o, "Server returned full content instead of range")
		if start > 0 {
			// Discard the beginning of the data
			_, err = io.CopyN(io.Discard, wrapStream, start)
			if err != nil {
				closeBody(res)
				return nil, err
			}
		}
		// Trim the tail so the caller sees exactly the requested range.
		wrapStream = readers.NewLimitedReadCloser(wrapStream, end-start)
	}
	return wrapStream, nil
}
  2227  
// endHandler wraps a download stream to run one-time end-of-stream
// processing: it frees the download server slot and (when a hasher is
// present) verifies the accumulated Mailru hash of the data read.
type endHandler struct {
	ctx    context.Context
	stream io.ReadCloser // underlying response body
	hasher gohash.Hash   // nil for partial downloads (hash cannot be checked)
	o      *Object       // object being downloaded
	server string        // download server to free when the stream ends
	done   bool          // ensures handle() runs its logic only once
}
  2236  
  2237  func (e *endHandler) Read(p []byte) (n int, err error) {
  2238  	n, err = e.stream.Read(p)
  2239  	if e.hasher != nil {
  2240  		// hasher will not return an error, just panic
  2241  		_, _ = e.hasher.Write(p[:n])
  2242  	}
  2243  	if err != nil { // io.Error or EOF
  2244  		err = e.handle(err)
  2245  	}
  2246  	return
  2247  }
  2248  
// Close runs the end-of-stream handling (which frees the download server)
// and closes the underlying response body.
func (e *endHandler) Close() error {
	_ = e.handle(nil) // ignore returned error
	return e.stream.Close()
}
  2253  
  2254  func (e *endHandler) handle(err error) error {
  2255  	if e.done {
  2256  		return err
  2257  	}
  2258  	e.done = true
  2259  	o := e.o
  2260  
  2261  	o.fs.fileServers.Free(e.server)
  2262  	if err != io.EOF || e.hasher == nil {
  2263  		return err
  2264  	}
  2265  
  2266  	newHash := e.hasher.Sum(nil)
  2267  	if bytes.Equal(o.mrHash, newHash) {
  2268  		return io.EOF
  2269  	}
  2270  	if o.fs.opt.CheckHash {
  2271  		return mrhash.ErrorInvalidHash
  2272  	}
  2273  	fs.Infof(o, "hash mismatch on download: expected %x received %x", o.mrHash, newHash)
  2274  	return io.EOF
  2275  }
  2276  
// serverPool backs server dispatcher: it caches servers handed out by the
// Mailru dispatcher together with per-server lock counts and expiry times.
type serverPool struct {
	pool      pendingServerMap
	mu        sync.Mutex // protects pool
	path      string     // dispatcher request path
	expirySec int        // lifetime of a dispatched server in seconds
	fs        *Fs
}

// pendingServerMap maps a server URL to its lock/expiry state.
type pendingServerMap map[string]*pendingServer

// pendingServer tracks how many transfers currently hold a server and
// when its dispatcher lease expires.
type pendingServer struct {
	locks  int
	expiry time.Time
}
  2292  
  2293  // Dispatch dispatches next download server.
  2294  // It prefers switching and tries to avoid current server
  2295  // in use by caller because it may be overloaded or slow.
  2296  func (p *serverPool) Dispatch(ctx context.Context, current string) (string, error) {
  2297  	now := time.Now()
  2298  	url := p.getServer(current, now)
  2299  	if url != "" {
  2300  		return url, nil
  2301  	}
  2302  
  2303  	// Server not found - ask Mailru dispatcher.
  2304  	opts := rest.Opts{
  2305  		Method:  "GET",
  2306  		RootURL: api.DispatchServerURL,
  2307  		Path:    p.path,
  2308  	}
  2309  	var (
  2310  		res *http.Response
  2311  		err error
  2312  	)
  2313  	err = p.fs.pacer.Call(func() (bool, error) {
  2314  		res, err = p.fs.srv.Call(ctx, &opts)
  2315  		if err != nil {
  2316  			return fserrors.ShouldRetry(err), err
  2317  		}
  2318  		url, err = readBodyWord(res)
  2319  		return fserrors.ShouldRetry(err), err
  2320  	})
  2321  	if err != nil || url == "" {
  2322  		closeBody(res)
  2323  		return "", fmt.Errorf("failed to request file server: %w", err)
  2324  	}
  2325  
  2326  	p.addServer(url, now)
  2327  	return url, nil
  2328  }
  2329  
  2330  func (p *serverPool) Free(url string) {
  2331  	if url == "" {
  2332  		return
  2333  	}
  2334  	p.mu.Lock()
  2335  	defer p.mu.Unlock()
  2336  
  2337  	srv := p.pool[url]
  2338  	if srv == nil {
  2339  		return
  2340  	}
  2341  
  2342  	if srv.locks <= 0 {
  2343  		// Getting here indicates possible race
  2344  		fs.Infof(p.fs, "Purge file server:  locks -, url %s", url)
  2345  		delete(p.pool, url)
  2346  		return
  2347  	}
  2348  
  2349  	srv.locks--
  2350  	if srv.locks == 0 && time.Now().After(srv.expiry) {
  2351  		delete(p.pool, url)
  2352  		fs.Debugf(p.fs, "Free file server:   locks 0, url %s", url)
  2353  		return
  2354  	}
  2355  	fs.Debugf(p.fs, "Unlock file server: locks %d, url %s", srv.locks, url)
  2356  }
  2357  
  2358  // Find an underlocked server
  2359  func (p *serverPool) getServer(current string, now time.Time) string {
  2360  	p.mu.Lock()
  2361  	defer p.mu.Unlock()
  2362  
  2363  	for url, srv := range p.pool {
  2364  		if url == "" || srv.locks < 0 {
  2365  			continue // Purged server slot
  2366  		}
  2367  		if url == current {
  2368  			continue // Current server - prefer another
  2369  		}
  2370  		if srv.locks >= maxServerLocks {
  2371  			continue // Overlocked server
  2372  		}
  2373  		if now.After(srv.expiry) {
  2374  			continue // Expired server
  2375  		}
  2376  
  2377  		srv.locks++
  2378  		fs.Debugf(p.fs, "Lock file server:   locks %d, url %s", srv.locks, url)
  2379  		return url
  2380  	}
  2381  
  2382  	return ""
  2383  }
  2384  
  2385  func (p *serverPool) addServer(url string, now time.Time) {
  2386  	p.mu.Lock()
  2387  	defer p.mu.Unlock()
  2388  
  2389  	expiry := now.Add(time.Duration(p.expirySec) * time.Second)
  2390  
  2391  	expiryStr := []byte("-")
  2392  	if p.fs.ci.LogLevel >= fs.LogLevelInfo {
  2393  		expiryStr, _ = expiry.MarshalJSON()
  2394  	}
  2395  
  2396  	// Attach to a server proposed by dispatcher
  2397  	srv := p.pool[url]
  2398  	if srv != nil {
  2399  		srv.locks++
  2400  		srv.expiry = expiry
  2401  		fs.Debugf(p.fs, "Reuse file server:  locks %d, url %s, expiry %s", srv.locks, url, expiryStr)
  2402  		return
  2403  	}
  2404  
  2405  	// Add new server
  2406  	p.pool[url] = &pendingServer{locks: 1, expiry: expiry}
  2407  	fs.Debugf(p.fs, "Switch file server: locks 1, url %s, expiry %s", url, expiryStr)
  2408  }
  2409  
// Name returns the name of the remote (as passed into NewFs).
func (f *Fs) Name() string {
	return f.name
}
  2414  
// Root returns the root path of the remote (as passed into NewFs).
func (f *Fs) Root() string {
	return f.root
}
  2419  
// String converts this Fs to a string for logs and debugging.
func (f *Fs) String() string {
	return fmt.Sprintf("[%s]", f.root)
}
  2424  
// Precision returns the modification time precision of this Fs (one second).
func (f *Fs) Precision() time.Duration {
	return time.Second
}
  2429  
// Hashes returns the supported hash sets (the Mailru flavored hash only).
func (f *Fs) Hashes() hash.Set {
	return hash.Set(MrHashType)
}
  2434  
// Features returns the optional features of this Fs.
func (f *Fs) Features() *fs.Features {
	return f.features
}
  2439  
  2440  // close response body ignoring errors
  2441  func closeBody(res *http.Response) {
  2442  	if res != nil {
  2443  		_ = res.Body.Close()
  2444  	}
  2445  }
  2446  
// Check the interfaces are satisfied at compile time
var (
	_ fs.Fs           = (*Fs)(nil)
	_ fs.Purger       = (*Fs)(nil)
	_ fs.Copier       = (*Fs)(nil)
	_ fs.Mover        = (*Fs)(nil)
	_ fs.DirMover     = (*Fs)(nil)
	_ fs.PublicLinker = (*Fs)(nil)
	_ fs.CleanUpper   = (*Fs)(nil)
	_ fs.Abouter      = (*Fs)(nil)
	_ fs.Object       = (*Object)(nil)
)