github.com/10XDev/rclone@v1.52.3-0.20200626220027-16af9ab76b2a/backend/mailru/mailru.go (about)

     1  package mailru
     2  
     3  import (
     4  	"bytes"
     5  	"context"
     6  	"fmt"
     7  	gohash "hash"
     8  	"io"
     9  	"path"
    10  	"path/filepath"
    11  	"sort"
    12  	"strconv"
    13  	"strings"
    14  	"sync"
    15  	"time"
    16  
    17  	"encoding/hex"
    18  	"encoding/json"
    19  	"io/ioutil"
    20  	"net/http"
    21  	"net/url"
    22  
    23  	"github.com/rclone/rclone/backend/mailru/api"
    24  	"github.com/rclone/rclone/backend/mailru/mrhash"
    25  
    26  	"github.com/rclone/rclone/fs"
    27  	"github.com/rclone/rclone/fs/config"
    28  	"github.com/rclone/rclone/fs/config/configmap"
    29  	"github.com/rclone/rclone/fs/config/configstruct"
    30  	"github.com/rclone/rclone/fs/config/obscure"
    31  	"github.com/rclone/rclone/fs/fserrors"
    32  	"github.com/rclone/rclone/fs/fshttp"
    33  	"github.com/rclone/rclone/fs/hash"
    34  	"github.com/rclone/rclone/fs/object"
    35  	"github.com/rclone/rclone/fs/operations"
    36  
    37  	"github.com/rclone/rclone/lib/encoder"
    38  	"github.com/rclone/rclone/lib/oauthutil"
    39  	"github.com/rclone/rclone/lib/pacer"
    40  	"github.com/rclone/rclone/lib/rest"
    41  
    42  	"github.com/pkg/errors"
    43  	"golang.org/x/oauth2"
    44  )
    45  
// Global constants
const (
	minSleepPacer   = 10 * time.Millisecond
	maxSleepPacer   = 2 * time.Second
	decayConstPacer = 2          // bigger for slower decay, exponential
	metaExpirySec   = 20 * 60    // meta server expiration time, in seconds
	serverExpirySec = 3 * 60     // download server expiration time, in seconds
	shardExpirySec  = 30 * 60    // upload server expiration time, in seconds
	maxServerLocks  = 4          // maximum number of locks per single download server
	maxInt32        = 2147483647 // used as limit in directory list request
	speedupMinSize  = 512        // speedup is not optimal if data is smaller than average packet
)
    58  
// Global errors
var (
	ErrorDirAlreadyExists   = errors.New("directory already exists")
	ErrorDirSourceNotExists = errors.New("directory source does not exist")
	ErrorInvalidName        = errors.New("invalid characters in object name")

	// MrHashType is the hash.Type for Mailru, registered in init()
	MrHashType hash.Type
)
    68  
// Description of how to authorize.
// Note: the mailru OAuth endpoint uses the same URL for authorization and
// token exchange, takes no client secret, and expects credentials in the
// request parameters rather than in a Basic auth header.
var oauthConfig = &oauth2.Config{
	ClientID:     api.OAuthClientID,
	ClientSecret: "",
	Endpoint: oauth2.Endpoint{
		AuthURL:   api.OAuthURL,
		TokenURL:  api.OAuthURL,
		AuthStyle: oauth2.AuthStyleInParams,
	},
}
    79  
    80  // Register with Fs
    81  func init() {
    82  	MrHashType = hash.RegisterHash("MailruHash", 40, mrhash.New)
    83  	fs.Register(&fs.RegInfo{
    84  		Name:        "mailru",
    85  		Description: "Mail.ru Cloud",
    86  		NewFs:       NewFs,
    87  		Options: []fs.Option{{
    88  			Name:     "user",
    89  			Help:     "User name (usually email)",
    90  			Required: true,
    91  		}, {
    92  			Name:       "pass",
    93  			Help:       "Password",
    94  			Required:   true,
    95  			IsPassword: true,
    96  		}, {
    97  			Name:     "speedup_enable",
    98  			Default:  true,
    99  			Advanced: false,
   100  			Help: `Skip full upload if there is another file with same data hash.
   101  This feature is called "speedup" or "put by hash". It is especially efficient
   102  in case of generally available files like popular books, video or audio clips,
   103  because files are searched by hash in all accounts of all mailru users.
   104  Please note that rclone may need local memory and disk space to calculate
   105  content hash in advance and decide whether full upload is required.
   106  Also, if rclone does not know file size in advance (e.g. in case of
   107  streaming or partial uploads), it will not even try this optimization.`,
   108  			Examples: []fs.OptionExample{{
   109  				Value: "true",
   110  				Help:  "Enable",
   111  			}, {
   112  				Value: "false",
   113  				Help:  "Disable",
   114  			}},
   115  		}, {
   116  			Name:     "speedup_file_patterns",
   117  			Default:  "*.mkv,*.avi,*.mp4,*.mp3,*.zip,*.gz,*.rar,*.pdf",
   118  			Advanced: true,
   119  			Help: `Comma separated list of file name patterns eligible for speedup (put by hash).
   120  Patterns are case insensitive and can contain '*' or '?' meta characters.`,
   121  			Examples: []fs.OptionExample{{
   122  				Value: "",
   123  				Help:  "Empty list completely disables speedup (put by hash).",
   124  			}, {
   125  				Value: "*",
   126  				Help:  "All files will be attempted for speedup.",
   127  			}, {
   128  				Value: "*.mkv,*.avi,*.mp4,*.mp3",
   129  				Help:  "Only common audio/video files will be tried for put by hash.",
   130  			}, {
   131  				Value: "*.zip,*.gz,*.rar,*.pdf",
   132  				Help:  "Only common archives or PDF books will be tried for speedup.",
   133  			}},
   134  		}, {
   135  			Name:     "speedup_max_disk",
   136  			Default:  fs.SizeSuffix(3 * 1024 * 1024 * 1024),
   137  			Advanced: true,
   138  			Help: `This option allows you to disable speedup (put by hash) for large files
   139  (because preliminary hashing can exhaust you RAM or disk space)`,
   140  			Examples: []fs.OptionExample{{
   141  				Value: "0",
   142  				Help:  "Completely disable speedup (put by hash).",
   143  			}, {
   144  				Value: "1G",
   145  				Help:  "Files larger than 1Gb will be uploaded directly.",
   146  			}, {
   147  				Value: "3G",
   148  				Help:  "Choose this option if you have less than 3Gb free on local disk.",
   149  			}},
   150  		}, {
   151  			Name:     "speedup_max_memory",
   152  			Default:  fs.SizeSuffix(32 * 1024 * 1024),
   153  			Advanced: true,
   154  			Help:     `Files larger than the size given below will always be hashed on disk.`,
   155  			Examples: []fs.OptionExample{{
   156  				Value: "0",
   157  				Help:  "Preliminary hashing will always be done in a temporary disk location.",
   158  			}, {
   159  				Value: "32M",
   160  				Help:  "Do not dedicate more than 32Mb RAM for preliminary hashing.",
   161  			}, {
   162  				Value: "256M",
   163  				Help:  "You have at most 256Mb RAM free for hash calculations.",
   164  			}},
   165  		}, {
   166  			Name:     "check_hash",
   167  			Default:  true,
   168  			Advanced: true,
   169  			Help:     "What should copy do if file checksum is mismatched or invalid",
   170  			Examples: []fs.OptionExample{{
   171  				Value: "true",
   172  				Help:  "Fail with error.",
   173  			}, {
   174  				Value: "false",
   175  				Help:  "Ignore and continue.",
   176  			}},
   177  		}, {
   178  			Name:     "user_agent",
   179  			Default:  "",
   180  			Advanced: true,
   181  			Hide:     fs.OptionHideBoth,
   182  			Help: `HTTP user agent used internally by client.
   183  Defaults to "rclone/VERSION" or "--user-agent" provided on command line.`,
   184  		}, {
   185  			Name:     "quirks",
   186  			Default:  "",
   187  			Advanced: true,
   188  			Hide:     fs.OptionHideBoth,
   189  			Help: `Comma separated list of internal maintenance flags.
   190  This option must not be used by an ordinary user. It is intended only to
   191  facilitate remote troubleshooting of backend issues. Strict meaning of
   192  flags is not documented and not guaranteed to persist between releases.
   193  Quirks will be removed when the backend grows stable.
   194  Supported quirks: atomicmkdir binlist gzip insecure retry400`,
   195  		}, {
   196  			Name:     config.ConfigEncoding,
   197  			Help:     config.ConfigEncodingHelp,
   198  			Advanced: true,
   199  			// Encode invalid UTF-8 bytes as json doesn't handle them properly.
   200  			Default: (encoder.Display |
   201  				encoder.EncodeWin | // :?"*<>|
   202  				encoder.EncodeBackSlash |
   203  				encoder.EncodeInvalidUtf8),
   204  		}},
   205  	})
   206  }
   207  
// Options defines the configuration for this backend
type Options struct {
	Username        string               `config:"user"`                  // account name, usually an email address
	Password        string               `config:"pass"`                  // obscured password, revealed in NewFs
	UserAgent       string               `config:"user_agent"`            // overrides the HTTP User-Agent header
	CheckHash       bool                 `config:"check_hash"`            // fail (true) or continue (false) on checksum mismatch
	SpeedupEnable   bool                 `config:"speedup_enable"`        // enable "put by hash" optimization
	SpeedupPatterns string               `config:"speedup_file_patterns"` // comma separated globs eligible for speedup
	SpeedupMaxDisk  fs.SizeSuffix        `config:"speedup_max_disk"`      // max file size for preliminary hashing
	SpeedupMaxMem   fs.SizeSuffix        `config:"speedup_max_memory"`    // files above this size hash on disk, not in RAM
	Quirks          string               `config:"quirks"`                // internal maintenance flags, see type quirks
	Enc             encoder.MultiEncoder `config:"encoding"`              // filename encoding rules
}
   221  
// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
	429, // Too Many Requests.
	500, // Internal Server Error
	502, // Bad Gateway
	503, // Service Unavailable
	504, // Gateway Timeout
	509, // Bandwidth Limit Exceeded
}
   231  
   232  // shouldRetry returns a boolean as to whether this response and err
   233  // deserve to be retried. It returns the err as a convenience.
   234  // Retries password authorization (once) in a special case of access denied.
   235  func shouldRetry(res *http.Response, err error, f *Fs, opts *rest.Opts) (bool, error) {
   236  	if res != nil && res.StatusCode == 403 && f.opt.Password != "" && !f.passFailed {
   237  		reAuthErr := f.reAuthorize(opts, err)
   238  		return reAuthErr == nil, err // return an original error
   239  	}
   240  	if res != nil && res.StatusCode == 400 && f.quirks.retry400 {
   241  		return true, err
   242  	}
   243  	return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(res, retryErrorCodes), err
   244  }
   245  
   246  // errorHandler parses a non 2xx error response into an error
   247  func errorHandler(res *http.Response) (err error) {
   248  	data, err := rest.ReadBody(res)
   249  	if err != nil {
   250  		return err
   251  	}
   252  	fileError := &api.FileErrorResponse{}
   253  	err = json.NewDecoder(bytes.NewReader(data)).Decode(fileError)
   254  	if err == nil {
   255  		fileError.Message = fileError.Body.Home.Error
   256  		return fileError
   257  	}
   258  	serverError := &api.ServerErrorResponse{}
   259  	err = json.NewDecoder(bytes.NewReader(data)).Decode(serverError)
   260  	if err == nil {
   261  		return serverError
   262  	}
   263  	serverError.Message = string(data)
   264  	if serverError.Message == "" || strings.HasPrefix(serverError.Message, "{") {
   265  		// Replace empty or JSON response with a human readable text.
   266  		serverError.Message = res.Status
   267  	}
   268  	serverError.Status = res.StatusCode
   269  	return serverError
   270  }
   271  
// Fs represents a remote mail.ru
type Fs struct {
	name         string
	root         string             // root path
	opt          Options            // parsed options
	speedupGlobs []string           // list of file name patterns eligible for speedup
	speedupAny   bool               // true if all file names are eligible for speedup
	features     *fs.Features       // optional features
	srv          *rest.Client       // REST API client
	cli          *http.Client       // underlying HTTP client (for authorize)
	m            configmap.Mapper   // config reader (for authorize)
	source       oauth2.TokenSource // OAuth token refresher
	pacer        *fs.Pacer          // pacer for API calls
	metaMu       sync.Mutex         // lock for meta server switcher
	metaURL      string             // URL of meta server
	metaExpiry   time.Time          // time to refresh meta server
	shardMu      sync.Mutex         // lock for upload shard switcher
	shardURL     string             // URL of upload shard
	shardExpiry  time.Time          // time to refresh upload shard
	fileServers  serverPool         // file server dispatcher
	authMu       sync.Mutex         // mutex for authorize()
	passFailed   bool               // true if authorize() failed after 403
	quirks       quirks             // internal maintenance flags
}
   296  
// NewFs constructs an Fs from the path, container:path
//
// It parses and reveals credentials, configures the HTTP/REST clients
// (honouring maintenance quirks), authorizes with the OAuth server, and
// finally resolves whether the remote root points at a file or a directory.
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	// fs.Debugf(nil, ">>> NewFs %q %q", name, root)
	ctx := context.Background() // Note: NewFs does not pass context!

	// Parse config into Options struct
	opt := new(Options)
	err := configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}
	if opt.Password != "" {
		// Password is stored obscured in the config file.
		opt.Password = obscure.MustReveal(opt.Password)
	}

	// Trailing slash signals us to optimize out one file check
	rootIsDir := strings.HasSuffix(root, "/")
	// However the f.root string should not have leading or trailing slashes
	root = strings.Trim(root, "/")

	f := &Fs{
		name: name,
		root: root,
		opt:  *opt,
		m:    m,
	}

	if err := f.parseSpeedupPatterns(opt.SpeedupPatterns); err != nil {
		return nil, err
	}
	f.quirks.parseQuirks(opt.Quirks)

	f.pacer = fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleepPacer), pacer.MaxSleep(maxSleepPacer), pacer.DecayConstant(decayConstPacer)))

	f.features = (&fs.Features{
		CaseInsensitive:         true,
		CanHaveEmptyDirectories: true,
		// Can copy/move across mailru configs (almost, thus true here), but
		// only when they share common account (this is checked in Copy/Move).
		ServerSideAcrossConfigs: true,
	}).Fill(f)

	// Override few config settings and create a client
	clientConfig := *fs.Config
	if opt.UserAgent != "" {
		clientConfig.UserAgent = opt.UserAgent
	}
	clientConfig.NoGzip = !f.quirks.gzip // Send not "Accept-Encoding: gzip" like official client
	f.cli = fshttp.NewClient(&clientConfig)

	f.srv = rest.NewClient(f.cli)
	f.srv.SetRoot(api.APIServerURL)
	f.srv.SetHeader("Accept", "*/*") // Send "Accept: */*" with every request like official client
	f.srv.SetErrorHandler(errorHandler)

	if f.quirks.insecure {
		// Troubleshooting quirk: accept self-signed certificates, e.g. the
		// one introduced by Fiddler when sniffing the protocol.
		transport := f.cli.Transport.(*fshttp.Transport).Transport
		transport.TLSClientConfig.InsecureSkipVerify = true
		transport.ProxyConnectHeader = http.Header{"User-Agent": {clientConfig.UserAgent}}
	}

	if err = f.authorize(ctx, false); err != nil {
		return nil, err
	}

	f.fileServers = serverPool{
		pool:      make(pendingServerMap),
		fs:        f,
		path:      "/d",
		expirySec: serverExpirySec,
	}

	if !rootIsDir {
		// The path did not end with "/", so it may denote a file:
		// probe the remote to find out.
		_, dirSize, err := f.readItemMetaData(ctx, f.root)
		rootIsDir = (dirSize >= 0)
		// Ignore non-existing item and other errors
		if err == nil && !rootIsDir {
			root = path.Dir(f.root)
			if root == "." {
				root = ""
			}
			f.root = root
			// Return fs that points to the parent and signal rclone to do filtering
			return f, fs.ErrorIsFile
		}
	}

	return f, nil
}
   386  
   387  // Internal maintenance flags (to be removed when the backend matures).
   388  // Primarily intended to facilitate remote support and troubleshooting.
   389  type quirks struct {
   390  	gzip        bool
   391  	insecure    bool
   392  	binlist     bool
   393  	atomicmkdir bool
   394  	retry400    bool
   395  }
   396  
   397  func (q *quirks) parseQuirks(option string) {
   398  	for _, flag := range strings.Split(option, ",") {
   399  		switch strings.ToLower(strings.TrimSpace(flag)) {
   400  		case "gzip":
   401  			// This backend mimics the official client which never sends the
   402  			// "Accept-Encoding: gzip" header. However, enabling compression
   403  			// might be good for performance.
   404  			// Use this quirk to investigate the performance impact.
   405  			// Remove this quirk if performance does not improve.
   406  			q.gzip = true
   407  		case "insecure":
   408  			// The mailru disk-o protocol is not documented. To compare HTTP
   409  			// stream against the official client one can use Telerik Fiddler,
   410  			// which introduces a self-signed certificate. This quirk forces
   411  			// the Go http layer to accept it.
   412  			// Remove this quirk when the backend reaches maturity.
   413  			q.insecure = true
   414  		case "binlist":
   415  			// The official client sometimes uses a so called "bin" protocol,
   416  			// implemented in the listBin file system method below. This method
   417  			// is generally faster than non-recursive listM1 but results in
   418  			// sporadic deserialization failures if total size of tree data
   419  			// approaches 8Kb (?). The recursive method is normally disabled.
   420  			// This quirk can be used to enable it for further investigation.
   421  			// Remove this quirk when the "bin" protocol support is complete.
   422  			q.binlist = true
   423  		case "atomicmkdir":
   424  			// At the moment rclone requires Mkdir to return success if the
   425  			// directory already exists. However, such programs as borgbackup
   426  			// or restic use mkdir as a locking primitive and depend on its
   427  			// atomicity. This quirk is a workaround. It can be removed
   428  			// when the above issue is investigated.
   429  			q.atomicmkdir = true
   430  		case "retry400":
   431  			// This quirk will help in troubleshooting a very rare "Error 400"
   432  			// issue. It can be removed if the problem does not show up
   433  			// for a year or so. See the below issue:
   434  			// https://github.com/ivandeex/rclone/issues/14
   435  			q.retry400 = true
   436  		default:
   437  			// Just ignore all unknown flags
   438  		}
   439  	}
   440  }
   441  
// Note: authorize() is not safe for concurrent access as it updates token source
//
// authorize obtains a valid OAuth token, either from the saved config
// (unless force is true) or by exchanging username/password, stores it
// back into the config, and installs a refreshing token source on f.
func (f *Fs) authorize(ctx context.Context, force bool) (err error) {
	var t *oauth2.Token
	if !force {
		// Try the token persisted in the config file first.
		t, err = oauthutil.GetToken(f.name, f.m)
	}

	if err != nil || !tokenIsValid(t) {
		fs.Infof(f, "Valid token not found, authorizing.")
		ctx := oauthutil.Context(f.cli)
		// Fall back to the resource-owner password grant.
		t, err = oauthConfig.PasswordCredentialsToken(ctx, f.opt.Username, f.opt.Password)
	}
	if err == nil && !tokenIsValid(t) {
		err = errors.New("Invalid token")
	}
	if err != nil {
		return errors.Wrap(err, "Failed to authorize")
	}

	// Persist the fresh token for the next rclone invocation.
	if err = oauthutil.PutToken(f.name, f.m, t, false); err != nil {
		return err
	}

	// Mailru API server expects access token not in the request header but
	// in the URL query string, so we must use a bare token source rather than
	// client provided by oauthutil.
	//
	// WARNING: direct use of the returned token source triggers a bug in the
	// `(*token != *ts.token)` comparison in oauthutil.TokenSource.Token()
	// crashing with panic `comparing uncomparable type map[string]interface{}`
	// As a workaround, mimic oauth2.NewClient() wrapping token source in
	// oauth2.ReuseTokenSource
	_, ts, err := oauthutil.NewClientWithBaseClient(f.name, f.m, oauthConfig, f.cli)
	if err == nil {
		f.source = oauth2.ReuseTokenSource(nil, ts)
	}
	return err
}
   480  
   481  func tokenIsValid(t *oauth2.Token) bool {
   482  	return t.Valid() && t.RefreshToken != "" && t.Type() == "Bearer"
   483  }
   484  
// reAuthorize is called after getting 403 (access denied) from the server.
// It handles the case when user has changed password since a previous
// rclone invocation and obtains a new access token, if needed.
// On failure it latches f.passFailed so the attempt is never repeated.
func (f *Fs) reAuthorize(opts *rest.Opts, origErr error) error {
	// lock and recheck the flag to ensure authorize() is attempted only once
	f.authMu.Lock()
	defer f.authMu.Unlock()
	if f.passFailed {
		return origErr
	}
	ctx := context.Background() // Note: reAuthorize is called by ShouldRetry, no context!

	fs.Debugf(f, "re-authorize with new password")
	if err := f.authorize(ctx, true); err != nil {
		f.passFailed = true
		return err
	}

	// obtain new token, if needed
	// The request being retried may carry the stale token in its query
	// string under either of two parameter names; refresh it in place.
	tokenParameter := ""
	if opts != nil && opts.Parameters.Get("token") != "" {
		tokenParameter = "token"
	}
	if opts != nil && opts.Parameters.Get("access_token") != "" {
		tokenParameter = "access_token"
	}
	if tokenParameter != "" {
		token, err := f.accessToken()
		if err != nil {
			f.passFailed = true
			return err
		}
		opts.Parameters.Set(tokenParameter, token)
	}

	return nil
}
   522  
   523  // accessToken() returns OAuth token and possibly refreshes it
   524  func (f *Fs) accessToken() (string, error) {
   525  	token, err := f.source.Token()
   526  	if err != nil {
   527  		return "", errors.Wrap(err, "cannot refresh access token")
   528  	}
   529  	return token.AccessToken, nil
   530  }
   531  
   532  // absPath converts root-relative remote to absolute home path
   533  func (f *Fs) absPath(remote string) string {
   534  	return path.Join("/", f.root, remote)
   535  }
   536  
   537  // relPath converts absolute home path to root-relative remote
   538  // Note that f.root can not have leading and trailing slashes
   539  func (f *Fs) relPath(absPath string) (string, error) {
   540  	target := strings.Trim(absPath, "/")
   541  	if f.root == "" {
   542  		return target, nil
   543  	}
   544  	if target == f.root {
   545  		return "", nil
   546  	}
   547  	if strings.HasPrefix(target+"/", f.root+"/") {
   548  		return target[len(f.root)+1:], nil
   549  	}
   550  	return "", fmt.Errorf("path %q should be under %q", absPath, f.root)
   551  }
   552  
   553  // metaServer ...
   554  func (f *Fs) metaServer(ctx context.Context) (string, error) {
   555  	f.metaMu.Lock()
   556  	defer f.metaMu.Unlock()
   557  
   558  	if f.metaURL != "" && time.Now().Before(f.metaExpiry) {
   559  		return f.metaURL, nil
   560  	}
   561  
   562  	opts := rest.Opts{
   563  		RootURL: api.DispatchServerURL,
   564  		Method:  "GET",
   565  		Path:    "/m",
   566  	}
   567  
   568  	var (
   569  		res *http.Response
   570  		url string
   571  		err error
   572  	)
   573  	err = f.pacer.Call(func() (bool, error) {
   574  		res, err = f.srv.Call(ctx, &opts)
   575  		if err == nil {
   576  			url, err = readBodyWord(res)
   577  		}
   578  		return fserrors.ShouldRetry(err), err
   579  	})
   580  	if err != nil {
   581  		closeBody(res)
   582  		return "", err
   583  	}
   584  	f.metaURL = url
   585  	f.metaExpiry = time.Now().Add(metaExpirySec * time.Second)
   586  	fs.Debugf(f, "new meta server: %s", f.metaURL)
   587  	return f.metaURL, nil
   588  }
   589  
   590  // readBodyWord reads the single line response to completion
   591  // and extracts the first word from the first line.
   592  func readBodyWord(res *http.Response) (word string, err error) {
   593  	var body []byte
   594  	body, err = rest.ReadBody(res)
   595  	if err == nil {
   596  		line := strings.Trim(string(body), " \r\n")
   597  		word = strings.Split(line, " ")[0]
   598  	}
   599  	if word == "" {
   600  		return "", errors.New("Empty reply from dispatcher")
   601  	}
   602  	return word, nil
   603  }
   604  
// readItemMetaData returns a file/directory info at given full path
// If it can't be found it fails with fs.ErrorObjectNotFound
// For the return value `dirSize` please see Fs.itemToEntry()
func (f *Fs) readItemMetaData(ctx context.Context, path string) (entry fs.DirEntry, dirSize int, err error) {
	token, err := f.accessToken()
	if err != nil {
		return nil, -1, err
	}

	// The "file" endpoint also returns children of a folder, hence the
	// offset/limit parameters (limit set to the maximum possible value).
	opts := rest.Opts{
		Method: "GET",
		Path:   "/api/m1/file",
		Parameters: url.Values{
			"access_token": {token},
			"home":         {f.opt.Enc.FromStandardPath(path)},
			"offset":       {"0"},
			"limit":        {strconv.Itoa(maxInt32)},
		},
	}

	var info api.ItemInfoResponse
	err = f.pacer.Call(func() (bool, error) {
		res, err := f.srv.CallJSON(ctx, &opts, nil, &info)
		return shouldRetry(res, err, f, &opts)
	})

	if err != nil {
		if apiErr, ok := err.(*api.FileErrorResponse); ok {
			switch apiErr.Status {
			case 404:
				err = fs.ErrorObjectNotFound
			case 400:
				// The API reports 400 for some invalid paths; treat it as
				// "not found" but leave a debug trace of the raw response.
				fs.Debugf(f, "object %q status %d (%s)", path, apiErr.Status, apiErr.Message)
				err = fs.ErrorObjectNotFound
			}
		}
		return
	}

	entry, dirSize, err = f.itemToDirEntry(ctx, &info.Body)
	return
}
   647  
// itemToEntry converts API item to rclone directory entry
// The dirSize return value is:
//   <0 - for a file or in case of error
//   =0 - for an empty directory
//   >0 - for a non-empty directory
func (f *Fs) itemToDirEntry(ctx context.Context, item *api.ListItem) (entry fs.DirEntry, dirSize int, err error) {
	remote, err := f.relPath(f.opt.Enc.ToStandardPath(item.Home))
	if err != nil {
		return nil, -1, err
	}
	switch item.Kind {
	case "folder":
		dir := fs.NewDir(remote, time.Unix(item.Mtime, 0)).SetSize(item.Size)
		// Note: this shadows the named result dirSize, which is harmless
		// because the value is returned explicitly on the next line.
		dirSize := item.Count.Files + item.Count.Folders
		return dir, dirSize, nil
	case "file":
		// The server reports the mailru hash as a hex string.
		binHash, err := mrhash.DecodeString(item.Hash)
		if err != nil {
			return nil, -1, err
		}
		file := &Object{
			fs:          f,
			remote:      remote,
			hasMetaData: true,
			size:        item.Size,
			mrHash:      binHash,
			modTime:     time.Unix(item.Mtime, 0),
		}
		return file, -1, nil
	default:
		return nil, -1, fmt.Errorf("Unknown resource type %q", item.Kind)
	}
}
   681  
   682  // List the objects and directories in dir into entries.
   683  // The entries can be returned in any order but should be for a complete directory.
   684  // dir should be "" to list the root, and should not have trailing slashes.
   685  // This should return ErrDirNotFound if the directory isn't found.
   686  func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
   687  	// fs.Debugf(f, ">>> List: %q", dir)
   688  
   689  	if f.quirks.binlist {
   690  		entries, err = f.listBin(ctx, f.absPath(dir), 1)
   691  	} else {
   692  		entries, err = f.listM1(ctx, f.absPath(dir), 0, maxInt32)
   693  	}
   694  
   695  	if err == nil && fs.Config.LogLevel >= fs.LogLevelDebug {
   696  		names := []string{}
   697  		for _, entry := range entries {
   698  			names = append(names, entry.Remote())
   699  		}
   700  		sort.Strings(names)
   701  		// fs.Debugf(f, "List(%q): %v", dir, names)
   702  	}
   703  
   704  	return
   705  }
   706  
// list using protocol "m1"
// Items that fail to convert into directory entries are excluded from the
// result with a debug message rather than failing the whole listing.
func (f *Fs) listM1(ctx context.Context, dirPath string, offset int, limit int) (entries fs.DirEntries, err error) {
	token, err := f.accessToken()
	if err != nil {
		return nil, err
	}

	params := url.Values{}
	params.Set("access_token", token)
	params.Set("offset", strconv.Itoa(offset))
	params.Set("limit", strconv.Itoa(limit))

	// The folder path travels in the form-encoded body, not the query.
	data := url.Values{}
	data.Set("home", f.opt.Enc.FromStandardPath(dirPath))

	opts := rest.Opts{
		Method:      "POST",
		Path:        "/api/m1/folder",
		Parameters:  params,
		Body:        strings.NewReader(data.Encode()),
		ContentType: api.BinContentType,
	}

	var (
		info api.FolderInfoResponse
		res  *http.Response
	)
	err = f.pacer.Call(func() (bool, error) {
		res, err = f.srv.CallJSON(ctx, &opts, nil, &info)
		return shouldRetry(res, err, f, &opts)
	})

	if err != nil {
		apiErr, ok := err.(*api.FileErrorResponse)
		if ok && apiErr.Status == 404 {
			return nil, fs.ErrorDirNotFound
		}
		return nil, err
	}

	if info.Body.Kind != "folder" {
		// The path resolved to a file, not a directory.
		return nil, fs.ErrorIsFile
	}

	for _, item := range info.Body.List {
		entry, _, err := f.itemToDirEntry(ctx, &item)
		if err == nil {
			entries = append(entries, entry)
		} else {
			fs.Debugf(f, "Excluding path %q from list: %v", item.Home, err)
		}
	}
	return entries, nil
}
   761  
   762  // list using protocol "bin"
   763  func (f *Fs) listBin(ctx context.Context, dirPath string, depth int) (entries fs.DirEntries, err error) {
   764  	options := api.ListOptDefaults
   765  
   766  	req := api.NewBinWriter()
   767  	req.WritePu16(api.OperationFolderList)
   768  	req.WriteString(f.opt.Enc.FromStandardPath(dirPath))
   769  	req.WritePu32(int64(depth))
   770  	req.WritePu32(int64(options))
   771  	req.WritePu32(0)
   772  
   773  	token, err := f.accessToken()
   774  	if err != nil {
   775  		return nil, err
   776  	}
   777  	metaURL, err := f.metaServer(ctx)
   778  	if err != nil {
   779  		return nil, err
   780  	}
   781  
   782  	opts := rest.Opts{
   783  		Method:  "POST",
   784  		RootURL: metaURL,
   785  		Parameters: url.Values{
   786  			"client_id": {api.OAuthClientID},
   787  			"token":     {token},
   788  		},
   789  		ContentType: api.BinContentType,
   790  		Body:        req.Reader(),
   791  	}
   792  
   793  	var res *http.Response
   794  	err = f.pacer.Call(func() (bool, error) {
   795  		res, err = f.srv.Call(ctx, &opts)
   796  		return shouldRetry(res, err, f, &opts)
   797  	})
   798  	if err != nil {
   799  		closeBody(res)
   800  		return nil, err
   801  	}
   802  
   803  	r := api.NewBinReader(res.Body)
   804  	defer closeBody(res)
   805  
   806  	// read status
   807  	switch status := r.ReadByteAsInt(); status {
   808  	case api.ListResultOK:
   809  		// go on...
   810  	case api.ListResultNotExists:
   811  		return nil, fs.ErrorDirNotFound
   812  	default:
   813  		return nil, fmt.Errorf("directory list error %d", status)
   814  	}
   815  
   816  	t := &treeState{
   817  		f:       f,
   818  		r:       r,
   819  		options: options,
   820  		rootDir: parentDir(dirPath),
   821  		lastDir: "",
   822  		level:   0,
   823  	}
   824  	t.currDir = t.rootDir
   825  
   826  	// read revision
   827  	if err := t.revision.Read(r); err != nil {
   828  		return nil, err
   829  	}
   830  
   831  	// read space
   832  	if (options & api.ListOptTotalSpace) != 0 {
   833  		t.totalSpace = int64(r.ReadULong())
   834  	}
   835  	if (options & api.ListOptUsedSpace) != 0 {
   836  		t.usedSpace = int64(r.ReadULong())
   837  	}
   838  
   839  	t.fingerprint = r.ReadBytesByLength()
   840  
   841  	// deserialize
   842  	for {
   843  		entry, err := t.NextRecord()
   844  		if err != nil {
   845  			break
   846  		}
   847  		if entry != nil {
   848  			entries = append(entries, entry)
   849  		}
   850  	}
   851  	if err != nil && err != fs.ErrorListAborted {
   852  		fs.Debugf(f, "listBin failed at offset %d: %v", r.Count(), err)
   853  		return nil, err
   854  	}
   855  	return entries, nil
   856  }
   857  
// NextRecord decodes the next record from the binary listing stream.
// It returns a non-nil entry for top-level files and directories,
// (nil, nil) for structural records that yield no entry, and
// fs.ErrorListAborted when the stream signals its regular end.
func (t *treeState) NextRecord() (fs.DirEntry, error) {
	r := t.r
	parseOp := r.ReadByteAsShort()
	if r.Error() != nil {
		return nil, r.Error()
	}

	switch parseOp {
	case api.ListParseDone:
		// Regular end of stream.
		return nil, fs.ErrorListAborted
	case api.ListParsePin:
		// Descend into the most recently seen directory.
		if t.lastDir == "" {
			return nil, errors.New("last folder is null")
		}
		t.currDir = t.lastDir
		t.level++
		return nil, nil
	case api.ListParsePinUpper:
		// Ascend back to the parent directory.
		if t.currDir == t.rootDir {
			return nil, nil
		}
		if t.level <= 0 {
			return nil, errors.New("no parent folder")
		}
		t.currDir = parentDir(t.currDir)
		t.level--
		return nil, nil
	case api.ListParseUnknown15:
		// Opcode 15 carries pairs of varints of unknown meaning - skip them
		// to keep the stream position in sync.
		skip := int(r.ReadPu32())
		for i := 0; i < skip; i++ {
			r.ReadPu32()
			r.ReadPu32()
		}
		return nil, nil
	case api.ListParseReadItem:
		// get item (see below)
	default:
		return nil, fmt.Errorf("unknown parse operation %d", parseOp)
	}

	// get item
	head := r.ReadIntSpl()
	itemType := head & 3 // low two bits select the item type
	if (head & 4096) != 0 {
		// Optional node ID is present when bit 12 is set.
		t.dunnoNodeID = r.ReadNBytes(api.DunnoNodeIDLength)
	}
	name := t.f.opt.Enc.FromStandardPath(string(r.ReadBytesByLength()))
	t.dunno1 = int(r.ReadULong())
	t.dunno2 = 0
	t.dunno3 = 0

	if r.Error() != nil {
		return nil, r.Error()
	}

	var (
		modTime time.Time
		size    int64
		binHash []byte
		dirSize int64
		isDir   = true
	)

	// Per-type payload; the dunno* reads exist only to keep the
	// wire position in sync (their meaning is unknown).
	switch itemType {
	case api.ListItemMountPoint:
		t.treeID = r.ReadNBytes(api.TreeIDLength)
		t.dunno2 = int(r.ReadULong())
		t.dunno3 = int(r.ReadULong())
	case api.ListItemFolder:
		t.dunno2 = int(r.ReadULong())
	case api.ListItemSharedFolder:
		t.dunno2 = int(r.ReadULong())
		t.treeID = r.ReadNBytes(api.TreeIDLength)
	case api.ListItemFile:
		isDir = false
		modTime = r.ReadDate()
		size = int64(r.ReadULong())
		binHash = r.ReadNBytes(mrhash.Size)
	default:
		return nil, fmt.Errorf("unknown item type %d", itemType)
	}

	if isDir {
		t.lastDir = path.Join(t.currDir, name)
		// Optional per-directory extras depending on requested options.
		if (t.options & api.ListOptDelete) != 0 {
			t.dunnoDel1 = int(r.ReadPu32())
			t.dunnoDel2 = int(r.ReadPu32())
		}
		if (t.options & api.ListOptFolderSize) != 0 {
			dirSize = int64(r.ReadULong())
		}
	}

	if r.Error() != nil {
		return nil, r.Error()
	}

	if fs.Config.LogLevel >= fs.LogLevelDebug {
		ctime, _ := modTime.MarshalJSON()
		fs.Debugf(t.f, "binDir %d.%d %q %q (%d) %s", t.level, itemType, t.currDir, name, size, ctime)
	}

	if t.level != 1 {
		// Only entries at depth 1 are emitted.
		// TODO: implement recursion and ListR
		// Note: recursion is broken because maximum buffer size is 8K
		return nil, nil
	}

	remote, err := t.f.relPath(path.Join(t.currDir, name))
	if err != nil {
		return nil, err
	}
	if isDir {
		return fs.NewDir(remote, modTime).SetSize(dirSize), nil
	}
	obj := &Object{
		fs:          t.f,
		remote:      remote,
		hasMetaData: true,
		size:        size,
		mrHash:      binHash,
		modTime:     modTime,
	}
	return obj, nil
}
   983  
// treeState holds the decoder state while deserializing a directory
// listing received via the binary "bin" protocol.
type treeState struct {
	f           *Fs
	r           *api.BinReader // reader over the raw response body
	options     int            // api.ListOpt* flags the listing was requested with
	rootDir     string         // parent of the listed directory (absolute path)
	currDir     string         // directory the parser is currently inside
	lastDir     string         // last directory entry seen; target of ListParsePin
	level       int            // nesting depth relative to rootDir
	revision    treeRevision
	totalSpace  int64  // filled only when ListOptTotalSpace was requested
	usedSpace   int64  // filled only when ListOptUsedSpace was requested
	fingerprint []byte
	// The dunno* fields hold wire values whose meaning is unknown;
	// they are read only to keep the stream position in sync.
	dunno1      int
	dunno2      int
	dunno3      int
	dunnoDel1   int
	dunnoDel2   int
	dunnoNodeID []byte
	treeID      []byte
}
  1004  
// treeRevision is a versioned directory revision record embedded
// in a binary listing response (see treeRevision.Read).
type treeRevision struct {
	ver       int16  // record format version (0..5)
	treeID    []byte // base tree identifier (versions 1..5)
	treeIDNew []byte // new tree identifier (versions 3..5)
	bgn       uint64 // base revision number (versions 1..5)
	bgnNew    uint64 // new revision number (versions 3..4)
}
  1012  
  1013  func (rev *treeRevision) Read(data *api.BinReader) error {
  1014  	rev.ver = data.ReadByteAsShort()
  1015  	switch rev.ver {
  1016  	case 0:
  1017  		// Revision()
  1018  	case 1, 2:
  1019  		rev.treeID = data.ReadNBytes(api.TreeIDLength)
  1020  		rev.bgn = data.ReadULong()
  1021  	case 3, 4:
  1022  		rev.treeID = data.ReadNBytes(api.TreeIDLength)
  1023  		rev.bgn = data.ReadULong()
  1024  		rev.treeIDNew = data.ReadNBytes(api.TreeIDLength)
  1025  		rev.bgnNew = data.ReadULong()
  1026  	case 5:
  1027  		rev.treeID = data.ReadNBytes(api.TreeIDLength)
  1028  		rev.bgn = data.ReadULong()
  1029  		rev.treeIDNew = data.ReadNBytes(api.TreeIDLength)
  1030  	default:
  1031  		return fmt.Errorf("unknown directory revision %d", rev.ver)
  1032  	}
  1033  	return data.Error()
  1034  }
  1035  
// CreateDir makes a directory (parent must exist)
func (f *Fs) CreateDir(ctx context.Context, path string) error {
	// fs.Debugf(f, ">>> CreateDir %q", path)

	// Serialize the request via the binary protocol:
	// op code, revision, encoded path, trailing flags word.
	req := api.NewBinWriter()
	req.WritePu16(api.OperationCreateFolder)
	req.WritePu16(0) // revision
	req.WriteString(f.opt.Enc.FromStandardPath(path))
	req.WritePu32(0)

	token, err := f.accessToken()
	if err != nil {
		return err
	}
	metaURL, err := f.metaServer(ctx)
	if err != nil {
		return err
	}

	opts := rest.Opts{
		Method:  "POST",
		RootURL: metaURL,
		Parameters: url.Values{
			"client_id": {api.OAuthClientID},
			"token":     {token},
		},
		ContentType: api.BinContentType,
		Body:        req.Reader(),
	}

	// Post the request with retries handled by the pacer.
	var res *http.Response
	err = f.pacer.Call(func() (bool, error) {
		res, err = f.srv.Call(ctx, &opts)
		return shouldRetry(res, err, f, &opts)
	})
	if err != nil {
		closeBody(res)
		return err
	}

	reply := api.NewBinReader(res.Body)
	defer closeBody(res)

	// Map the one-byte status onto the backend's sentinel errors.
	switch status := reply.ReadByteAsInt(); status {
	case api.MkdirResultOK:
		return nil
	case api.MkdirResultAlreadyExists, api.MkdirResultExistsDifferentCase:
		return ErrorDirAlreadyExists
	case api.MkdirResultSourceNotExists:
		return ErrorDirSourceNotExists
	case api.MkdirResultInvalidName:
		return ErrorInvalidName
	default:
		return fmt.Errorf("mkdir error %d", status)
	}
}
  1092  
  1093  // Mkdir creates the container (and its parents) if it doesn't exist.
  1094  // Normally it ignores the ErrorDirAlreadyExist, as required by rclone tests.
  1095  // Nevertheless, such programs as borgbackup or restic use mkdir as a locking
  1096  // primitive and depend on its atomicity, i.e. mkdir should fail if directory
  1097  // already exists. As a workaround, users can add string "atomicmkdir" in the
  1098  // hidden `quirks` parameter or in the `--mailru-quirks` command-line option.
  1099  func (f *Fs) Mkdir(ctx context.Context, dir string) error {
  1100  	// fs.Debugf(f, ">>> Mkdir %q", dir)
  1101  	err := f.mkDirs(ctx, f.absPath(dir))
  1102  	if err == ErrorDirAlreadyExists && !f.quirks.atomicmkdir {
  1103  		return nil
  1104  	}
  1105  	return err
  1106  }
  1107  
  1108  // mkDirs creates container and its parents by absolute path,
  1109  // fails with ErrorDirAlreadyExists if it already exists.
  1110  func (f *Fs) mkDirs(ctx context.Context, path string) error {
  1111  	if path == "/" || path == "" {
  1112  		return nil
  1113  	}
  1114  	switch err := f.CreateDir(ctx, path); err {
  1115  	case nil:
  1116  		return nil
  1117  	case ErrorDirSourceNotExists:
  1118  		fs.Debugf(f, "mkDirs by part %q", path)
  1119  		// fall thru...
  1120  	default:
  1121  		return err
  1122  	}
  1123  	parts := strings.Split(strings.Trim(path, "/"), "/")
  1124  	path = ""
  1125  	for _, part := range parts {
  1126  		if part == "" {
  1127  			continue
  1128  		}
  1129  		path += "/" + part
  1130  		switch err := f.CreateDir(ctx, path); err {
  1131  		case nil, ErrorDirAlreadyExists:
  1132  			continue
  1133  		default:
  1134  			return err
  1135  		}
  1136  	}
  1137  	return nil
  1138  }
  1139  
  1140  func parentDir(absPath string) string {
  1141  	parent := path.Dir(strings.TrimRight(absPath, "/"))
  1142  	if parent == "." {
  1143  		parent = ""
  1144  	}
  1145  	return parent
  1146  }
  1147  
  1148  // mkParentDirs creates parent containers by absolute path,
  1149  // ignores the ErrorDirAlreadyExists
  1150  func (f *Fs) mkParentDirs(ctx context.Context, path string) error {
  1151  	err := f.mkDirs(ctx, parentDir(path))
  1152  	if err == ErrorDirAlreadyExists {
  1153  		return nil
  1154  	}
  1155  	return err
  1156  }
  1157  
  1158  // Rmdir deletes a directory.
  1159  // Returns an error if it isn't empty.
  1160  func (f *Fs) Rmdir(ctx context.Context, dir string) error {
  1161  	// fs.Debugf(f, ">>> Rmdir %q", dir)
  1162  	return f.purgeWithCheck(ctx, dir, true, "rmdir")
  1163  }
  1164  
  1165  // Purge deletes all the files and the root directory
  1166  // Optional interface: Only implement this if you have a way of deleting
  1167  // all the files quicker than just running Remove() on the result of List()
  1168  func (f *Fs) Purge(ctx context.Context) error {
  1169  	// fs.Debugf(f, ">>> Purge")
  1170  	return f.purgeWithCheck(ctx, "", false, "purge")
  1171  }
  1172  
  1173  // purgeWithCheck() removes the root directory.
  1174  // Refuses if `check` is set and directory has anything in.
  1175  func (f *Fs) purgeWithCheck(ctx context.Context, dir string, check bool, opName string) error {
  1176  	path := f.absPath(dir)
  1177  	if path == "/" || path == "" {
  1178  		// Mailru will not allow to purge root space returning status 400
  1179  		return fs.ErrorNotDeletingDirs
  1180  	}
  1181  
  1182  	_, dirSize, err := f.readItemMetaData(ctx, path)
  1183  	if err != nil {
  1184  		return errors.Wrapf(err, "%s failed", opName)
  1185  	}
  1186  	if check && dirSize > 0 {
  1187  		return fs.ErrorDirectoryNotEmpty
  1188  	}
  1189  	return f.delete(ctx, path, false)
  1190  }
  1191  
  1192  func (f *Fs) delete(ctx context.Context, path string, hardDelete bool) error {
  1193  	token, err := f.accessToken()
  1194  	if err != nil {
  1195  		return err
  1196  	}
  1197  
  1198  	data := url.Values{"home": {f.opt.Enc.FromStandardPath(path)}}
  1199  	opts := rest.Opts{
  1200  		Method: "POST",
  1201  		Path:   "/api/m1/file/remove",
  1202  		Parameters: url.Values{
  1203  			"access_token": {token},
  1204  		},
  1205  		Body:        strings.NewReader(data.Encode()),
  1206  		ContentType: api.BinContentType,
  1207  	}
  1208  
  1209  	var response api.GenericResponse
  1210  	err = f.pacer.Call(func() (bool, error) {
  1211  		res, err := f.srv.CallJSON(ctx, &opts, nil, &response)
  1212  		return shouldRetry(res, err, f, &opts)
  1213  	})
  1214  
  1215  	switch {
  1216  	case err != nil:
  1217  		return err
  1218  	case response.Status == 200:
  1219  		return nil
  1220  	default:
  1221  		return fmt.Errorf("delete failed with code %d", response.Status)
  1222  	}
  1223  }
  1224  
// Copy src to this remote using server side copy operations.
// This is stored with the remote path given.
// It returns the destination Object and a possible error.
// Will only be called if src.Fs().Name() == f.Name()
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	// fs.Debugf(f, ">>> Copy %q %q", src.Remote(), remote)

	srcObj, ok := src.(*Object)
	if !ok {
		fs.Debugf(src, "Can't copy - not same remote type")
		return nil, fs.ErrorCantCopy
	}
	if srcObj.fs.opt.Username != f.opt.Username {
		// Can copy across mailru configs only if they share common account
		fs.Debugf(src, "Can't copy - not same account")
		return nil, fs.ErrorCantCopy
	}

	srcPath := srcObj.absPath()
	dstPath := f.absPath(remote)
	overwrite := false // request "rename on conflict" below instead of overwriting
	// fs.Debugf(f, "copy %q -> %q\n", srcPath, dstPath)

	err := f.mkParentDirs(ctx, dstPath)
	if err != nil {
		return nil, err
	}

	// The copy endpoint takes the source path and destination folder;
	// the server chooses the destination file name itself.
	data := url.Values{}
	data.Set("home", f.opt.Enc.FromStandardPath(srcPath))
	data.Set("folder", f.opt.Enc.FromStandardPath(parentDir(dstPath)))
	data.Set("email", f.opt.Username)
	data.Set("x-email", f.opt.Username)

	if overwrite {
		data.Set("conflict", "rewrite")
	} else {
		data.Set("conflict", "rename")
	}

	token, err := f.accessToken()
	if err != nil {
		return nil, err
	}

	opts := rest.Opts{
		Method: "POST",
		Path:   "/api/m1/file/copy",
		Parameters: url.Values{
			"access_token": {token},
		},
		Body:        strings.NewReader(data.Encode()),
		ContentType: api.BinContentType,
	}

	var response api.GenericBodyResponse
	err = f.pacer.Call(func() (bool, error) {
		res, err := f.srv.CallJSON(ctx, &opts, nil, &response)
		return shouldRetry(res, err, f, &opts)
	})

	if err != nil {
		return nil, errors.Wrap(err, "couldn't copy file")
	}
	if response.Status != 200 {
		return nil, fmt.Errorf("copy failed with code %d", response.Status)
	}

	// The server may have auto-renamed the copy; move it onto the
	// requested destination path, cleaning up on failure.
	tmpPath := f.opt.Enc.ToStandardPath(response.Body)
	if tmpPath != dstPath {
		// fs.Debugf(f, "rename temporary file %q -> %q\n", tmpPath, dstPath)
		err = f.moveItemBin(ctx, tmpPath, dstPath, "rename temporary file")
		if err != nil {
			_ = f.delete(ctx, tmpPath, false) // ignore error
			return nil, err
		}
	}

	// fix modification time at destination
	dstObj := &Object{
		fs:     f,
		remote: remote,
	}
	err = dstObj.readMetaData(ctx, true)
	if err == nil && dstObj.modTime != srcObj.modTime {
		dstObj.modTime = srcObj.modTime
		err = dstObj.addFileMetaData(ctx, true)
	}
	if err != nil {
		dstObj = nil
	}
	return dstObj, err
}
  1319  
  1320  // Move src to this remote using server side move operations.
  1321  // This is stored with the remote path given.
  1322  // It returns the destination Object and a possible error.
  1323  // Will only be called if src.Fs().Name() == f.Name()
  1324  // If it isn't possible then return fs.ErrorCantMove
  1325  func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
  1326  	// fs.Debugf(f, ">>> Move %q %q", src.Remote(), remote)
  1327  
  1328  	srcObj, ok := src.(*Object)
  1329  	if !ok {
  1330  		fs.Debugf(src, "Can't move - not same remote type")
  1331  		return nil, fs.ErrorCantMove
  1332  	}
  1333  	if srcObj.fs.opt.Username != f.opt.Username {
  1334  		// Can move across mailru configs only if they share common account
  1335  		fs.Debugf(src, "Can't move - not same account")
  1336  		return nil, fs.ErrorCantMove
  1337  	}
  1338  
  1339  	srcPath := srcObj.absPath()
  1340  	dstPath := f.absPath(remote)
  1341  
  1342  	err := f.mkParentDirs(ctx, dstPath)
  1343  	if err != nil {
  1344  		return nil, err
  1345  	}
  1346  
  1347  	err = f.moveItemBin(ctx, srcPath, dstPath, "move file")
  1348  	if err != nil {
  1349  		return nil, err
  1350  	}
  1351  
  1352  	return f.NewObject(ctx, remote)
  1353  }
  1354  
// move/rename an object using BIN protocol
func (f *Fs) moveItemBin(ctx context.Context, srcPath, dstPath, opName string) error {
	token, err := f.accessToken()
	if err != nil {
		return err
	}
	metaURL, err := f.metaServer(ctx)
	if err != nil {
		return err
	}

	// Serialize the rename request: op code, then (revision, path) for
	// source and destination, then a trailing word of unknown meaning.
	req := api.NewBinWriter()
	req.WritePu16(api.OperationRename)
	req.WritePu32(0) // old revision
	req.WriteString(f.opt.Enc.FromStandardPath(srcPath))
	req.WritePu32(0) // new revision
	req.WriteString(f.opt.Enc.FromStandardPath(dstPath))
	req.WritePu32(0) // dunno

	opts := rest.Opts{
		Method:  "POST",
		RootURL: metaURL,
		Parameters: url.Values{
			"client_id": {api.OAuthClientID},
			"token":     {token},
		},
		ContentType: api.BinContentType,
		Body:        req.Reader(),
	}

	// Post the request with retries handled by the pacer.
	var res *http.Response
	err = f.pacer.Call(func() (bool, error) {
		res, err = f.srv.Call(ctx, &opts)
		return shouldRetry(res, err, f, &opts)
	})
	if err != nil {
		closeBody(res)
		return err
	}

	reply := api.NewBinReader(res.Body)
	defer closeBody(res)

	// opName is only used to label the returned error.
	switch status := reply.ReadByteAsInt(); status {
	case api.MoveResultOK:
		return nil
	default:
		return fmt.Errorf("%s failed with error %d", opName, status)
	}
}
  1405  
// DirMove moves src, srcRemote to this remote at dstRemote
// using server side move operations.
// Will only be called if src.Fs().Name() == f.Name()
// If it isn't possible then return fs.ErrorCantDirMove
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
	// fs.Debugf(f, ">>> DirMove %q %q", srcRemote, dstRemote)

	srcFs, ok := src.(*Fs)
	if !ok {
		fs.Debugf(srcFs, "Can't move directory - not same remote type")
		return fs.ErrorCantDirMove
	}
	if srcFs.opt.Username != f.opt.Username {
		// Can move across mailru configs only if they share common account
		fs.Debugf(src, "Can't move - not same account")
		return fs.ErrorCantDirMove
	}
	srcPath := srcFs.absPath(srcRemote)
	dstPath := f.absPath(dstRemote)
	// fs.Debugf(srcFs, "DirMove [%s]%q --> [%s]%q\n", srcRemote, srcPath, dstRemote, dstPath)

	// Refuse to move to or from the root
	if len(srcPath) <= len(srcFs.root) || len(dstPath) <= len(f.root) {
		fs.Debugf(src, "DirMove error: Can't move root")
		return errors.New("can't move root directory")
	}

	err := f.mkParentDirs(ctx, dstPath)
	if err != nil {
		return err
	}

	// The destination must not exist yet; anything found there
	// (or any other lookup failure) aborts the move.
	_, _, err = f.readItemMetaData(ctx, dstPath)
	switch err {
	case fs.ErrorObjectNotFound:
		// OK!
	case nil:
		return fs.ErrorDirExists
	default:
		return err
	}

	return f.moveItemBin(ctx, srcPath, dstPath, "directory move")
}
  1451  
  1452  // PublicLink generates a public link to the remote path (usually readable by anyone)
  1453  func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err error) {
  1454  	// fs.Debugf(f, ">>> PublicLink %q", remote)
  1455  
  1456  	token, err := f.accessToken()
  1457  	if err != nil {
  1458  		return "", err
  1459  	}
  1460  
  1461  	data := url.Values{}
  1462  	data.Set("home", f.opt.Enc.FromStandardPath(f.absPath(remote)))
  1463  	data.Set("email", f.opt.Username)
  1464  	data.Set("x-email", f.opt.Username)
  1465  
  1466  	opts := rest.Opts{
  1467  		Method: "POST",
  1468  		Path:   "/api/m1/file/publish",
  1469  		Parameters: url.Values{
  1470  			"access_token": {token},
  1471  		},
  1472  		Body:        strings.NewReader(data.Encode()),
  1473  		ContentType: api.BinContentType,
  1474  	}
  1475  
  1476  	var response api.GenericBodyResponse
  1477  	err = f.pacer.Call(func() (bool, error) {
  1478  		res, err := f.srv.CallJSON(ctx, &opts, nil, &response)
  1479  		return shouldRetry(res, err, f, &opts)
  1480  	})
  1481  
  1482  	if err == nil && response.Body != "" {
  1483  		return api.PublicLinkURL + response.Body, nil
  1484  	}
  1485  	if err == nil {
  1486  		return "", errors.New("server returned empty link")
  1487  	}
  1488  	if apiErr, ok := err.(*api.FileErrorResponse); ok && apiErr.Status == 404 {
  1489  		return "", fs.ErrorObjectNotFound
  1490  	}
  1491  	return "", err
  1492  }
  1493  
  1494  // CleanUp permanently deletes all trashed files/folders
  1495  func (f *Fs) CleanUp(ctx context.Context) error {
  1496  	// fs.Debugf(f, ">>> CleanUp")
  1497  
  1498  	token, err := f.accessToken()
  1499  	if err != nil {
  1500  		return err
  1501  	}
  1502  
  1503  	data := url.Values{
  1504  		"email":   {f.opt.Username},
  1505  		"x-email": {f.opt.Username},
  1506  	}
  1507  	opts := rest.Opts{
  1508  		Method: "POST",
  1509  		Path:   "/api/m1/trashbin/empty",
  1510  		Parameters: url.Values{
  1511  			"access_token": {token},
  1512  		},
  1513  		Body:        strings.NewReader(data.Encode()),
  1514  		ContentType: api.BinContentType,
  1515  	}
  1516  
  1517  	var response api.CleanupResponse
  1518  	err = f.pacer.Call(func() (bool, error) {
  1519  		res, err := f.srv.CallJSON(ctx, &opts, nil, &response)
  1520  		return shouldRetry(res, err, f, &opts)
  1521  	})
  1522  	if err != nil {
  1523  		return err
  1524  	}
  1525  
  1526  	switch response.StatusStr {
  1527  	case "200":
  1528  		return nil
  1529  	default:
  1530  		return fmt.Errorf("cleanup failed (%s)", response.StatusStr)
  1531  	}
  1532  }
  1533  
  1534  // About gets quota information
  1535  func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
  1536  	// fs.Debugf(f, ">>> About")
  1537  
  1538  	token, err := f.accessToken()
  1539  	if err != nil {
  1540  		return nil, err
  1541  	}
  1542  	opts := rest.Opts{
  1543  		Method: "GET",
  1544  		Path:   "/api/m1/user",
  1545  		Parameters: url.Values{
  1546  			"access_token": {token},
  1547  		},
  1548  	}
  1549  
  1550  	var info api.UserInfoResponse
  1551  	err = f.pacer.Call(func() (bool, error) {
  1552  		res, err := f.srv.CallJSON(ctx, &opts, nil, &info)
  1553  		return shouldRetry(res, err, f, &opts)
  1554  	})
  1555  	if err != nil {
  1556  		return nil, err
  1557  	}
  1558  
  1559  	total := info.Body.Cloud.Space.BytesTotal
  1560  	used := int64(info.Body.Cloud.Space.BytesUsed)
  1561  
  1562  	usage := &fs.Usage{
  1563  		Total: fs.NewUsageValue(total),
  1564  		Used:  fs.NewUsageValue(used),
  1565  		Free:  fs.NewUsageValue(total - used),
  1566  	}
  1567  	return usage, nil
  1568  }
  1569  
  1570  // Put the object
  1571  // Copy the reader in to the new object which is returned
  1572  // The new object may have been created if an error is returned
  1573  func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
  1574  	o := &Object{
  1575  		fs:      f,
  1576  		remote:  src.Remote(),
  1577  		size:    src.Size(),
  1578  		modTime: src.ModTime(ctx),
  1579  	}
  1580  	// fs.Debugf(f, ">>> Put: %q %d '%v'", o.remote, o.size, o.modTime)
  1581  	return o, o.Update(ctx, in, src, options...)
  1582  }
  1583  
  1584  // Update an existing object
  1585  // Copy the reader into the object updating modTime and size
  1586  // The new object may have been created if an error is returned
  1587  func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
  1588  	wrapIn := in
  1589  	size := src.Size()
  1590  	if size < 0 {
  1591  		return errors.New("mailru does not support streaming uploads")
  1592  	}
  1593  
  1594  	err := o.fs.mkParentDirs(ctx, o.absPath())
  1595  	if err != nil {
  1596  		return err
  1597  	}
  1598  
  1599  	var (
  1600  		fileBuf    []byte
  1601  		fileHash   []byte
  1602  		newHash    []byte
  1603  		trySpeedup bool
  1604  	)
  1605  
  1606  	// Don't disturb the source if file fits in hash.
  1607  	// Skip an extra speedup request if file fits in hash.
  1608  	if size > mrhash.Size {
  1609  		// Request hash from source.
  1610  		if srcHash, err := src.Hash(ctx, MrHashType); err == nil && srcHash != "" {
  1611  			fileHash, _ = mrhash.DecodeString(srcHash)
  1612  		}
  1613  
  1614  		// Try speedup if it's globally enabled and source hash is available.
  1615  		trySpeedup = o.fs.opt.SpeedupEnable
  1616  		if trySpeedup && fileHash != nil {
  1617  			if o.putByHash(ctx, fileHash, src, "source") {
  1618  				return nil
  1619  			}
  1620  			trySpeedup = false // speedup failed, force upload
  1621  		}
  1622  	}
  1623  
  1624  	// Need to calculate hash, check whether file is still eligible for speedup
  1625  	if trySpeedup {
  1626  		trySpeedup = o.fs.eligibleForSpeedup(o.Remote(), size, options...)
  1627  	}
  1628  
  1629  	// Attempt to put by calculating hash in memory
  1630  	if trySpeedup && size <= int64(o.fs.opt.SpeedupMaxMem) {
  1631  		//fs.Debugf(o, "attempt to put by hash from memory")
  1632  		fileBuf, err = ioutil.ReadAll(in)
  1633  		if err != nil {
  1634  			return err
  1635  		}
  1636  		fileHash = mrhash.Sum(fileBuf)
  1637  		if o.putByHash(ctx, fileHash, src, "memory") {
  1638  			return nil
  1639  		}
  1640  		wrapIn = bytes.NewReader(fileBuf)
  1641  		trySpeedup = false // speedup failed, force upload
  1642  	}
  1643  
  1644  	// Attempt to put by hash using a spool file
  1645  	if trySpeedup {
  1646  		tmpFs, err := fs.TemporaryLocalFs()
  1647  		if err != nil {
  1648  			fs.Infof(tmpFs, "Failed to create spool FS: %v", err)
  1649  		} else {
  1650  			defer func() {
  1651  				if err := operations.Purge(ctx, tmpFs, ""); err != nil {
  1652  					fs.Infof(tmpFs, "Failed to cleanup spool FS: %v", err)
  1653  				}
  1654  			}()
  1655  
  1656  			spoolFile, mrHash, err := makeTempFile(ctx, tmpFs, wrapIn, src)
  1657  			if err != nil {
  1658  				return errors.Wrap(err, "Failed to create spool file")
  1659  			}
  1660  			if o.putByHash(ctx, mrHash, src, "spool") {
  1661  				// If put by hash is successful, ignore transitive error
  1662  				return nil
  1663  			}
  1664  			if wrapIn, err = spoolFile.Open(ctx); err != nil {
  1665  				return err
  1666  			}
  1667  			fileHash = mrHash
  1668  		}
  1669  	}
  1670  
  1671  	// Upload object data
  1672  	if size <= mrhash.Size {
  1673  		// Optimize upload: skip extra request if data fits in the hash buffer.
  1674  		if fileBuf == nil {
  1675  			fileBuf, err = ioutil.ReadAll(wrapIn)
  1676  		}
  1677  		if fileHash == nil && err == nil {
  1678  			fileHash = mrhash.Sum(fileBuf)
  1679  		}
  1680  		newHash = fileHash
  1681  	} else {
  1682  		var hasher gohash.Hash
  1683  		if fileHash == nil {
  1684  			// Calculate hash in transit
  1685  			hasher = mrhash.New()
  1686  			wrapIn = io.TeeReader(wrapIn, hasher)
  1687  		}
  1688  		newHash, err = o.upload(ctx, wrapIn, size, options...)
  1689  		if fileHash == nil && err == nil {
  1690  			fileHash = hasher.Sum(nil)
  1691  		}
  1692  	}
  1693  	if err != nil {
  1694  		return err
  1695  	}
  1696  
  1697  	if bytes.Compare(fileHash, newHash) != 0 {
  1698  		if o.fs.opt.CheckHash {
  1699  			return mrhash.ErrorInvalidHash
  1700  		}
  1701  		fs.Infof(o, "hash mismatch on upload: expected %x received %x", fileHash, newHash)
  1702  	}
  1703  	o.mrHash = newHash
  1704  	o.size = size
  1705  	o.modTime = src.ModTime(ctx)
  1706  	return o.addFileMetaData(ctx, true)
  1707  }
  1708  
  1709  // eligibleForSpeedup checks whether file is eligible for speedup method (put by hash)
  1710  func (f *Fs) eligibleForSpeedup(remote string, size int64, options ...fs.OpenOption) bool {
  1711  	if !f.opt.SpeedupEnable {
  1712  		return false
  1713  	}
  1714  	if size <= mrhash.Size || size < speedupMinSize || size >= int64(f.opt.SpeedupMaxDisk) {
  1715  		return false
  1716  	}
  1717  	_, _, partial := getTransferRange(size, options...)
  1718  	if partial {
  1719  		return false
  1720  	}
  1721  	if f.speedupAny {
  1722  		return true
  1723  	}
  1724  	if f.speedupGlobs == nil {
  1725  		return false
  1726  	}
  1727  	nameLower := strings.ToLower(strings.TrimSpace(path.Base(remote)))
  1728  	for _, pattern := range f.speedupGlobs {
  1729  		if matches, _ := filepath.Match(pattern, nameLower); matches {
  1730  			return true
  1731  		}
  1732  	}
  1733  	return false
  1734  }
  1735  
  1736  // parseSpeedupPatterns converts pattern string into list of unique glob patterns
  1737  func (f *Fs) parseSpeedupPatterns(patternString string) (err error) {
  1738  	f.speedupGlobs = nil
  1739  	f.speedupAny = false
  1740  	uniqueValidPatterns := make(map[string]interface{})
  1741  
  1742  	for _, pattern := range strings.Split(patternString, ",") {
  1743  		pattern = strings.ToLower(strings.TrimSpace(pattern))
  1744  		if pattern == "" {
  1745  			continue
  1746  		}
  1747  		if pattern == "*" {
  1748  			f.speedupAny = true
  1749  		}
  1750  		if _, err := filepath.Match(pattern, ""); err != nil {
  1751  			return fmt.Errorf("invalid file name pattern %q", pattern)
  1752  		}
  1753  		uniqueValidPatterns[pattern] = nil
  1754  	}
  1755  	for pattern := range uniqueValidPatterns {
  1756  		f.speedupGlobs = append(f.speedupGlobs, pattern)
  1757  	}
  1758  	return nil
  1759  }
  1760  
  1761  func (o *Object) putByHash(ctx context.Context, mrHash []byte, info fs.ObjectInfo, method string) bool {
  1762  	oNew := new(Object)
  1763  	*oNew = *o
  1764  	oNew.mrHash = mrHash
  1765  	oNew.size = info.Size()
  1766  	oNew.modTime = info.ModTime(ctx)
  1767  	if err := oNew.addFileMetaData(ctx, true); err != nil {
  1768  		fs.Debugf(o, "Cannot put by hash from %s, performing upload", method)
  1769  		return false
  1770  	}
  1771  	*o = *oNew
  1772  	fs.Debugf(o, "File has been put by hash from %s", method)
  1773  	return true
  1774  }
  1775  
// makeTempFile streams wrapIn into a spool file on tmpFs while computing
// both the Mailru hash and a SHA1 verification hash in transit. It returns
// the spool object together with the binary Mailru hash, or
// mrhash.ErrorInvalidHash if the spooled copy fails validation.
func makeTempFile(ctx context.Context, tmpFs fs.Fs, wrapIn io.Reader, src fs.ObjectInfo) (spoolFile fs.Object, mrHash []byte, err error) {
	// Local temporary file system must support SHA1
	hashType := hash.SHA1

	// Calculate Mailru and spool verification hashes in transit
	hashSet := hash.NewHashSet(MrHashType, hashType)
	hasher, err := hash.NewMultiHasherTypes(hashSet)
	if err != nil {
		return nil, nil, err
	}
	wrapIn = io.TeeReader(wrapIn, hasher)

	// Copy stream into spool file
	tmpInfo := object.NewStaticObjectInfo(src.Remote(), src.ModTime(ctx), src.Size(), false, nil, nil)
	hashOption := &fs.HashesOption{Hashes: hashSet}
	if spoolFile, err = tmpFs.Put(ctx, wrapIn, tmpInfo, hashOption); err != nil {
		return nil, nil, err
	}

	// Validate spool file: its size must match the source and the SHA1
	// reported by the spool filesystem must agree with the in-transit one.
	sums := hasher.Sums()
	checkSum := sums[hashType]
	fileSum, err := spoolFile.Hash(ctx, hashType)
	if spoolFile.Size() != src.Size() || err != nil || checkSum == "" || fileSum != checkSum {
		return nil, nil, mrhash.ErrorInvalidHash
	}

	mrHash, err = mrhash.DecodeString(sums[MrHashType])
	return
}
  1806  
// upload streams the object content to an upload shard via HTTP PUT and
// returns the Mailru flavored hash decoded from the server's response.
// size is sent as Content-Length and must match the stream exactly.
func (o *Object) upload(ctx context.Context, in io.Reader, size int64, options ...fs.OpenOption) ([]byte, error) {
	token, err := o.fs.accessToken()
	if err != nil {
		return nil, err
	}
	// Obtain (or reuse) an upload shard URL from the dispatcher.
	shardURL, err := o.fs.uploadShard(ctx)
	if err != nil {
		return nil, err
	}

	opts := rest.Opts{
		Method:        "PUT",
		RootURL:       shardURL,
		Body:          in,
		Options:       options,
		ContentLength: &size,
		Parameters: url.Values{
			"client_id": {api.OAuthClientID},
			"token":     {token},
		},
		ExtraHeaders: map[string]string{
			"Accept": "*/*",
		},
	}

	var (
		res     *http.Response
		strHash string
	)
	// The hash string is extracted from the response body by readBodyWord;
	// both the call and the body read participate in the retry decision.
	err = o.fs.pacer.Call(func() (bool, error) {
		res, err = o.fs.srv.Call(ctx, &opts)
		if err == nil {
			strHash, err = readBodyWord(res)
		}
		return fserrors.ShouldRetry(err), err
	})
	if err != nil {
		closeBody(res)
		return nil, err
	}

	// Any status other than 200/201 is treated as a failed upload.
	switch res.StatusCode {
	case 200, 201:
		return mrhash.DecodeString(strHash)
	default:
		return nil, fmt.Errorf("upload failed with code %s (%d)", res.Status, res.StatusCode)
	}
}
  1855  
  1856  func (f *Fs) uploadShard(ctx context.Context) (string, error) {
  1857  	f.shardMu.Lock()
  1858  	defer f.shardMu.Unlock()
  1859  
  1860  	if f.shardURL != "" && time.Now().Before(f.shardExpiry) {
  1861  		return f.shardURL, nil
  1862  	}
  1863  
  1864  	token, err := f.accessToken()
  1865  	if err != nil {
  1866  		return "", err
  1867  	}
  1868  
  1869  	opts := rest.Opts{
  1870  		Method: "GET",
  1871  		Path:   "/api/m1/dispatcher",
  1872  		Parameters: url.Values{
  1873  			"client_id":    {api.OAuthClientID},
  1874  			"access_token": {token},
  1875  		},
  1876  	}
  1877  
  1878  	var info api.ShardInfoResponse
  1879  	err = f.pacer.Call(func() (bool, error) {
  1880  		res, err := f.srv.CallJSON(ctx, &opts, nil, &info)
  1881  		return shouldRetry(res, err, f, &opts)
  1882  	})
  1883  	if err != nil {
  1884  		return "", err
  1885  	}
  1886  
  1887  	f.shardURL = info.Body.Upload[0].URL
  1888  	f.shardExpiry = time.Now().Add(shardExpirySec * time.Second)
  1889  	fs.Debugf(f, "new upload shard: %s", f.shardURL)
  1890  
  1891  	return f.shardURL, nil
  1892  }
  1893  
// Object describes a mailru object.
// The metadata fields below are populated lazily by readMetaData.
type Object struct {
	fs          *Fs       // what this object is part of
	remote      string    // The remote path (relative to the Fs root)
	hasMetaData bool      // whether info below has been set
	size        int64     // Bytes in the object
	modTime     time.Time // Modified time of the object
	mrHash      []byte    // Mail.ru flavored SHA1 hash of the object
}
  1903  
  1904  // NewObject finds an Object at the remote.
  1905  // If object can't be found it fails with fs.ErrorObjectNotFound
  1906  func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
  1907  	// fs.Debugf(f, ">>> NewObject %q", remote)
  1908  	o := &Object{
  1909  		fs:     f,
  1910  		remote: remote,
  1911  	}
  1912  	err := o.readMetaData(ctx, true)
  1913  	if err != nil {
  1914  		return nil, err
  1915  	}
  1916  	return o, nil
  1917  }
  1918  
// absPath converts root-relative remote to absolute home path
// by delegating to the parent Fs.
func (o *Object) absPath() string {
	return o.fs.absPath(o.remote)
}
  1923  
  1924  // Object.readMetaData reads and fills a file info
  1925  // If object can't be found it fails with fs.ErrorObjectNotFound
  1926  func (o *Object) readMetaData(ctx context.Context, force bool) error {
  1927  	if o.hasMetaData && !force {
  1928  		return nil
  1929  	}
  1930  	entry, dirSize, err := o.fs.readItemMetaData(ctx, o.absPath())
  1931  	if err != nil {
  1932  		return err
  1933  	}
  1934  	newObj, ok := entry.(*Object)
  1935  	if !ok || dirSize >= 0 {
  1936  		return fs.ErrorNotAFile
  1937  	}
  1938  	if newObj.remote != o.remote {
  1939  		return fmt.Errorf("File %q path has changed to %q", o.remote, newObj.remote)
  1940  	}
  1941  	o.hasMetaData = true
  1942  	o.size = newObj.size
  1943  	o.modTime = newObj.modTime
  1944  	o.mrHash = newObj.mrHash
  1945  	return nil
  1946  }
  1947  
// Fs returns the parent Fs this object belongs to.
func (o *Object) Fs() fs.Info {
	return o.fs
}
  1952  
  1953  // Return a string version
  1954  func (o *Object) String() string {
  1955  	if o == nil {
  1956  		return "<nil>"
  1957  	}
  1958  	//return fmt.Sprintf("[%s]%q", o.fs.root, o.remote)
  1959  	return o.remote
  1960  }
  1961  
// Remote returns the remote path of the object relative to the Fs root.
func (o *Object) Remote() string {
	return o.remote
}
  1966  
  1967  // ModTime returns the modification time of the object
  1968  // It attempts to read the objects mtime and if that isn't present the
  1969  // LastModified returned in the http headers
  1970  func (o *Object) ModTime(ctx context.Context) time.Time {
  1971  	err := o.readMetaData(ctx, false)
  1972  	if err != nil {
  1973  		fs.Errorf(o, "%v", err)
  1974  	}
  1975  	return o.modTime
  1976  }
  1977  
  1978  // Size returns the size of an object in bytes
  1979  func (o *Object) Size() int64 {
  1980  	ctx := context.Background() // Note: Object.Size does not pass context!
  1981  	err := o.readMetaData(ctx, false)
  1982  	if err != nil {
  1983  		fs.Errorf(o, "%v", err)
  1984  	}
  1985  	return o.size
  1986  }
  1987  
  1988  // Hash returns the MD5 or SHA1 sum of an object
  1989  // returning a lowercase hex string
  1990  func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
  1991  	if t == MrHashType {
  1992  		return hex.EncodeToString(o.mrHash), nil
  1993  	}
  1994  	return "", hash.ErrUnsupported
  1995  }
  1996  
// Storable returns whether this object is storable.
// Mailru objects are always storable.
func (o *Object) Storable() bool {
	return true
}
  2001  
// SetModTime sets the modification time of the local fs object
//
// Commits the datastore by re-registering the file metadata
// with the new time in overwrite mode.
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
	// fs.Debugf(o, ">>> SetModTime [%v]", modTime)
	o.modTime = modTime
	return o.addFileMetaData(ctx, true)
}
  2010  
// addFileMetaData registers the file (identified by its Mailru hash, size
// and modtime) in the remote metadata tree using the binary meta protocol.
// When overwrite is true an existing entry at the path is replaced;
// otherwise the server adds conditionally (add with rename if changed).
// Fails with mrhash.ErrorInvalidHash if o.mrHash has the wrong length.
func (o *Object) addFileMetaData(ctx context.Context, overwrite bool) error {
	if len(o.mrHash) != mrhash.Size {
		return mrhash.ErrorInvalidHash
	}
	token, err := o.fs.accessToken()
	if err != nil {
		return err
	}
	metaURL, err := o.fs.metaServer(ctx)
	if err != nil {
		return err
	}

	// Build the binary request: operation, revision, encoded path,
	// size, mtime, a zero dword, then the hash. Field order is fixed
	// by the wire protocol.
	req := api.NewBinWriter()
	req.WritePu16(api.OperationAddFile)
	req.WritePu16(0) // revision
	req.WriteString(o.fs.opt.Enc.FromStandardPath(o.absPath()))
	req.WritePu64(o.size)
	req.WritePu64(o.modTime.Unix())
	req.WritePu32(0)
	req.Write(o.mrHash)

	if overwrite {
		// overwrite
		req.WritePu32(1)
	} else {
		// don't add if not changed, add with rename if changed
		// (the conditional mode repeats hash and size for comparison)
		req.WritePu32(55)
		req.Write(o.mrHash)
		req.WritePu64(o.size)
	}

	opts := rest.Opts{
		Method:  "POST",
		RootURL: metaURL,
		Parameters: url.Values{
			"client_id": {api.OAuthClientID},
			"token":     {token},
		},
		ContentType: api.BinContentType,
		Body:        req.Reader(),
	}

	var res *http.Response
	err = o.fs.pacer.Call(func() (bool, error) {
		res, err = o.fs.srv.Call(ctx, &opts)
		return shouldRetry(res, err, o.fs, &opts)
	})
	if err != nil {
		closeBody(res)
		return err
	}

	// The first byte of the binary reply is the status code.
	reply := api.NewBinReader(res.Body)
	defer closeBody(res)

	switch status := reply.ReadByteAsInt(); status {
	case api.AddResultOK, api.AddResultNotModified, api.AddResultDunno04, api.AddResultDunno09:
		return nil
	case api.AddResultInvalidName:
		return ErrorInvalidName
	default:
		return fmt.Errorf("add file error %d", status)
	}
}
  2076  
// Remove deletes the remote object (without sending it to the trash).
func (o *Object) Remove(ctx context.Context) error {
	// fs.Debugf(o, ">>> Remove")
	return o.fs.delete(ctx, o.absPath(), false)
}
  2082  
  2083  // getTransferRange detects partial transfers and calculates start/end offsets into file
  2084  func getTransferRange(size int64, options ...fs.OpenOption) (start int64, end int64, partial bool) {
  2085  	var offset, limit int64 = 0, -1
  2086  
  2087  	for _, option := range options {
  2088  		switch opt := option.(type) {
  2089  		case *fs.SeekOption:
  2090  			offset = opt.Offset
  2091  		case *fs.RangeOption:
  2092  			offset, limit = opt.Decode(size)
  2093  		default:
  2094  			if option.Mandatory() {
  2095  				fs.Errorf(nil, "Unsupported mandatory option: %v", option)
  2096  			}
  2097  		}
  2098  	}
  2099  	if limit < 0 {
  2100  		limit = size - offset
  2101  	}
  2102  	end = offset + limit
  2103  	if end > size {
  2104  		end = size
  2105  	}
  2106  	partial = !(offset == 0 && end == size)
  2107  	return offset, end, partial
  2108  }
  2109  
// Open an object for read and download its content.
//
// It dispatches a download server from the pool, issues a ranged GET and
// wraps the response body in an endHandler which releases the server lock
// and (for complete, non-partial downloads) verifies the Mailru hash when
// the stream ends.
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
	// fs.Debugf(o, ">>> Open")

	token, err := o.fs.accessToken()
	if err != nil {
		return nil, err
	}

	start, end, partial := getTransferRange(o.size, options...)

	// TODO: set custom timeouts
	opts := rest.Opts{
		Method:  "GET",
		Options: options,
		Path:    url.PathEscape(strings.TrimLeft(o.fs.opt.Enc.FromStandardPath(o.absPath()), "/")),
		Parameters: url.Values{
			"client_id": {api.OAuthClientID},
			"token":     {token},
		},
		ExtraHeaders: map[string]string{
			"Accept": "*/*",
			// Range header end is inclusive, hence end-1.
			"Range":  fmt.Sprintf("bytes=%d-%d", start, end-1),
		},
	}

	var res *http.Response
	server := ""
	err = o.fs.pacer.Call(func() (bool, error) {
		// On each retry prefer a server different from the one just tried.
		server, err = o.fs.fileServers.Dispatch(ctx, server)
		if err != nil {
			return false, err
		}
		opts.RootURL = server
		res, err = o.fs.srv.Call(ctx, &opts)
		return shouldRetry(res, err, o.fs, &opts)
	})
	if err != nil {
		if res != nil && res.Body != nil {
			closeBody(res)
		}
		return nil, err
	}

	var hasher gohash.Hash
	if !partial {
		// Cannot check hash of partial download
		hasher = mrhash.New()
	}
	// The endHandler owns the body; closing it releases the server lock.
	wrapStream := &endHandler{
		ctx:    ctx,
		stream: res.Body,
		hasher: hasher,
		o:      o,
		server: server,
	}
	return wrapStream, nil
}
  2168  
// endHandler wraps a download stream and performs one-time end-of-stream
// bookkeeping: releasing the download server lock and optionally
// verifying the received data against the object's Mailru hash.
type endHandler struct {
	ctx    context.Context
	stream io.ReadCloser // underlying HTTP response body
	hasher gohash.Hash   // nil for partial downloads (no hash check possible)
	o      *Object       // object being downloaded
	server string        // download server to release when done
	done   bool          // ensures handle() runs only once
}
  2177  
  2178  func (e *endHandler) Read(p []byte) (n int, err error) {
  2179  	n, err = e.stream.Read(p)
  2180  	if e.hasher != nil {
  2181  		// hasher will not return an error, just panic
  2182  		_, _ = e.hasher.Write(p[:n])
  2183  	}
  2184  	if err != nil { // io.Error or EOF
  2185  		err = e.handle(err)
  2186  	}
  2187  	return
  2188  }
  2189  
// Close runs the end-of-stream bookkeeping (ignoring its verdict)
// and closes the underlying stream.
func (e *endHandler) Close() error {
	_ = e.handle(nil) // ignore returned error
	return e.stream.Close()
}
  2194  
  2195  func (e *endHandler) handle(err error) error {
  2196  	if e.done {
  2197  		return err
  2198  	}
  2199  	e.done = true
  2200  	o := e.o
  2201  
  2202  	o.fs.fileServers.Free(e.server)
  2203  	if err != io.EOF || e.hasher == nil {
  2204  		return err
  2205  	}
  2206  
  2207  	newHash := e.hasher.Sum(nil)
  2208  	if bytes.Compare(o.mrHash, newHash) == 0 {
  2209  		return io.EOF
  2210  	}
  2211  	if o.fs.opt.CheckHash {
  2212  		return mrhash.ErrorInvalidHash
  2213  	}
  2214  	fs.Infof(o, "hash mismatch on download: expected %x received %x", o.mrHash, newHash)
  2215  	return io.EOF
  2216  }
  2217  
// serverPool backs the download server dispatcher.
type serverPool struct {
	pool      pendingServerMap // active servers keyed by URL
	mu        sync.Mutex       // protects pool
	path      string           // dispatcher request path
	expirySec time.Duration    // server lease length; multiplied by time.Second in addServer
	fs        *Fs              // parent Fs, used for pacer, HTTP client and logging
}
  2226  
// pendingServerMap maps server URLs to their usage/lease state.
type pendingServerMap map[string]*pendingServer

// pendingServer tracks how many downloads currently hold a server
// and when its dispatcher lease expires.
type pendingServer struct {
	locks  int       // number of concurrent downloads using this server
	expiry time.Time // when the server lease expires
}
  2233  
  2234  // Dispatch dispatches next download server.
  2235  // It prefers switching and tries to avoid current server
  2236  // in use by caller because it may be overloaded or slow.
  2237  func (p *serverPool) Dispatch(ctx context.Context, current string) (string, error) {
  2238  	now := time.Now()
  2239  	url := p.getServer(current, now)
  2240  	if url != "" {
  2241  		return url, nil
  2242  	}
  2243  
  2244  	// Server not found - ask Mailru dispatcher.
  2245  	opts := rest.Opts{
  2246  		Method:  "GET",
  2247  		RootURL: api.DispatchServerURL,
  2248  		Path:    p.path,
  2249  	}
  2250  	var (
  2251  		res *http.Response
  2252  		err error
  2253  	)
  2254  	err = p.fs.pacer.Call(func() (bool, error) {
  2255  		res, err = p.fs.srv.Call(ctx, &opts)
  2256  		if err != nil {
  2257  			return fserrors.ShouldRetry(err), err
  2258  		}
  2259  		url, err = readBodyWord(res)
  2260  		return fserrors.ShouldRetry(err), err
  2261  	})
  2262  	if err != nil || url == "" {
  2263  		closeBody(res)
  2264  		return "", errors.Wrap(err, "Failed to request file server")
  2265  	}
  2266  
  2267  	p.addServer(url, now)
  2268  	return url, nil
  2269  }
  2270  
// Free releases one lock on the given download server.
// A server whose lock count reaches zero after its lease has expired is
// removed from the pool; a server found with a non-positive lock count
// indicates a bookkeeping race and is purged.
func (p *serverPool) Free(url string) {
	if url == "" {
		return
	}
	p.mu.Lock()
	defer p.mu.Unlock()

	srv := p.pool[url]
	if srv == nil {
		return
	}

	if srv.locks <= 0 {
		// Getting here indicates possible race
		fs.Infof(p.fs, "Purge file server:  locks -, url %s", url)
		delete(p.pool, url)
		return
	}

	srv.locks--
	if srv.locks == 0 && time.Now().After(srv.expiry) {
		delete(p.pool, url)
		fs.Debugf(p.fs, "Free file server:   locks 0, url %s", url)
		return
	}
	fs.Debugf(p.fs, "Unlock file server: locks %d, url %s", srv.locks, url)
}
  2298  
// getServer finds an underlocked, unexpired server in the pool, locks it
// and returns its URL. The caller's current server is skipped to spread
// load. Returns "" when no suitable server is available.
// Map iteration order is random, which gives a cheap spread across servers.
func (p *serverPool) getServer(current string, now time.Time) string {
	p.mu.Lock()
	defer p.mu.Unlock()

	for url, srv := range p.pool {
		if url == "" || srv.locks < 0 {
			continue // Purged server slot
		}
		if url == current {
			continue // Current server - prefer another
		}
		if srv.locks >= maxServerLocks {
			continue // Overlocked server
		}
		if now.After(srv.expiry) {
			continue // Expired server
		}

		srv.locks++
		fs.Debugf(p.fs, "Lock file server:   locks %d, url %s", srv.locks, url)
		return url
	}

	return ""
}
  2325  
// addServer registers (or re-locks) a server proposed by the dispatcher
// and refreshes its lease expiry.
func (p *serverPool) addServer(url string, now time.Time) {
	p.mu.Lock()
	defer p.mu.Unlock()

	expiry := now.Add(p.expirySec * time.Second)

	// Only render the expiry timestamp when it will actually be logged.
	expiryStr := []byte("-")
	if fs.Config.LogLevel >= fs.LogLevelInfo {
		expiryStr, _ = expiry.MarshalJSON()
	}

	// Attach to a server proposed by dispatcher
	srv := p.pool[url]
	if srv != nil {
		srv.locks++
		srv.expiry = expiry
		fs.Debugf(p.fs, "Reuse file server:  locks %d, url %s, expiry %s", srv.locks, url, expiryStr)
		return
	}

	// Add new server
	p.pool[url] = &pendingServer{locks: 1, expiry: expiry}
	fs.Debugf(p.fs, "Switch file server: locks 1, url %s, expiry %s", url, expiryStr)
}
  2350  
// Name returns the name of the remote (as passed into NewFs).
func (f *Fs) Name() string {
	return f.name
}
  2355  
// Root returns the root path of the remote (as passed into NewFs).
func (f *Fs) Root() string {
	return f.root
}
  2360  
  2361  // String converts this Fs to a string
  2362  func (f *Fs) String() string {
  2363  	return fmt.Sprintf("[%s]", f.root)
  2364  }
  2365  
// Precision returns the modtime precision of this Fs (one second).
func (f *Fs) Precision() time.Duration {
	return time.Second
}
  2370  
// Hashes returns the supported hash sets
// (only the Mail.ru flavored hash).
func (f *Fs) Hashes() hash.Set {
	return hash.Set(MrHashType)
}
  2375  
// Features returns the optional features of this Fs.
func (f *Fs) Features() *fs.Features {
	return f.features
}
  2380  
  2381  // close response body ignoring errors
  2382  func closeBody(res *http.Response) {
  2383  	if res != nil {
  2384  		_ = res.Body.Close()
  2385  	}
  2386  }
  2387  
// Check the interfaces are satisfied (compile-time assertions).
var (
	_ fs.Fs           = (*Fs)(nil)
	_ fs.Purger       = (*Fs)(nil)
	_ fs.Copier       = (*Fs)(nil)
	_ fs.Mover        = (*Fs)(nil)
	_ fs.DirMover     = (*Fs)(nil)
	_ fs.PublicLinker = (*Fs)(nil)
	_ fs.CleanUpper   = (*Fs)(nil)
	_ fs.Abouter      = (*Fs)(nil)
	_ fs.Object       = (*Object)(nil)
)