github.com/ncw/rclone@v1.48.1-0.20190724201158-a35aa1360e3e/backend/mega/mega.go (about)

     1  // Package mega provides an interface to the Mega
     2  // object storage system.
     3  package mega
     4  
     5  /*
     6  Open questions
     7  * Does mega support a content hash - what exactly are the mega hashes?
     8  * Can mega support setting modification times?
     9  
    10  Improvements:
    11  * Uploads could be done in parallel
    12  * Downloads would be more efficient done in one go
    13  * Uploads would be more efficient with bigger chunks
    14  * Looks like mega can support server side copy, but it isn't implemented in go-mega
    15  * Upload can set modtime... - set as int64_t - can set ctime and mtime?
    16  */
    17  
    18  import (
    19  	"context"
    20  	"fmt"
    21  	"io"
    22  	"path"
    23  	"strings"
    24  	"sync"
    25  	"time"
    26  
    27  	"github.com/ncw/rclone/fs"
    28  	"github.com/ncw/rclone/fs/config/configmap"
    29  	"github.com/ncw/rclone/fs/config/configstruct"
    30  	"github.com/ncw/rclone/fs/config/obscure"
    31  	"github.com/ncw/rclone/fs/fshttp"
    32  	"github.com/ncw/rclone/fs/hash"
    33  	"github.com/ncw/rclone/lib/pacer"
    34  	"github.com/ncw/rclone/lib/readers"
    35  	"github.com/pkg/errors"
    36  	mega "github.com/t3rm1n4l/go-mega"
    37  )
    38  
// Timing constants for the pacer and event handling.
const (
	minSleep      = 10 * time.Millisecond  // minimum pacer sleep between API calls
	maxSleep      = 2 * time.Second        // maximum pacer sleep when backing off
	eventWaitTime = 500 * time.Millisecond // how long to wait for server side events to propagate
	decayConstant = 2                      // bigger for slower decay, exponential
)
    45  
// Cache of logged in Mega sessions, shared between all remotes using
// the same user name so the (expensive) in-memory node tree is only
// built once per account.
var (
	megaCacheMu sync.Mutex                // mutex for the below
	megaCache   = map[string]*mega.Mega{} // cache logged in Mega's by user
)
    50  
// Register with Fs
//
// Declares the "mega" backend and its configuration options so rclone
// can construct it via NewFs.
func init() {
	fs.Register(&fs.RegInfo{
		Name:        "mega",
		Description: "Mega",
		NewFs:       NewFs,
		Options: []fs.Option{{
			Name:     "user",
			Help:     "User name",
			Required: true,
		}, {
			// stored obscured - revealed in NewFs
			Name:       "pass",
			Help:       "Password.",
			Required:   true,
			IsPassword: true,
		}, {
			Name: "debug",
			Help: `Output more debug from Mega.

If this flag is set (along with -vv) it will print further debugging
information from the mega backend.`,
			Default:  false,
			Advanced: true,
		}, {
			Name: "hard_delete",
			Help: `Delete files permanently rather than putting them into the trash.

Normally the mega backend will put all deletions into the trash rather
than permanently deleting them.  If you specify this then rclone will
permanently delete objects instead.`,
			Default:  false,
			Advanced: true,
		}},
	})
}
    86  
// Options defines the configuration for this backend
type Options struct {
	User       string `config:"user"`        // account user name
	Pass       string `config:"pass"`        // account password (obscured in config, revealed in NewFs)
	Debug      bool   `config:"debug"`       // enable extra go-mega debug output
	HardDelete bool   `config:"hard_delete"` // delete permanently instead of moving to trash
}
    94  
// Fs represents a remote mega
//
// Note that srv may be shared with other Fs instances for the same
// user (see megaCache), so state that is per-remote lives here while
// the node tree lives in srv.
type Fs struct {
	name       string       // name of this remote
	root       string       // the path we are working on
	opt        Options      // parsed config options
	features   *fs.Features // optional features
	srv        *mega.Mega   // the connection to the server (possibly shared)
	pacer      *fs.Pacer    // pacer for API calls
	rootNodeMu sync.Mutex   // mutex protecting _rootNode
	_rootNode  *mega.Node   // root node - call findRoot to use this
	mkdirMu    sync.Mutex   // used to serialize calls to mkdir / rmdir
}
   107  
// Object describes a mega object
//
// Will definitely have info but maybe not meta
//
// Normally rclone would just store an ID here but go-mega and mega.nz
// expect you to build an entire tree of all the objects in memory.
// In this case we just store a pointer to the object.
type Object struct {
	fs     *Fs        // what this object is part of
	remote string     // The remote path
	info   *mega.Node // pointer to the mega node (nil until metadata read)
}
   120  
   121  // ------------------------------------------------------------
   122  
// Name returns the name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
	return f.name
}
   127  
// Root returns the root path of the remote (as passed into NewFs)
func (f *Fs) Root() string {
	return f.root
}
   132  
   133  // String converts this Fs to a string
   134  func (f *Fs) String() string {
   135  	return fmt.Sprintf("mega root '%s'", f.root)
   136  }
   137  
// Features returns the optional features of this Fs
// (set up in NewFs via fs.Features.Fill).
func (f *Fs) Features() *fs.Features {
	return f.features
}
   142  
   143  // parsePath parses an mega 'url'
   144  func parsePath(path string) (root string) {
   145  	root = strings.Trim(path, "/")
   146  	return
   147  }
   148  
// shouldRetry returns a boolean as to whether this err deserves to be
// retried.  It returns the err as a convenience
//
// Currently always returns false as the go-mega library performs its
// own low level retries (see SetRetries in NewFs).  The commented-out
// code below is kept as a reference should pacer-level retries ever
// be wanted again.
func shouldRetry(err error) (bool, error) {
	// Let the mega library handle the low level retries
	return false, err
	/*
		switch errors.Cause(err) {
		case mega.EAGAIN, mega.ERATELIMIT, mega.ETEMPUNAVAIL:
			return true, err
		}
		return fserrors.ShouldRetry(err), err
	*/
}
   162  
// readMetaDataForPath reads the metadata from the path
//
// Returns fs.ErrorDirNotFound if the root doesn't exist, or the
// errors from findObject (e.g. fs.ErrorObjectNotFound).
func (f *Fs) readMetaDataForPath(remote string) (info *mega.Node, err error) {
	rootNode, err := f.findRoot(false)
	if err != nil {
		return nil, err
	}
	return f.findObject(rootNode, remote)
}
   171  
// NewFs constructs an Fs from the path, container:path
//
// The *mega.Mega connection is looked up in (or added to) a
// package-level cache keyed on the user name, so several remotes
// configured for the same account share one logged in session and
// one in-memory node tree.
//
// If the root turns out to be a file the returned Fs points at its
// parent directory and fs.ErrorIsFile is returned alongside it.
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	// Parse config into Options struct
	opt := new(Options)
	err := configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}
	if opt.Pass != "" {
		var err error
		opt.Pass, err = obscure.Reveal(opt.Pass)
		if err != nil {
			return nil, errors.Wrap(err, "couldn't decrypt password")
		}
	}

	// cache *mega.Mega on username so we can re-use and share
	// them between remotes.  They are expensive to make as they
	// contain all the objects and sharing the objects makes the
	// move code easier as we don't have to worry about mixing
	// them up between different remotes.
	megaCacheMu.Lock()
	defer megaCacheMu.Unlock()
	srv := megaCache[opt.User]
	if srv == nil {
		srv = mega.New().SetClient(fshttp.NewClient(fs.Config))
		srv.SetRetries(fs.Config.LowLevelRetries) // let mega do the low level retries
		srv.SetLogger(func(format string, v ...interface{}) {
			fs.Infof("*go-mega*", format, v...)
		})
		if opt.Debug {
			srv.SetDebugger(func(format string, v ...interface{}) {
				fs.Debugf("*go-mega*", format, v...)
			})
		}

		err := srv.Login(opt.User, opt.Pass)
		if err != nil {
			return nil, errors.Wrap(err, "couldn't login")
		}
		megaCache[opt.User] = srv
	}

	root = parsePath(root)
	f := &Fs{
		name:  name,
		root:  root,
		opt:   *opt,
		srv:   srv,
		pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
	}
	f.features = (&fs.Features{
		DuplicateFiles:          true,
		CanHaveEmptyDirectories: true,
	}).Fill(f)

	// Find the root node and check if it is a file or not
	_, err = f.findRoot(false)
	switch err {
	case nil:
		// root node found and is a directory
	case fs.ErrorDirNotFound:
		// root node not found, so can't be a file
	case fs.ErrorIsFile:
		// root node is a file so point to parent directory and
		// return fs.ErrorIsFile so the caller knows
		root = path.Dir(root)
		if root == "." {
			root = ""
		}
		f.root = root
		return f, err
	}
	return f, nil
}
   246  
   247  // splitNodePath splits nodePath into / separated parts, returning nil if it
   248  // should refer to the root
   249  func splitNodePath(nodePath string) (parts []string) {
   250  	nodePath = path.Clean(nodePath)
   251  	parts = strings.Split(nodePath, "/")
   252  	if len(parts) == 1 && (parts[0] == "." || parts[0] == "/") {
   253  		return nil
   254  	}
   255  	return parts
   256  }
   257  
// findNode looks up the node for the path of the name given from the root given
//
// It returns mega.ENOENT if it wasn't found
//
// NOTE(review): assumes PathLookup returns a non-empty slice on
// success for a non-root path - confirm against go-mega.
func (f *Fs) findNode(rootNode *mega.Node, nodePath string) (*mega.Node, error) {
	parts := splitNodePath(nodePath)
	if parts == nil {
		// empty path refers to the root itself
		return rootNode, nil
	}
	nodes, err := f.srv.FS.PathLookup(rootNode, parts)
	if err != nil {
		return nil, err
	}
	// the last node in the chain is the one looked up
	return nodes[len(nodes)-1], nil
}
   272  
   273  // findDir finds the directory rooted from the node passed in
   274  func (f *Fs) findDir(rootNode *mega.Node, dir string) (node *mega.Node, err error) {
   275  	node, err = f.findNode(rootNode, dir)
   276  	if err == mega.ENOENT {
   277  		return nil, fs.ErrorDirNotFound
   278  	} else if err == nil && node.GetType() == mega.FILE {
   279  		return nil, fs.ErrorIsFile
   280  	}
   281  	return node, err
   282  }
   283  
   284  // findObject looks up the node for the object of the name given
   285  func (f *Fs) findObject(rootNode *mega.Node, file string) (node *mega.Node, err error) {
   286  	node, err = f.findNode(rootNode, file)
   287  	if err == mega.ENOENT {
   288  		return nil, fs.ErrorObjectNotFound
   289  	} else if err == nil && node.GetType() != mega.FILE {
   290  		return nil, fs.ErrorNotAFile
   291  	}
   292  	return node, err
   293  }
   294  
// lookupDir looks up the node for the directory of the name given
// relative to the root of this Fs.
//
// It does not create anything - returns fs.ErrorDirNotFound if the
// root or the directory doesn't exist.
func (f *Fs) lookupDir(dir string) (*mega.Node, error) {
	rootNode, err := f.findRoot(false)
	if err != nil {
		return nil, err
	}
	return f.findDir(rootNode, dir)
}
   305  
// lookupParentDir finds the parent node for the remote passed in,
// returning the parent node and the leaf name of remote.
func (f *Fs) lookupParentDir(remote string) (dirNode *mega.Node, leaf string, err error) {
	parent, leaf := path.Split(remote)
	dirNode, err = f.lookupDir(parent)
	return dirNode, leaf, err
}
   312  
// mkdir makes the directory and any parent directories for the
// directory of the name given
//
// It probes progressively shorter prefixes of the path until it finds
// one that exists, then creates the remaining components one at a
// time below it.  Serialized with rmdir via mkdirMu so concurrent
// calls can't race and create duplicates (mega allows duplicate
// names).
func (f *Fs) mkdir(rootNode *mega.Node, dir string) (node *mega.Node, err error) {
	f.mkdirMu.Lock()
	defer f.mkdirMu.Unlock()

	parts := splitNodePath(dir)
	if parts == nil {
		// nothing to create - dir refers to the root
		return rootNode, nil
	}
	var i int
	// look up until we find a directory which exists
	for i = 0; i <= len(parts); i++ {
		var nodes []*mega.Node
		nodes, err = f.srv.FS.PathLookup(rootNode, parts[:len(parts)-i])
		if err == nil {
			if len(nodes) == 0 {
				// the whole prefix was empty - start from the root
				node = rootNode
			} else {
				node = nodes[len(nodes)-1]
			}
			break
		}
		if err != mega.ENOENT {
			return nil, errors.Wrap(err, "mkdir lookup failed")
		}
	}
	if err != nil {
		// even the empty prefix failed - rootNode must be bad
		return nil, errors.Wrap(err, "internal error: mkdir called with non existent root node")
	}
	// i is number of directories to create (may be 0)
	// node is directory to create them from
	for _, name := range parts[len(parts)-i:] {
		// create directory called name in node
		err = f.pacer.Call(func() (bool, error) {
			node, err = f.srv.CreateDir(name, node)
			return shouldRetry(err)
		})
		if err != nil {
			return nil, errors.Wrap(err, "mkdir create node failed")
		}
	}
	return node, nil
}
   357  
// mkdirParent creates the parent directory of remote (creating the
// Fs root first if necessary), returning the parent node and the
// leaf name of remote.
func (f *Fs) mkdirParent(remote string) (dirNode *mega.Node, leaf string, err error) {
	rootNode, err := f.findRoot(true)
	if err != nil {
		return nil, "", err
	}
	parent, leaf := path.Split(remote)
	dirNode, err = f.mkdir(rootNode, parent)
	return dirNode, leaf, err
}
   368  
// findRoot looks up the root directory node and returns it.
//
// if create is true it tries to create the root directory if not found
//
// The result is cached in f._rootNode (guarded by rootNodeMu) so the
// lookup only happens once; clearRoot invalidates the cache.
func (f *Fs) findRoot(create bool) (*mega.Node, error) {
	f.rootNodeMu.Lock()
	defer f.rootNodeMu.Unlock()

	// Check if we haven't found it already
	if f._rootNode != nil {
		return f._rootNode, nil
	}

	// Check for pre-existing root
	absRoot := f.srv.FS.GetRoot()
	node, err := f.findDir(absRoot, f.root)
	//log.Printf("findRoot findDir %p %v", node, err)
	if err == nil {
		f._rootNode = node
		return node, nil
	}
	// propagate fs.ErrorIsFile and other errors unchanged
	if !create || err != fs.ErrorDirNotFound {
		return nil, err
	}

	//..not found so create the root directory
	f._rootNode, err = f.mkdir(absRoot, f.root)
	return f._rootNode, err
}
   397  
   398  // clearRoot unsets the root directory
   399  func (f *Fs) clearRoot() {
   400  	f.rootNodeMu.Lock()
   401  	f._rootNode = nil
   402  	f.rootNodeMu.Unlock()
   403  	//log.Printf("cleared root directory")
   404  }
   405  
   406  // CleanUp deletes all files currently in trash
   407  func (f *Fs) CleanUp(ctx context.Context) (err error) {
   408  	trash := f.srv.FS.GetTrash()
   409  	items := []*mega.Node{}
   410  	_, err = f.list(ctx, trash, func(item *mega.Node) bool {
   411  		items = append(items, item)
   412  		return false
   413  	})
   414  	if err != nil {
   415  		return errors.Wrap(err, "CleanUp failed to list items in trash")
   416  	}
   417  	fs.Infof(f, "Deleting %d items from the trash", len(items))
   418  	errors := 0
   419  	// similar to f.deleteNode(trash) but with HardDelete as true
   420  	for _, item := range items {
   421  		fs.Debugf(f, "Deleting trash %q", item.GetName())
   422  		deleteErr := f.pacer.Call(func() (bool, error) {
   423  			err := f.srv.Delete(item, true)
   424  			return shouldRetry(err)
   425  		})
   426  		if deleteErr != nil {
   427  			err = deleteErr
   428  			errors++
   429  		}
   430  	}
   431  	fs.Infof(f, "Deleted %d items from the trash with %d errors", len(items), errors)
   432  	return err
   433  }
   434  
   435  // Return an Object from a path
   436  //
   437  // If it can't be found it returns the error fs.ErrorObjectNotFound.
   438  func (f *Fs) newObjectWithInfo(remote string, info *mega.Node) (fs.Object, error) {
   439  	o := &Object{
   440  		fs:     f,
   441  		remote: remote,
   442  	}
   443  	var err error
   444  	if info != nil {
   445  		// Set info
   446  		err = o.setMetaData(info)
   447  	} else {
   448  		err = o.readMetaData() // reads info and meta, returning an error
   449  	}
   450  	if err != nil {
   451  		return nil, err
   452  	}
   453  	return o, nil
   454  }
   455  
// NewObject finds the Object at remote.  If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
	// passing nil info forces the metadata to be read from the server
	return f.newObjectWithInfo(remote, nil)
}
   461  
// listFn is the type of the user function called by list for each
// node found in a directory.
//
// Returning true stops the listing early.
type listFn func(*mega.Node) bool
   469  
   470  // Lists the directory required calling the user function on each item found
   471  //
   472  // If the user fn ever returns true then it early exits with found = true
   473  func (f *Fs) list(ctx context.Context, dir *mega.Node, fn listFn) (found bool, err error) {
   474  	nodes, err := f.srv.FS.GetChildren(dir)
   475  	if err != nil {
   476  		return false, errors.Wrapf(err, "list failed")
   477  	}
   478  	for _, item := range nodes {
   479  		if fn(item) {
   480  			found = true
   481  			break
   482  		}
   483  	}
   484  	return
   485  }
   486  
// List the objects and directories in dir into entries.  The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	dirNode, err := f.lookupDir(dir)
	if err != nil {
		return nil, err
	}
	var iErr error // error raised inside the callback, reported separately
	_, err = f.list(ctx, dirNode, func(info *mega.Node) bool {
		remote := path.Join(dir, info.GetName())
		switch info.GetType() {
		case mega.FOLDER, mega.ROOT, mega.INBOX, mega.TRASH:
			// directory-like nodes become fs.Directory entries
			d := fs.NewDir(remote, info.GetTimeStamp()).SetID(info.GetHash())
			entries = append(entries, d)
		case mega.FILE:
			o, err := f.newObjectWithInfo(remote, info)
			if err != nil {
				iErr = err
				return true // stop listing on the first error
			}
			entries = append(entries, o)
		}
		return false
	})
	if err != nil {
		return nil, err
	}
	if iErr != nil {
		return nil, iErr
	}
	return entries, nil
}
   526  
// Creates from the parameters passed in a half finished Object which
// must have setMetaData called on it
//
// Returns the dirNode, object, leaf and error
//
// Used to create new objects
//
// Note that modTime and size are currently unused - they are kept in
// the signature for symmetry with other backends' createObject.
func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Object, dirNode *mega.Node, leaf string, err error) {
	dirNode, leaf, err = f.mkdirParent(remote)
	if err != nil {
		return nil, nil, leaf, err
	}
	// Temporary Object under construction
	o = &Object{
		fs:     f,
		remote: remote,
	}
	return o, dirNode, leaf, nil
}
   545  
   546  // Put the object
   547  //
   548  // Copy the reader in to the new object which is returned
   549  //
   550  // The new object may have been created if an error is returned
   551  // PutUnchecked uploads the object
   552  //
   553  // This will create a duplicate if we upload a new file without
   554  // checking to see if there is one already - use Put() for that.
   555  func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
   556  	existingObj, err := f.newObjectWithInfo(src.Remote(), nil)
   557  	switch err {
   558  	case nil:
   559  		return existingObj, existingObj.Update(ctx, in, src, options...)
   560  	case fs.ErrorObjectNotFound:
   561  		// Not found so create it
   562  		return f.PutUnchecked(ctx, in, src)
   563  	default:
   564  		return nil, err
   565  	}
   566  }
   567  
// PutUnchecked the object
//
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
//
// This will create a duplicate if we upload a new file without
// checking to see if there is one already - use Put() for that.
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	remote := src.Remote()
	size := src.Size()
	modTime := src.ModTime(ctx)

	// makes the parent directories and a half-finished Object
	o, _, _, err := f.createObject(remote, modTime, size)
	if err != nil {
		return nil, err
	}
	return o, o.Update(ctx, in, src, options...)
}
   588  
   589  // Mkdir creates the directory if it doesn't exist
   590  func (f *Fs) Mkdir(ctx context.Context, dir string) error {
   591  	rootNode, err := f.findRoot(true)
   592  	if err != nil {
   593  		return err
   594  	}
   595  	_, err = f.mkdir(rootNode, dir)
   596  	return errors.Wrap(err, "Mkdir failed")
   597  }
   598  
// deleteNode removes a file or directory, observing useTrash
//
// With opt.HardDelete set the node is removed permanently, otherwise
// it is moved to the trash.
func (f *Fs) deleteNode(node *mega.Node) (err error) {
	err = f.pacer.Call(func() (bool, error) {
		err = f.srv.Delete(node, f.opt.HardDelete)
		return shouldRetry(err)
	})
	return err
}
   607  
// purgeCheck removes the directory dir, if check is set then it
// refuses to do so if it has anything in
//
// Holds mkdirMu for the duration so a concurrent mkdir can't
// re-create the directory while it is being removed.
func (f *Fs) purgeCheck(dir string, check bool) error {
	f.mkdirMu.Lock()
	defer f.mkdirMu.Unlock()

	rootNode, err := f.findRoot(false)
	if err != nil {
		return err
	}
	dirNode, err := f.findDir(rootNode, dir)
	if err != nil {
		return err
	}

	if check {
		children, err := f.srv.FS.GetChildren(dirNode)
		if err != nil {
			return errors.Wrap(err, "purgeCheck GetChildren failed")
		}
		if len(children) > 0 {
			return fs.ErrorDirectoryNotEmpty
		}
	}

	waitEvent := f.srv.WaitEventsStart()

	err = f.deleteNode(dirNode)
	if err != nil {
		return errors.Wrap(err, "delete directory node failed")
	}

	// Remove the root node if we just deleted it
	if dirNode == rootNode {
		f.clearRoot()
	}

	// wait for the server side events to propagate so a subsequent
	// listing doesn't still show the deleted directory
	f.srv.WaitEvents(waitEvent, eventWaitTime)
	return nil
}
   648  
// Rmdir deletes the directory dir
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
	return f.purgeCheck(dir, true)
}
   655  
// Precision return the precision of this Fs
//
// Mega doesn't support setting modification times so this is
// fs.ModTimeNotSupported.
func (f *Fs) Precision() time.Duration {
	return fs.ModTimeNotSupported
}
   660  
// Purge deletes all the files and the container
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context) error {
	// delete the whole root without checking it is empty
	return f.purgeCheck("", false)
}
   669  
// move a file or folder (srcFs, srcRemote, info) to (f, dstRemote)
//
// info will be updated
//
// An empty srcRemote/dstRemote means the root of that Fs, in which
// case the parent of the Fs root itself is used.  The move is done in
// two steps: a server side Move into the destination directory (if
// different) followed by a Rename (if the leaf name changed).
func (f *Fs) move(dstRemote string, srcFs *Fs, srcRemote string, info *mega.Node) (err error) {
	var (
		dstFs                  = f
		srcDirNode, dstDirNode *mega.Node
		srcParent, dstParent   string
		srcLeaf, dstLeaf       string
	)

	if dstRemote != "" {
		// lookup or create the destination parent directory
		dstDirNode, dstLeaf, err = dstFs.mkdirParent(dstRemote)
	} else {
		// find or create the parent of the root directory
		absRoot := dstFs.srv.FS.GetRoot()
		dstParent, dstLeaf = path.Split(dstFs.root)
		dstDirNode, err = dstFs.mkdir(absRoot, dstParent)
	}
	if err != nil {
		return errors.Wrap(err, "server side move failed to make dst parent dir")
	}

	if srcRemote != "" {
		// lookup the existing parent directory
		srcDirNode, srcLeaf, err = srcFs.lookupParentDir(srcRemote)
	} else {
		// lookup the existing root parent
		absRoot := srcFs.srv.FS.GetRoot()
		srcParent, srcLeaf = path.Split(srcFs.root)
		srcDirNode, err = f.findDir(absRoot, srcParent)
	}
	if err != nil {
		return errors.Wrap(err, "server side move failed to lookup src parent dir")
	}

	// move the object into its new directory if required
	if srcDirNode != dstDirNode && srcDirNode.GetHash() != dstDirNode.GetHash() {
		//log.Printf("move src %p %q dst %p %q", srcDirNode, srcDirNode.GetName(), dstDirNode, dstDirNode.GetName())
		err = f.pacer.Call(func() (bool, error) {
			err = f.srv.Move(info, dstDirNode)
			return shouldRetry(err)
		})
		if err != nil {
			return errors.Wrap(err, "server side move failed")
		}
	}

	waitEvent := f.srv.WaitEventsStart()

	// rename the object if required
	if srcLeaf != dstLeaf {
		//log.Printf("rename %q to %q", srcLeaf, dstLeaf)
		err = f.pacer.Call(func() (bool, error) {
			err = f.srv.Rename(info, dstLeaf)
			return shouldRetry(err)
		})
		if err != nil {
			return errors.Wrap(err, "server side rename failed")
		}
	}

	// wait for the server side events to propagate
	f.srv.WaitEvents(waitEvent, eventWaitTime)

	return nil
}
   737  
// Move src to this remote using server side move operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	dstFs := f

	//log.Printf("Move %q -> %q", src.Remote(), remote)
	srcObj, ok := src.(*Object)
	if !ok {
		fs.Debugf(src, "Can't move - not same remote type")
		return nil, fs.ErrorCantMove
	}

	// Do the move
	err := f.move(remote, srcObj.fs, srcObj.remote, srcObj.info)
	if err != nil {
		return nil, err
	}

	// Create a destination object re-using the moved node
	dstObj := &Object{
		fs:     dstFs,
		remote: remote,
		info:   srcObj.info,
	}
	return dstObj, nil
}
   771  
// DirMove moves src, srcRemote to this remote at dstRemote
// using server side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
	dstFs := f
	srcFs, ok := src.(*Fs)
	if !ok {
		fs.Debugf(srcFs, "Can't move directory - not same remote type")
		return fs.ErrorCantDirMove
	}

	// find the source directory node
	info, err := srcFs.lookupDir(srcRemote)
	if err != nil {
		return err
	}

	// check the destination doesn't exist
	_, err = dstFs.lookupDir(dstRemote)
	if err == nil {
		return fs.ErrorDirExists
	} else if err != fs.ErrorDirNotFound {
		return errors.Wrap(err, "DirMove error while checking dest directory")
	}

	// Do the move
	err = f.move(dstRemote, srcFs, srcRemote, info)
	if err != nil {
		return err
	}

	// Clear src root node cache if we just moved the root away
	if srcRemote == "" {
		srcFs.clearRoot()
	}

	return nil
}
   815  
// DirCacheFlush is an optional interface to flush internal directory
// cache - currently a no-op for mega.
func (f *Fs) DirCacheFlush() {
	// f.dirCache.ResetRoot()
	// FIXME Flush the mega somehow?
}
   821  
// Hashes returns the supported hash sets - mega exposes no usable
// content hash so this is hash.None.
func (f *Fs) Hashes() hash.Set {
	return hash.Set(hash.None)
}
   826  
// PublicLink generates a public link to the remote path (usually readable by anyone)
func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err error) {
	root, err := f.findRoot(false)
	if err != nil {
		return "", errors.Wrap(err, "PublicLink failed to find root node")
	}
	node, err := f.findNode(root, remote)
	if err != nil {
		return "", errors.Wrap(err, "PublicLink failed to find path")
	}
	// second argument includes the decryption key in the link
	link, err = f.srv.Link(node, true)
	if err != nil {
		return "", errors.Wrap(err, "PublicLink failed to create link")
	}
	return link, nil
}
   843  
// MergeDirs merges the contents of all the directories passed
// in into the first one and rmdirs the other directories.
//
// Nodes are looked up by their ID (hash) from the in-memory tree.
func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
	if len(dirs) < 2 {
		return nil
	}
	// find dst directory
	dstDir := dirs[0]
	dstDirNode := f.srv.FS.HashLookup(dstDir.ID())
	if dstDirNode == nil {
		return errors.Errorf("MergeDirs failed to find node for: %v", dstDir)
	}
	for _, srcDir := range dirs[1:] {
		// find src directory
		srcDirNode := f.srv.FS.HashLookup(srcDir.ID())
		if srcDirNode == nil {
			return errors.Errorf("MergeDirs failed to find node for: %v", srcDir)
		}

		// list the objects
		infos := []*mega.Node{}
		_, err := f.list(ctx, srcDirNode, func(info *mega.Node) bool {
			infos = append(infos, info)
			return false
		})
		if err != nil {
			return errors.Wrapf(err, "MergeDirs list failed on %v", srcDir)
		}
		// move them into place
		for _, info := range infos {
			fs.Infof(srcDir, "merging %q", info.GetName())
			err = f.pacer.Call(func() (bool, error) {
				err = f.srv.Move(info, dstDirNode)
				return shouldRetry(err)
			})
			if err != nil {
				return errors.Wrapf(err, "MergeDirs move failed on %q in %v", info.GetName(), srcDir)
			}
		}
		// rmdir (into trash) the now empty source directory
		fs.Infof(srcDir, "removing empty directory")
		err = f.deleteNode(srcDirNode)
		if err != nil {
			return errors.Wrapf(err, "MergeDirs move failed to rmdir %q", srcDir)
		}
	}
	return nil
}
   892  
// About gets quota information from the account
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
	var q mega.QuotaResp
	var err error
	err = f.pacer.Call(func() (bool, error) {
		q, err = f.srv.GetQuota()
		return shouldRetry(err)
	})
	if err != nil {
		return nil, errors.Wrap(err, "failed to get Mega Quota")
	}
	usage := &fs.Usage{
		Total: fs.NewUsageValue(int64(q.Mstrg)),           // quota of bytes that can be used
		Used:  fs.NewUsageValue(int64(q.Cstrg)),           // bytes in use
		Free:  fs.NewUsageValue(int64(q.Mstrg - q.Cstrg)), // bytes which can be uploaded before reaching the quota
	}
	return usage, nil
}
   911  
   912  // ------------------------------------------------------------
   913  
// Fs returns the parent Fs that this Object belongs to.
func (o *Object) Fs() fs.Info {
	return o.fs
}
   918  
   919  // Return a string version
   920  func (o *Object) String() string {
   921  	if o == nil {
   922  		return "<nil>"
   923  	}
   924  	return o.remote
   925  }
   926  
// Remote returns the remote path of the object within the Fs.
func (o *Object) Remote() string {
	return o.remote
}
   931  
// Hash returns the hashes of an object.
//
// Always returns hash.ErrUnsupported - the exact nature of Mega's
// hashes is an open question (see the package comment), so none are
// exposed here.
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
	return "", hash.ErrUnsupported
}
   936  
// Size returns the size of an object in bytes as reported by the
// cached mega node info.
func (o *Object) Size() int64 {
	return o.info.GetSize()
}
   941  
   942  // setMetaData sets the metadata from info
   943  func (o *Object) setMetaData(info *mega.Node) (err error) {
   944  	if info.GetType() != mega.FILE {
   945  		return fs.ErrorNotAFile
   946  	}
   947  	o.info = info
   948  	return nil
   949  }
   950  
   951  // readMetaData gets the metadata if it hasn't already been fetched
   952  //
   953  // it also sets the info
   954  func (o *Object) readMetaData() (err error) {
   955  	if o.info != nil {
   956  		return nil
   957  	}
   958  	info, err := o.fs.readMetaDataForPath(o.remote)
   959  	if err != nil {
   960  		if err == fs.ErrorDirNotFound {
   961  			err = fs.ErrorObjectNotFound
   962  		}
   963  		return err
   964  	}
   965  	return o.setMetaData(info)
   966  }
   967  
// ModTime returns the modification time of the object
//
// Mega does not store a separate mtime here - this is the node's
// timestamp as reported by go-mega (presumably the upload/creation
// time - confirm against go-mega).
func (o *Object) ModTime(ctx context.Context) time.Time {
	return o.info.GetTimeStamp()
}
   976  
// SetModTime sets the modification time of the local fs object
//
// Not supported by this backend - always returns
// fs.ErrorCantSetModTime.
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
	return fs.ErrorCantSetModTime
}
   981  
// Storable returns a boolean showing whether this object is storable.
// All mega objects are storable.
func (o *Object) Storable() bool {
	return true
}
   986  
// openObject represents a download in progress
type openObject struct {
	mu     sync.Mutex     // guards all the fields below
	o      *Object        // object being read
	d      *mega.Download // download session from go-mega
	id     int            // index of the next chunk to fetch
	skip   int64          // bytes still to discard to honour a seek offset
	chunk  []byte         // unread remainder of the current chunk
	closed bool           // set once Close has run successfully
}
   997  
   998  // get the next chunk
   999  func (oo *openObject) getChunk() (err error) {
  1000  	if oo.id >= oo.d.Chunks() {
  1001  		return io.EOF
  1002  	}
  1003  	var chunk []byte
  1004  	err = oo.o.fs.pacer.Call(func() (bool, error) {
  1005  		chunk, err = oo.d.DownloadChunk(oo.id)
  1006  		return shouldRetry(err)
  1007  	})
  1008  	if err != nil {
  1009  		return err
  1010  	}
  1011  	oo.id++
  1012  	oo.chunk = chunk
  1013  	return nil
  1014  }
  1015  
  1016  // Read reads up to len(p) bytes into p.
  1017  func (oo *openObject) Read(p []byte) (n int, err error) {
  1018  	oo.mu.Lock()
  1019  	defer oo.mu.Unlock()
  1020  	if oo.closed {
  1021  		return 0, errors.New("read on closed file")
  1022  	}
  1023  	// Skip data at the start if requested
  1024  	for oo.skip > 0 {
  1025  		_, size, err := oo.d.ChunkLocation(oo.id)
  1026  		if err != nil {
  1027  			return 0, err
  1028  		}
  1029  		if oo.skip < int64(size) {
  1030  			break
  1031  		}
  1032  		oo.id++
  1033  		oo.skip -= int64(size)
  1034  	}
  1035  	if len(oo.chunk) == 0 {
  1036  		err = oo.getChunk()
  1037  		if err != nil {
  1038  			return 0, err
  1039  		}
  1040  		if oo.skip > 0 {
  1041  			oo.chunk = oo.chunk[oo.skip:]
  1042  			oo.skip = 0
  1043  		}
  1044  	}
  1045  	n = copy(p, oo.chunk)
  1046  	oo.chunk = oo.chunk[n:]
  1047  	return n, nil
  1048  }
  1049  
  1050  // Close closed the file - MAC errors are reported here
  1051  func (oo *openObject) Close() (err error) {
  1052  	oo.mu.Lock()
  1053  	defer oo.mu.Unlock()
  1054  	if oo.closed {
  1055  		return nil
  1056  	}
  1057  	err = oo.o.fs.pacer.Call(func() (bool, error) {
  1058  		err = oo.d.Finish()
  1059  		return shouldRetry(err)
  1060  	})
  1061  	if err != nil {
  1062  		return errors.Wrap(err, "failed to finish download")
  1063  	}
  1064  	oo.closed = true
  1065  	return nil
  1066  }
  1067  
  1068  // Open an object for read
  1069  func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
  1070  	var offset, limit int64 = 0, -1
  1071  	for _, option := range options {
  1072  		switch x := option.(type) {
  1073  		case *fs.SeekOption:
  1074  			offset = x.Offset
  1075  		case *fs.RangeOption:
  1076  			offset, limit = x.Decode(o.Size())
  1077  		default:
  1078  			if option.Mandatory() {
  1079  				fs.Logf(o, "Unsupported mandatory option: %v", option)
  1080  			}
  1081  		}
  1082  	}
  1083  
  1084  	var d *mega.Download
  1085  	err = o.fs.pacer.Call(func() (bool, error) {
  1086  		d, err = o.fs.srv.NewDownload(o.info)
  1087  		return shouldRetry(err)
  1088  	})
  1089  	if err != nil {
  1090  		return nil, errors.Wrap(err, "open download file failed")
  1091  	}
  1092  
  1093  	oo := &openObject{
  1094  		o:    o,
  1095  		d:    d,
  1096  		skip: offset,
  1097  	}
  1098  
  1099  	return readers.NewLimitedReadCloser(oo, limit), nil
  1100  }
  1101  
  1102  // Update the object with the contents of the io.Reader, modTime and size
  1103  //
  1104  // If existing is set then it updates the object rather than creating a new one
  1105  //
  1106  // The new object may have been created if an error is returned
  1107  func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
  1108  	size := src.Size()
  1109  	if size < 0 {
  1110  		return errors.New("mega backend can't upload a file of unknown length")
  1111  	}
  1112  	//modTime := src.ModTime(ctx)
  1113  	remote := o.Remote()
  1114  
  1115  	// Create the parent directory
  1116  	dirNode, leaf, err := o.fs.mkdirParent(remote)
  1117  	if err != nil {
  1118  		return errors.Wrap(err, "update make parent dir failed")
  1119  	}
  1120  
  1121  	var u *mega.Upload
  1122  	err = o.fs.pacer.Call(func() (bool, error) {
  1123  		u, err = o.fs.srv.NewUpload(dirNode, leaf, size)
  1124  		return shouldRetry(err)
  1125  	})
  1126  	if err != nil {
  1127  		return errors.Wrap(err, "upload file failed to create session")
  1128  	}
  1129  
  1130  	// Upload the chunks
  1131  	// FIXME do this in parallel
  1132  	for id := 0; id < u.Chunks(); id++ {
  1133  		_, chunkSize, err := u.ChunkLocation(id)
  1134  		if err != nil {
  1135  			return errors.Wrap(err, "upload failed to read chunk location")
  1136  		}
  1137  		chunk := make([]byte, chunkSize)
  1138  		_, err = io.ReadFull(in, chunk)
  1139  		if err != nil {
  1140  			return errors.Wrap(err, "upload failed to read data")
  1141  		}
  1142  
  1143  		err = o.fs.pacer.Call(func() (bool, error) {
  1144  			err = u.UploadChunk(id, chunk)
  1145  			return shouldRetry(err)
  1146  		})
  1147  		if err != nil {
  1148  			return errors.Wrap(err, "upload file failed to upload chunk")
  1149  		}
  1150  	}
  1151  
  1152  	// Finish the upload
  1153  	var info *mega.Node
  1154  	err = o.fs.pacer.Call(func() (bool, error) {
  1155  		info, err = u.Finish()
  1156  		return shouldRetry(err)
  1157  	})
  1158  	if err != nil {
  1159  		return errors.Wrap(err, "failed to finish upload")
  1160  	}
  1161  
  1162  	// If the upload succeeded and the original object existed, then delete it
  1163  	if o.info != nil {
  1164  		err = o.fs.deleteNode(o.info)
  1165  		if err != nil {
  1166  			return errors.Wrap(err, "upload failed to remove old version")
  1167  		}
  1168  		o.info = nil
  1169  	}
  1170  
  1171  	return o.setMetaData(info)
  1172  }
  1173  
  1174  // Remove an object
  1175  func (o *Object) Remove(ctx context.Context) error {
  1176  	err := o.fs.deleteNode(o.info)
  1177  	if err != nil {
  1178  		return errors.Wrap(err, "Remove object failed")
  1179  	}
  1180  	return nil
  1181  }
  1182  
// ID returns the ID of the Object if known, or "" if not.
// The ID is the mega node hash.
func (o *Object) ID() string {
	return o.info.GetHash()
}
  1187  
// Check the interfaces are satisfied at compile time
var (
	_ fs.Fs              = (*Fs)(nil)
	_ fs.Purger          = (*Fs)(nil)
	_ fs.Mover           = (*Fs)(nil)
	_ fs.PutUncheckeder  = (*Fs)(nil)
	_ fs.DirMover        = (*Fs)(nil)
	_ fs.DirCacheFlusher = (*Fs)(nil)
	_ fs.PublicLinker    = (*Fs)(nil)
	_ fs.MergeDirser     = (*Fs)(nil)
	_ fs.Abouter         = (*Fs)(nil)
	_ fs.Object          = (*Object)(nil)
	_ fs.IDer            = (*Object)(nil)
)