github.com/10XDev/rclone@v1.52.3-0.20200626220027-16af9ab76b2a/backend/mega/mega.go (about)

     1  // Package mega provides an interface to the Mega
     2  // object storage system.
     3  package mega
     4  
     5  /*
     6  Open questions
     7  * Does mega support a content hash - what exactly are the mega hashes?
     8  * Can mega support setting modification times?
     9  
    10  Improvements:
    11  * Uploads could be done in parallel
    12  * Downloads would be more efficient done in one go
    13  * Uploads would be more efficient with bigger chunks
    14  * Looks like mega can support server side copy, but it isn't implemented in go-mega
    15  * Upload can set modtime... - set as int64_t - can set ctime and mtime?
    16  */
    17  
    18  import (
    19  	"context"
    20  	"fmt"
    21  	"io"
    22  	"path"
    23  	"strings"
    24  	"sync"
    25  	"time"
    26  
    27  	"github.com/pkg/errors"
    28  	"github.com/rclone/rclone/fs"
    29  	"github.com/rclone/rclone/fs/config"
    30  	"github.com/rclone/rclone/fs/config/configmap"
    31  	"github.com/rclone/rclone/fs/config/configstruct"
    32  	"github.com/rclone/rclone/fs/config/obscure"
    33  	"github.com/rclone/rclone/fs/fshttp"
    34  	"github.com/rclone/rclone/fs/hash"
    35  	"github.com/rclone/rclone/lib/encoder"
    36  	"github.com/rclone/rclone/lib/pacer"
    37  	"github.com/rclone/rclone/lib/readers"
    38  	mega "github.com/t3rm1n4l/go-mega"
    39  )
    40  
// Timing and pacing parameters for talking to the mega API.
const (
	minSleep      = 10 * time.Millisecond  // minimum pacer delay between API calls
	maxSleep      = 2 * time.Second        // maximum pacer delay when backing off
	eventWaitTime = 500 * time.Millisecond // how long to wait for server side events to settle
	decayConstant = 2                      // bigger for slower decay, exponential
)

// Cache of logged in mega sessions, shared between all remotes using
// the same user so the (expensive) in-memory node tree is built once.
var (
	megaCacheMu sync.Mutex                // mutex for the below
	megaCache   = map[string]*mega.Mega{} // cache logged in Mega's by user
)
    52  
// Register with Fs
//
// Declares the "mega" backend and its configuration options so that
// rclone's config system can create instances via NewFs.
func init() {
	fs.Register(&fs.RegInfo{
		Name:        "mega",
		Description: "Mega",
		NewFs:       NewFs,
		Options: []fs.Option{{
			// Account user name - mandatory for login.
			Name:     "user",
			Help:     "User name",
			Required: true,
		}, {
			// Account password - stored obscured, revealed in NewFs.
			Name:       "pass",
			Help:       "Password.",
			Required:   true,
			IsPassword: true,
		}, {
			Name: "debug",
			Help: `Output more debug from Mega.

If this flag is set (along with -vv) it will print further debugging
information from the mega backend.`,
			Default:  false,
			Advanced: true,
		}, {
			Name: "hard_delete",
			Help: `Delete files permanently rather than putting them into the trash.

Normally the mega backend will put all deletions into the trash rather
than permanently deleting them.  If you specify this then rclone will
permanently delete objects instead.`,
			Default:  false,
			Advanced: true,
		}, {
			Name:     config.ConfigEncoding,
			Help:     config.ConfigEncodingHelp,
			Advanced: true,
			// Encode invalid UTF-8 bytes as json doesn't handle them properly.
			Default: (encoder.Base |
				encoder.EncodeInvalidUtf8),
		}},
	})
}
    95  
// Options defines the configuration for this backend
type Options struct {
	User       string               `config:"user"`        // login user name
	Pass       string               `config:"pass"`        // login password (obscured in config, revealed in NewFs)
	Debug      bool                 `config:"debug"`       // enable extra go-mega debug output
	HardDelete bool                 `config:"hard_delete"` // delete permanently instead of moving to trash
	Enc        encoder.MultiEncoder `config:"encoding"`    // filename encoding scheme
}
   104  
// Fs represents a remote mega
//
// The *mega.Mega connection may be shared with other Fs instances for
// the same user (see megaCache in NewFs).
type Fs struct {
	name       string       // name of this remote
	root       string       // the path we are working on
	opt        Options      // parsed config options
	features   *fs.Features // optional features
	srv        *mega.Mega   // the connection to the server (possibly shared)
	pacer      *fs.Pacer    // pacer for API calls
	rootNodeMu sync.Mutex   // mutex protecting _rootNode
	_rootNode  *mega.Node   // cached root node - call findRoot to use this
	mkdirMu    sync.Mutex   // used to serialize calls to mkdir / rmdir
}
   117  
// Object describes a mega object
//
// Will definitely have info but maybe not meta
//
// Normally rclone would just store an ID here but go-mega and mega.nz
// expect you to build an entire tree of all the objects in memory.
// In this case we just store a pointer to the object.
type Object struct {
	fs     *Fs        // what this object is part of
	remote string     // The remote path
	info   *mega.Node // pointer to the mega node in the shared tree
}
   130  
   131  // ------------------------------------------------------------
   132  
   133  // Name of the remote (as passed into NewFs)
   134  func (f *Fs) Name() string {
   135  	return f.name
   136  }
   137  
   138  // Root of the remote (as passed into NewFs)
   139  func (f *Fs) Root() string {
   140  	return f.root
   141  }
   142  
   143  // String converts this Fs to a string
   144  func (f *Fs) String() string {
   145  	return fmt.Sprintf("mega root '%s'", f.root)
   146  }
   147  
   148  // Features returns the optional features of this Fs
   149  func (f *Fs) Features() *fs.Features {
   150  	return f.features
   151  }
   152  
   153  // parsePath parses a mega 'url'
   154  func parsePath(path string) (root string) {
   155  	root = strings.Trim(path, "/")
   156  	return
   157  }
   158  
   159  // shouldRetry returns a boolean as to whether this err deserves to be
   160  // retried.  It returns the err as a convenience
   161  func shouldRetry(err error) (bool, error) {
   162  	// Let the mega library handle the low level retries
   163  	return false, err
   164  	/*
   165  		switch errors.Cause(err) {
   166  		case mega.EAGAIN, mega.ERATELIMIT, mega.ETEMPUNAVAIL:
   167  			return true, err
   168  		}
   169  		return fserrors.ShouldRetry(err), err
   170  	*/
   171  }
   172  
   173  // readMetaDataForPath reads the metadata from the path
   174  func (f *Fs) readMetaDataForPath(remote string) (info *mega.Node, err error) {
   175  	rootNode, err := f.findRoot(false)
   176  	if err != nil {
   177  		return nil, err
   178  	}
   179  	return f.findObject(rootNode, remote)
   180  }
   181  
// NewFs constructs an Fs from the path, container:path
//
// It reveals the obscured password, logs in (re-using a cached session
// for the same user if one exists) and checks whether the root points
// at a file, in which case it returns fs.ErrorIsFile with the Fs
// rooted at the parent directory.
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	// Parse config into Options struct
	opt := new(Options)
	err := configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}
	if opt.Pass != "" {
		var err error
		opt.Pass, err = obscure.Reveal(opt.Pass)
		if err != nil {
			return nil, errors.Wrap(err, "couldn't decrypt password")
		}
	}

	// cache *mega.Mega on username so we can re-use and share
	// them between remotes.  They are expensive to make as they
	// contain all the objects and sharing the objects makes the
	// move code easier as we don't have to worry about mixing
	// them up between different remotes.
	megaCacheMu.Lock()
	defer megaCacheMu.Unlock()
	srv := megaCache[opt.User]
	if srv == nil {
		// No cached session - create, configure and log in a new one.
		srv = mega.New().SetClient(fshttp.NewClient(fs.Config))
		srv.SetRetries(fs.Config.LowLevelRetries) // let mega do the low level retries
		srv.SetLogger(func(format string, v ...interface{}) {
			fs.Infof("*go-mega*", format, v...)
		})
		if opt.Debug {
			srv.SetDebugger(func(format string, v ...interface{}) {
				fs.Debugf("*go-mega*", format, v...)
			})
		}

		err := srv.Login(opt.User, opt.Pass)
		if err != nil {
			return nil, errors.Wrap(err, "couldn't login")
		}
		megaCache[opt.User] = srv
	}

	root = parsePath(root)
	f := &Fs{
		name:  name,
		root:  root,
		opt:   *opt,
		srv:   srv,
		pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
	}
	f.features = (&fs.Features{
		DuplicateFiles:          true,
		CanHaveEmptyDirectories: true,
	}).Fill(f)

	// Find the root node and check if it is a file or not
	//
	// NOTE(review): errors other than the two cases below fall through
	// and are silently discarded, returning a working Fs - presumably
	// deliberate best-effort behaviour, but worth confirming.
	_, err = f.findRoot(false)
	switch err {
	case nil:
		// root node found and is a directory
	case fs.ErrorDirNotFound:
		// root node not found, so can't be a file
	case fs.ErrorIsFile:
		// root node is a file so point to parent directory
		root = path.Dir(root)
		if root == "." {
			root = ""
		}
		f.root = root
		return f, err
	}
	return f, nil
}
   256  
   257  // splitNodePath splits nodePath into / separated parts, returning nil if it
   258  // should refer to the root.
   259  // It also encodes the parts into backend specific encoding
   260  func (f *Fs) splitNodePath(nodePath string) (parts []string) {
   261  	nodePath = path.Clean(nodePath)
   262  	if nodePath == "." || nodePath == "/" {
   263  		return nil
   264  	}
   265  	nodePath = f.opt.Enc.FromStandardPath(nodePath)
   266  	return strings.Split(nodePath, "/")
   267  }
   268  
   269  // findNode looks up the node for the path of the name given from the root given
   270  //
   271  // It returns mega.ENOENT if it wasn't found
   272  func (f *Fs) findNode(rootNode *mega.Node, nodePath string) (*mega.Node, error) {
   273  	parts := f.splitNodePath(nodePath)
   274  	if parts == nil {
   275  		return rootNode, nil
   276  	}
   277  	nodes, err := f.srv.FS.PathLookup(rootNode, parts)
   278  	if err != nil {
   279  		return nil, err
   280  	}
   281  	return nodes[len(nodes)-1], nil
   282  }
   283  
   284  // findDir finds the directory rooted from the node passed in
   285  func (f *Fs) findDir(rootNode *mega.Node, dir string) (node *mega.Node, err error) {
   286  	node, err = f.findNode(rootNode, dir)
   287  	if err == mega.ENOENT {
   288  		return nil, fs.ErrorDirNotFound
   289  	} else if err == nil && node.GetType() == mega.FILE {
   290  		return nil, fs.ErrorIsFile
   291  	}
   292  	return node, err
   293  }
   294  
   295  // findObject looks up the node for the object of the name given
   296  func (f *Fs) findObject(rootNode *mega.Node, file string) (node *mega.Node, err error) {
   297  	node, err = f.findNode(rootNode, file)
   298  	if err == mega.ENOENT {
   299  		return nil, fs.ErrorObjectNotFound
   300  	} else if err == nil && node.GetType() != mega.FILE {
   301  		return nil, fs.ErrorNotAFile
   302  	}
   303  	return node, err
   304  }
   305  
   306  // lookupDir looks up the node for the directory of the name given
   307  //
   308  // if create is true it tries to create the root directory if not found
   309  func (f *Fs) lookupDir(dir string) (*mega.Node, error) {
   310  	rootNode, err := f.findRoot(false)
   311  	if err != nil {
   312  		return nil, err
   313  	}
   314  	return f.findDir(rootNode, dir)
   315  }
   316  
   317  // lookupParentDir finds the parent node for the remote passed in
   318  func (f *Fs) lookupParentDir(remote string) (dirNode *mega.Node, leaf string, err error) {
   319  	parent, leaf := path.Split(remote)
   320  	dirNode, err = f.lookupDir(parent)
   321  	return dirNode, leaf, err
   322  }
   323  
// mkdir makes the directory and any parent directories for the
// directory of the name given
//
// It first finds the deepest existing ancestor of dir and then
// creates the remaining path components one by one from there.
// Serialized with rmdir via mkdirMu.
func (f *Fs) mkdir(rootNode *mega.Node, dir string) (node *mega.Node, err error) {
	f.mkdirMu.Lock()
	defer f.mkdirMu.Unlock()

	parts := f.splitNodePath(dir)
	if parts == nil {
		// dir refers to the root itself - nothing to create
		return rootNode, nil
	}
	var i int
	// look up until we find a directory which exists
	// i counts how many trailing components are missing
	for i = 0; i <= len(parts); i++ {
		var nodes []*mega.Node
		nodes, err = f.srv.FS.PathLookup(rootNode, parts[:len(parts)-i])
		if err == nil {
			if len(nodes) == 0 {
				node = rootNode
			} else {
				node = nodes[len(nodes)-1]
			}
			break
		}
		if err != mega.ENOENT {
			return nil, errors.Wrap(err, "mkdir lookup failed")
		}
	}
	if err != nil {
		// even the empty prefix failed to look up - rootNode is bad
		return nil, errors.Wrap(err, "internal error: mkdir called with non existent root node")
	}
	// i is number of directories to create (may be 0)
	// node is directory to create them from
	for _, name := range parts[len(parts)-i:] {
		// create directory called name in node
		err = f.pacer.Call(func() (bool, error) {
			node, err = f.srv.CreateDir(name, node)
			return shouldRetry(err)
		})
		if err != nil {
			return nil, errors.Wrap(err, "mkdir create node failed")
		}
	}
	return node, nil
}
   368  
   369  // mkdirParent creates the parent directory of remote
   370  func (f *Fs) mkdirParent(remote string) (dirNode *mega.Node, leaf string, err error) {
   371  	rootNode, err := f.findRoot(true)
   372  	if err != nil {
   373  		return nil, "", err
   374  	}
   375  	parent, leaf := path.Split(remote)
   376  	dirNode, err = f.mkdir(rootNode, parent)
   377  	return dirNode, leaf, err
   378  }
   379  
// findRoot looks up the root directory node and returns it.
//
// The result is cached in f._rootNode under rootNodeMu; clearRoot
// discards the cache after the root is deleted or moved.
//
// if create is true it tries to create the root directory if not found
func (f *Fs) findRoot(create bool) (*mega.Node, error) {
	f.rootNodeMu.Lock()
	defer f.rootNodeMu.Unlock()

	// Check if we haven't found it already
	if f._rootNode != nil {
		return f._rootNode, nil
	}

	// Check for pre-existing root
	absRoot := f.srv.FS.GetRoot()
	node, err := f.findDir(absRoot, f.root)
	//log.Printf("findRoot findDir %p %v", node, err)
	if err == nil {
		f._rootNode = node
		return node, nil
	}
	if !create || err != fs.ErrorDirNotFound {
		// not allowed to create it, or failed for another reason
		// (e.g. fs.ErrorIsFile)
		return nil, err
	}

	//..not found so create the root directory
	f._rootNode, err = f.mkdir(absRoot, f.root)
	return f._rootNode, err
}
   408  
   409  // clearRoot unsets the root directory
   410  func (f *Fs) clearRoot() {
   411  	f.rootNodeMu.Lock()
   412  	f._rootNode = nil
   413  	f.rootNodeMu.Unlock()
   414  	//log.Printf("cleared root directory")
   415  }
   416  
   417  // CleanUp deletes all files currently in trash
   418  func (f *Fs) CleanUp(ctx context.Context) (err error) {
   419  	trash := f.srv.FS.GetTrash()
   420  	items := []*mega.Node{}
   421  	_, err = f.list(ctx, trash, func(item *mega.Node) bool {
   422  		items = append(items, item)
   423  		return false
   424  	})
   425  	if err != nil {
   426  		return errors.Wrap(err, "CleanUp failed to list items in trash")
   427  	}
   428  	fs.Infof(f, "Deleting %d items from the trash", len(items))
   429  	errors := 0
   430  	// similar to f.deleteNode(trash) but with HardDelete as true
   431  	for _, item := range items {
   432  		fs.Debugf(f, "Deleting trash %q", f.opt.Enc.ToStandardName(item.GetName()))
   433  		deleteErr := f.pacer.Call(func() (bool, error) {
   434  			err := f.srv.Delete(item, true)
   435  			return shouldRetry(err)
   436  		})
   437  		if deleteErr != nil {
   438  			err = deleteErr
   439  			errors++
   440  		}
   441  	}
   442  	fs.Infof(f, "Deleted %d items from the trash with %d errors", len(items), errors)
   443  	return err
   444  }
   445  
   446  // Return an Object from a path
   447  //
   448  // If it can't be found it returns the error fs.ErrorObjectNotFound.
   449  func (f *Fs) newObjectWithInfo(remote string, info *mega.Node) (fs.Object, error) {
   450  	o := &Object{
   451  		fs:     f,
   452  		remote: remote,
   453  	}
   454  	var err error
   455  	if info != nil {
   456  		// Set info
   457  		err = o.setMetaData(info)
   458  	} else {
   459  		err = o.readMetaData() // reads info and meta, returning an error
   460  	}
   461  	if err != nil {
   462  		return nil, err
   463  	}
   464  	return o, nil
   465  }
   466  
   467  // NewObject finds the Object at remote.  If it can't be found
   468  // it returns the error fs.ErrorObjectNotFound.
   469  func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
   470  	return f.newObjectWithInfo(remote, nil)
   471  }
   472  
// listFn is the callback used by list to process each node found.
//
// Returning true stops the listing early.
type listFn func(*mega.Node) bool
   480  
   481  // Lists the directory required calling the user function on each item found
   482  //
   483  // If the user fn ever returns true then it early exits with found = true
   484  func (f *Fs) list(ctx context.Context, dir *mega.Node, fn listFn) (found bool, err error) {
   485  	nodes, err := f.srv.FS.GetChildren(dir)
   486  	if err != nil {
   487  		return false, errors.Wrapf(err, "list failed")
   488  	}
   489  	for _, item := range nodes {
   490  		if fn(item) {
   491  			found = true
   492  			break
   493  		}
   494  	}
   495  	return
   496  }
   497  
// List the objects and directories in dir into entries.  The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	dirNode, err := f.lookupDir(dir)
	if err != nil {
		return nil, err
	}
	// iErr carries an error out of the callback below
	var iErr error
	_, err = f.list(ctx, dirNode, func(info *mega.Node) bool {
		remote := path.Join(dir, f.opt.Enc.ToStandardName(info.GetName()))
		switch info.GetType() {
		case mega.FOLDER, mega.ROOT, mega.INBOX, mega.TRASH:
			d := fs.NewDir(remote, info.GetTimeStamp()).SetID(info.GetHash())
			entries = append(entries, d)
		case mega.FILE:
			o, err := f.newObjectWithInfo(remote, info)
			if err != nil {
				iErr = err
				return true // stop the listing on error
			}
			entries = append(entries, o)
		}
		return false
	})
	if err != nil {
		return nil, err
	}
	if iErr != nil {
		return nil, iErr
	}
	return entries, nil
}
   537  
   538  // Creates from the parameters passed in a half finished Object which
   539  // must have setMetaData called on it
   540  //
   541  // Returns the dirNode, object, leaf and error
   542  //
   543  // Used to create new objects
   544  func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Object, dirNode *mega.Node, leaf string, err error) {
   545  	dirNode, leaf, err = f.mkdirParent(remote)
   546  	if err != nil {
   547  		return nil, nil, leaf, err
   548  	}
   549  	// Temporary Object under construction
   550  	o = &Object{
   551  		fs:     f,
   552  		remote: remote,
   553  	}
   554  	return o, dirNode, leaf, nil
   555  }
   556  
   557  // Put the object
   558  //
   559  // Copy the reader in to the new object which is returned
   560  //
   561  // The new object may have been created if an error is returned
   562  // PutUnchecked uploads the object
   563  //
   564  // This will create a duplicate if we upload a new file without
   565  // checking to see if there is one already - use Put() for that.
   566  func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
   567  	existingObj, err := f.newObjectWithInfo(src.Remote(), nil)
   568  	switch err {
   569  	case nil:
   570  		return existingObj, existingObj.Update(ctx, in, src, options...)
   571  	case fs.ErrorObjectNotFound:
   572  		// Not found so create it
   573  		return f.PutUnchecked(ctx, in, src)
   574  	default:
   575  		return nil, err
   576  	}
   577  }
   578  
   579  // PutUnchecked the object
   580  //
   581  // Copy the reader in to the new object which is returned
   582  //
   583  // The new object may have been created if an error is returned
   584  // PutUnchecked uploads the object
   585  //
   586  // This will create a duplicate if we upload a new file without
   587  // checking to see if there is one already - use Put() for that.
   588  func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
   589  	remote := src.Remote()
   590  	size := src.Size()
   591  	modTime := src.ModTime(ctx)
   592  
   593  	o, _, _, err := f.createObject(remote, modTime, size)
   594  	if err != nil {
   595  		return nil, err
   596  	}
   597  	return o, o.Update(ctx, in, src, options...)
   598  }
   599  
   600  // Mkdir creates the directory if it doesn't exist
   601  func (f *Fs) Mkdir(ctx context.Context, dir string) error {
   602  	rootNode, err := f.findRoot(true)
   603  	if err != nil {
   604  		return err
   605  	}
   606  	_, err = f.mkdir(rootNode, dir)
   607  	return errors.Wrap(err, "Mkdir failed")
   608  }
   609  
   610  // deleteNode removes a file or directory, observing useTrash
   611  func (f *Fs) deleteNode(node *mega.Node) (err error) {
   612  	err = f.pacer.Call(func() (bool, error) {
   613  		err = f.srv.Delete(node, f.opt.HardDelete)
   614  		return shouldRetry(err)
   615  	})
   616  	return err
   617  }
   618  
// purgeCheck removes the directory dir, if check is set then it
// refuses to do so if it has anything in
//
// Serialized with mkdir via mkdirMu so a concurrent mkdir can't race
// with the delete.
func (f *Fs) purgeCheck(dir string, check bool) error {
	f.mkdirMu.Lock()
	defer f.mkdirMu.Unlock()

	rootNode, err := f.findRoot(false)
	if err != nil {
		return err
	}
	dirNode, err := f.findDir(rootNode, dir)
	if err != nil {
		return err
	}

	if check {
		// refuse to remove a non-empty directory
		children, err := f.srv.FS.GetChildren(dirNode)
		if err != nil {
			return errors.Wrap(err, "purgeCheck GetChildren failed")
		}
		if len(children) > 0 {
			return fs.ErrorDirectoryNotEmpty
		}
	}

	// start listening for server events before the delete so we can
	// wait for the node tree to catch up afterwards
	waitEvent := f.srv.WaitEventsStart()

	err = f.deleteNode(dirNode)
	if err != nil {
		return errors.Wrap(err, "delete directory node failed")
	}

	// Remove the root node if we just deleted it
	if dirNode == rootNode {
		f.clearRoot()
	}

	f.srv.WaitEvents(waitEvent, eventWaitTime)
	return nil
}
   659  
   660  // Rmdir deletes the root folder
   661  //
   662  // Returns an error if it isn't empty
   663  func (f *Fs) Rmdir(ctx context.Context, dir string) error {
   664  	return f.purgeCheck(dir, true)
   665  }
   666  
   667  // Precision return the precision of this Fs
   668  func (f *Fs) Precision() time.Duration {
   669  	return fs.ModTimeNotSupported
   670  }
   671  
   672  // Purge deletes all the files and the container
   673  //
   674  // Optional interface: Only implement this if you have a way of
   675  // deleting all the files quicker than just running Remove() on the
   676  // result of List()
   677  func (f *Fs) Purge(ctx context.Context) error {
   678  	return f.purgeCheck("", false)
   679  }
   680  
// move a file or folder (srcFs, srcRemote, info) to (f, dstRemote)
//
// info will be updates
//
// The empty remote ("") in either position means the remote's own
// root directory, which needs its parent resolved from the absolute
// mega root rather than from the Fs root.
func (f *Fs) move(dstRemote string, srcFs *Fs, srcRemote string, info *mega.Node) (err error) {
	var (
		dstFs                  = f
		srcDirNode, dstDirNode *mega.Node
		srcParent, dstParent   string
		srcLeaf, dstLeaf       string
	)

	if dstRemote != "" {
		// lookup or create the destination parent directory
		dstDirNode, dstLeaf, err = dstFs.mkdirParent(dstRemote)
	} else {
		// find or create the parent of the root directory
		absRoot := dstFs.srv.FS.GetRoot()
		dstParent, dstLeaf = path.Split(dstFs.root)
		dstDirNode, err = dstFs.mkdir(absRoot, dstParent)
	}
	if err != nil {
		return errors.Wrap(err, "server side move failed to make dst parent dir")
	}

	if srcRemote != "" {
		// lookup the existing parent directory
		srcDirNode, srcLeaf, err = srcFs.lookupParentDir(srcRemote)
	} else {
		// lookup the existing root parent
		absRoot := srcFs.srv.FS.GetRoot()
		srcParent, srcLeaf = path.Split(srcFs.root)
		srcDirNode, err = f.findDir(absRoot, srcParent)
	}
	if err != nil {
		return errors.Wrap(err, "server side move failed to lookup src parent dir")
	}

	// move the object into its new directory if required
	// (compare hashes as well as pointers in case the same directory
	// is represented by two different nodes)
	if srcDirNode != dstDirNode && srcDirNode.GetHash() != dstDirNode.GetHash() {
		//log.Printf("move src %p %q dst %p %q", srcDirNode, srcDirNode.GetName(), dstDirNode, dstDirNode.GetName())
		err = f.pacer.Call(func() (bool, error) {
			err = f.srv.Move(info, dstDirNode)
			return shouldRetry(err)
		})
		if err != nil {
			return errors.Wrap(err, "server side move failed")
		}
	}

	waitEvent := f.srv.WaitEventsStart()

	// rename the object if required
	if srcLeaf != dstLeaf {
		//log.Printf("rename %q to %q", srcLeaf, dstLeaf)
		err = f.pacer.Call(func() (bool, error) {
			err = f.srv.Rename(info, f.opt.Enc.FromStandardName(dstLeaf))
			return shouldRetry(err)
		})
		if err != nil {
			return errors.Wrap(err, "server side rename failed")
		}
	}

	// wait for the server events to settle before returning
	f.srv.WaitEvents(waitEvent, eventWaitTime)

	return nil
}
   748  
   749  // Move src to this remote using server side move operations.
   750  //
   751  // This is stored with the remote path given
   752  //
   753  // It returns the destination Object and a possible error
   754  //
   755  // Will only be called if src.Fs().Name() == f.Name()
   756  //
   757  // If it isn't possible then return fs.ErrorCantMove
   758  func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
   759  	dstFs := f
   760  
   761  	//log.Printf("Move %q -> %q", src.Remote(), remote)
   762  	srcObj, ok := src.(*Object)
   763  	if !ok {
   764  		fs.Debugf(src, "Can't move - not same remote type")
   765  		return nil, fs.ErrorCantMove
   766  	}
   767  
   768  	// Do the move
   769  	err := f.move(remote, srcObj.fs, srcObj.remote, srcObj.info)
   770  	if err != nil {
   771  		return nil, err
   772  	}
   773  
   774  	// Create a destination object
   775  	dstObj := &Object{
   776  		fs:     dstFs,
   777  		remote: remote,
   778  		info:   srcObj.info,
   779  	}
   780  	return dstObj, nil
   781  }
   782  
   783  // DirMove moves src, srcRemote to this remote at dstRemote
   784  // using server side move operations.
   785  //
   786  // Will only be called if src.Fs().Name() == f.Name()
   787  //
   788  // If it isn't possible then return fs.ErrorCantDirMove
   789  //
   790  // If destination exists then return fs.ErrorDirExists
   791  func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
   792  	dstFs := f
   793  	srcFs, ok := src.(*Fs)
   794  	if !ok {
   795  		fs.Debugf(srcFs, "Can't move directory - not same remote type")
   796  		return fs.ErrorCantDirMove
   797  	}
   798  
   799  	// find the source
   800  	info, err := srcFs.lookupDir(srcRemote)
   801  	if err != nil {
   802  		return err
   803  	}
   804  
   805  	// check the destination doesn't exist
   806  	_, err = dstFs.lookupDir(dstRemote)
   807  	if err == nil {
   808  		return fs.ErrorDirExists
   809  	} else if err != fs.ErrorDirNotFound {
   810  		return errors.Wrap(err, "DirMove error while checking dest directory")
   811  	}
   812  
   813  	// Do the move
   814  	err = f.move(dstRemote, srcFs, srcRemote, info)
   815  	if err != nil {
   816  		return err
   817  	}
   818  
   819  	// Clear src if it was the root
   820  	if srcRemote == "" {
   821  		srcFs.clearRoot()
   822  	}
   823  
   824  	return nil
   825  }
   826  
// DirCacheFlush is an optional interface to flush the internal
// directory cache.
//
// Currently a no-op - see the FIXME below.
func (f *Fs) DirCacheFlush() {
	// f.dirCache.ResetRoot()
	// FIXME Flush the mega somehow?
}
   832  
   833  // Hashes returns the supported hash sets.
   834  func (f *Fs) Hashes() hash.Set {
   835  	return hash.Set(hash.None)
   836  }
   837  
   838  // PublicLink generates a public link to the remote path (usually readable by anyone)
   839  func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err error) {
   840  	root, err := f.findRoot(false)
   841  	if err != nil {
   842  		return "", errors.Wrap(err, "PublicLink failed to find root node")
   843  	}
   844  	node, err := f.findNode(root, remote)
   845  	if err != nil {
   846  		return "", errors.Wrap(err, "PublicLink failed to find path")
   847  	}
   848  	link, err = f.srv.Link(node, true)
   849  	if err != nil {
   850  		return "", errors.Wrap(err, "PublicLink failed to create link")
   851  	}
   852  	return link, nil
   853  }
   854  
// MergeDirs merges the contents of all the directories passed
// in into the first one and rmdirs the other directories.
//
// Used by rclone dedupe to collapse the duplicate directories mega
// allows.
func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
	if len(dirs) < 2 {
		// nothing to merge
		return nil
	}
	// find dst directory
	dstDir := dirs[0]
	dstDirNode := f.srv.FS.HashLookup(dstDir.ID())
	if dstDirNode == nil {
		return errors.Errorf("MergeDirs failed to find node for: %v", dstDir)
	}
	for _, srcDir := range dirs[1:] {
		// find src directory
		srcDirNode := f.srv.FS.HashLookup(srcDir.ID())
		if srcDirNode == nil {
			return errors.Errorf("MergeDirs failed to find node for: %v", srcDir)
		}

		// list the objects
		infos := []*mega.Node{}
		_, err := f.list(ctx, srcDirNode, func(info *mega.Node) bool {
			infos = append(infos, info)
			return false
		})
		if err != nil {
			return errors.Wrapf(err, "MergeDirs list failed on %v", srcDir)
		}
		// move them into place
		for _, info := range infos {
			fs.Infof(srcDir, "merging %q", f.opt.Enc.ToStandardName(info.GetName()))
			err = f.pacer.Call(func() (bool, error) {
				err = f.srv.Move(info, dstDirNode)
				return shouldRetry(err)
			})
			if err != nil {
				return errors.Wrapf(err, "MergeDirs move failed on %q in %v", f.opt.Enc.ToStandardName(info.GetName()), srcDir)
			}
		}
		// rmdir (into trash) the now empty source directory
		fs.Infof(srcDir, "removing empty directory")
		err = f.deleteNode(srcDirNode)
		if err != nil {
			return errors.Wrapf(err, "MergeDirs move failed to rmdir %q", srcDir)
		}
	}
	return nil
}
   903  
   904  // About gets quota information
   905  func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
   906  	var q mega.QuotaResp
   907  	var err error
   908  	err = f.pacer.Call(func() (bool, error) {
   909  		q, err = f.srv.GetQuota()
   910  		return shouldRetry(err)
   911  	})
   912  	if err != nil {
   913  		return nil, errors.Wrap(err, "failed to get Mega Quota")
   914  	}
   915  	usage := &fs.Usage{
   916  		Total: fs.NewUsageValue(int64(q.Mstrg)),           // quota of bytes that can be used
   917  		Used:  fs.NewUsageValue(int64(q.Cstrg)),           // bytes in use
   918  		Free:  fs.NewUsageValue(int64(q.Mstrg - q.Cstrg)), // bytes which can be uploaded before reaching the quota
   919  	}
   920  	return usage, nil
   921  }
   922  
   923  // ------------------------------------------------------------
   924  
   925  // Fs returns the parent Fs
   926  func (o *Object) Fs() fs.Info {
   927  	return o.fs
   928  }
   929  
   930  // Return a string version
   931  func (o *Object) String() string {
   932  	if o == nil {
   933  		return "<nil>"
   934  	}
   935  	return o.remote
   936  }
   937  
   938  // Remote returns the remote path
   939  func (o *Object) Remote() string {
   940  	return o.remote
   941  }
   942  
   943  // Hash returns the hashes of an object
   944  func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
   945  	return "", hash.ErrUnsupported
   946  }
   947  
   948  // Size returns the size of an object in bytes
   949  func (o *Object) Size() int64 {
   950  	return o.info.GetSize()
   951  }
   952  
   953  // setMetaData sets the metadata from info
   954  func (o *Object) setMetaData(info *mega.Node) (err error) {
   955  	if info.GetType() != mega.FILE {
   956  		return fs.ErrorNotAFile
   957  	}
   958  	o.info = info
   959  	return nil
   960  }
   961  
   962  // readMetaData gets the metadata if it hasn't already been fetched
   963  //
   964  // it also sets the info
   965  func (o *Object) readMetaData() (err error) {
   966  	if o.info != nil {
   967  		return nil
   968  	}
   969  	info, err := o.fs.readMetaDataForPath(o.remote)
   970  	if err != nil {
   971  		if err == fs.ErrorDirNotFound {
   972  			err = fs.ErrorObjectNotFound
   973  		}
   974  		return err
   975  	}
   976  	return o.setMetaData(info)
   977  }
   978  
   979  // ModTime returns the modification time of the object
   980  //
   981  //
   982  // It attempts to read the objects mtime and if that isn't present the
   983  // LastModified returned in the http headers
   984  func (o *Object) ModTime(ctx context.Context) time.Time {
   985  	return o.info.GetTimeStamp()
   986  }
   987  
   988  // SetModTime sets the modification time of the local fs object
   989  func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
   990  	return fs.ErrorCantSetModTime
   991  }
   992  
   993  // Storable returns a boolean showing whether this object storable
   994  func (o *Object) Storable() bool {
   995  	return true
   996  }
   997  
// openObject represents a download in progress
type openObject struct {
	mu     sync.Mutex     // protects all the fields below
	o      *Object        // the object being downloaded
	d      *mega.Download // mega download session
	id     int            // index of the next chunk to fetch
	skip   int64          // bytes still to discard to reach the requested offset
	chunk  []byte         // unread remainder of the current chunk
	closed bool           // set once Close has completed successfully
}
  1008  
  1009  // get the next chunk
  1010  func (oo *openObject) getChunk() (err error) {
  1011  	if oo.id >= oo.d.Chunks() {
  1012  		return io.EOF
  1013  	}
  1014  	var chunk []byte
  1015  	err = oo.o.fs.pacer.Call(func() (bool, error) {
  1016  		chunk, err = oo.d.DownloadChunk(oo.id)
  1017  		return shouldRetry(err)
  1018  	})
  1019  	if err != nil {
  1020  		return err
  1021  	}
  1022  	oo.id++
  1023  	oo.chunk = chunk
  1024  	return nil
  1025  }
  1026  
// Read reads up to len(p) bytes into p.
//
// Data is served from the buffered current chunk; when that is empty
// the next chunk is fetched via getChunk, which returns io.EOF once
// all chunks are consumed. An initial seek offset is honoured by
// skipping whole chunks and then trimming the first fetched chunk.
func (oo *openObject) Read(p []byte) (n int, err error) {
	oo.mu.Lock()
	defer oo.mu.Unlock()
	if oo.closed {
		return 0, errors.New("read on closed file")
	}
	// Skip data at the start if requested
	for oo.skip > 0 {
		// Advance past whole chunks while the remaining skip covers them
		_, size, err := oo.d.ChunkLocation(oo.id)
		if err != nil {
			return 0, err
		}
		if oo.skip < int64(size) {
			break
		}
		oo.id++
		oo.skip -= int64(size)
	}
	if len(oo.chunk) == 0 {
		err = oo.getChunk()
		if err != nil {
			return 0, err
		}
		if oo.skip > 0 {
			// Discard the partial-chunk remainder of the skip
			oo.chunk = oo.chunk[oo.skip:]
			oo.skip = 0
		}
	}
	n = copy(p, oo.chunk)
	oo.chunk = oo.chunk[n:]
	return n, nil
}
  1060  
  1061  // Close closed the file - MAC errors are reported here
  1062  func (oo *openObject) Close() (err error) {
  1063  	oo.mu.Lock()
  1064  	defer oo.mu.Unlock()
  1065  	if oo.closed {
  1066  		return nil
  1067  	}
  1068  	err = oo.o.fs.pacer.Call(func() (bool, error) {
  1069  		err = oo.d.Finish()
  1070  		return shouldRetry(err)
  1071  	})
  1072  	if err != nil {
  1073  		return errors.Wrap(err, "failed to finish download")
  1074  	}
  1075  	oo.closed = true
  1076  	return nil
  1077  }
  1078  
  1079  // Open an object for read
  1080  func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
  1081  	var offset, limit int64 = 0, -1
  1082  	for _, option := range options {
  1083  		switch x := option.(type) {
  1084  		case *fs.SeekOption:
  1085  			offset = x.Offset
  1086  		case *fs.RangeOption:
  1087  			offset, limit = x.Decode(o.Size())
  1088  		default:
  1089  			if option.Mandatory() {
  1090  				fs.Logf(o, "Unsupported mandatory option: %v", option)
  1091  			}
  1092  		}
  1093  	}
  1094  
  1095  	var d *mega.Download
  1096  	err = o.fs.pacer.Call(func() (bool, error) {
  1097  		d, err = o.fs.srv.NewDownload(o.info)
  1098  		return shouldRetry(err)
  1099  	})
  1100  	if err != nil {
  1101  		return nil, errors.Wrap(err, "open download file failed")
  1102  	}
  1103  
  1104  	oo := &openObject{
  1105  		o:    o,
  1106  		d:    d,
  1107  		skip: offset,
  1108  	}
  1109  
  1110  	return readers.NewLimitedReadCloser(oo, limit), nil
  1111  }
  1112  
// Update the object with the contents of the io.Reader, modTime and size
//
// If existing is set then it updates the object rather than creating a new one
//
// The new object may have been created if an error is returned
//
// The upload is chunked: an upload session is created for the known
// size, each chunk is read fully from in and uploaded in order, and
// the session is then finished to obtain the new node. Only after a
// successful upload is any pre-existing version of the object deleted.
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
	size := src.Size()
	if size < 0 {
		// the chunked upload below needs the size up front
		return errors.New("mega backend can't upload a file of unknown length")
	}
	//modTime := src.ModTime(ctx)
	remote := o.Remote()

	// Create the parent directory
	dirNode, leaf, err := o.fs.mkdirParent(remote)
	if err != nil {
		return errors.Wrap(err, "update make parent dir failed")
	}

	// Create the upload session, encoding the leaf name for mega
	var u *mega.Upload
	err = o.fs.pacer.Call(func() (bool, error) {
		u, err = o.fs.srv.NewUpload(dirNode, o.fs.opt.Enc.FromStandardName(leaf), size)
		return shouldRetry(err)
	})
	if err != nil {
		return errors.Wrap(err, "upload file failed to create session")
	}

	// Upload the chunks
	// FIXME do this in parallel
	for id := 0; id < u.Chunks(); id++ {
		// the session dictates each chunk's size
		_, chunkSize, err := u.ChunkLocation(id)
		if err != nil {
			return errors.Wrap(err, "upload failed to read chunk location")
		}
		chunk := make([]byte, chunkSize)
		_, err = io.ReadFull(in, chunk)
		if err != nil {
			return errors.Wrap(err, "upload failed to read data")
		}

		err = o.fs.pacer.Call(func() (bool, error) {
			err = u.UploadChunk(id, chunk)
			return shouldRetry(err)
		})
		if err != nil {
			return errors.Wrap(err, "upload file failed to upload chunk")
		}
	}

	// Finish the upload to get the new node
	var info *mega.Node
	err = o.fs.pacer.Call(func() (bool, error) {
		info, err = u.Finish()
		return shouldRetry(err)
	})
	if err != nil {
		return errors.Wrap(err, "failed to finish upload")
	}

	// If the upload succeeded and the original object existed, then delete it
	if o.info != nil {
		err = o.fs.deleteNode(o.info)
		if err != nil {
			return errors.Wrap(err, "upload failed to remove old version")
		}
		o.info = nil
	}

	// point the object at the freshly uploaded node
	return o.setMetaData(info)
}
  1184  
  1185  // Remove an object
  1186  func (o *Object) Remove(ctx context.Context) error {
  1187  	err := o.fs.deleteNode(o.info)
  1188  	if err != nil {
  1189  		return errors.Wrap(err, "Remove object failed")
  1190  	}
  1191  	return nil
  1192  }
  1193  
  1194  // ID returns the ID of the Object if known, or "" if not
  1195  func (o *Object) ID() string {
  1196  	return o.info.GetHash()
  1197  }
  1198  
// Check the interfaces are satisfied
//
// These compile-time assertions guarantee that *Fs and *Object
// implement the optional rclone interfaces they advertise.
var (
	_ fs.Fs              = (*Fs)(nil)
	_ fs.Purger          = (*Fs)(nil)
	_ fs.Mover           = (*Fs)(nil)
	_ fs.PutUncheckeder  = (*Fs)(nil)
	_ fs.DirMover        = (*Fs)(nil)
	_ fs.DirCacheFlusher = (*Fs)(nil)
	_ fs.PublicLinker    = (*Fs)(nil)
	_ fs.MergeDirser     = (*Fs)(nil)
	_ fs.Abouter         = (*Fs)(nil)
	_ fs.Object          = (*Object)(nil)
	_ fs.IDer            = (*Object)(nil)
)