github.com/cs3org/reva/v2@v2.27.7/pkg/storage/fs/posix/tree/tree.go (about)

     1  // Copyright 2018-2021 CERN
     2  //
     3  // Licensed under the Apache License, Version 2.0 (the "License");
     4  // you may not use this file except in compliance with the License.
     5  // You may obtain a copy of the License at
     6  //
     7  //     http://www.apache.org/licenses/LICENSE-2.0
     8  //
     9  // Unless required by applicable law or agreed to in writing, software
    10  // distributed under the License is distributed on an "AS IS" BASIS,
    11  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    12  // See the License for the specific language governing permissions and
    13  // limitations under the License.
    14  //
    15  // In applying this license, CERN does not waive the privileges and immunities
    16  // granted to it by virtue of its status as an Intergovernmental Organization
    17  // or submit itself to any jurisdiction.
    18  
    19  package tree
    20  
    21  import (
    22  	"context"
    23  	"fmt"
    24  	"io"
    25  	"io/fs"
    26  	"os"
    27  	"path/filepath"
    28  	"regexp"
    29  	"strings"
    30  
    31  	"github.com/google/uuid"
    32  	"github.com/pkg/errors"
    33  	"github.com/rs/zerolog"
    34  	"github.com/rs/zerolog/log"
    35  	"go-micro.dev/v4/store"
    36  	"go.opentelemetry.io/otel"
    37  	"go.opentelemetry.io/otel/trace"
    38  	"golang.org/x/sync/errgroup"
    39  
    40  	provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1"
    41  	"github.com/cs3org/reva/v2/pkg/appctx"
    42  	"github.com/cs3org/reva/v2/pkg/errtypes"
    43  	"github.com/cs3org/reva/v2/pkg/events"
    44  	"github.com/cs3org/reva/v2/pkg/storage/fs/posix/lookup"
    45  	"github.com/cs3org/reva/v2/pkg/storage/fs/posix/options"
    46  	"github.com/cs3org/reva/v2/pkg/storage/fs/posix/trashbin"
    47  	"github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs"
    48  	"github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/metadata"
    49  	"github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/metadata/prefixes"
    50  	"github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/node"
    51  	"github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/tree/propagator"
    52  	"github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/usermapper"
    53  	"github.com/cs3org/reva/v2/pkg/utils"
    54  )
    55  
    56  var tracer trace.Tracer
    57  
    58  func init() {
    59  	tracer = otel.Tracer("github.com/cs3org/reva/pkg/storage/utils/decomposedfs/tree")
    60  }
    61  
// Blobstore defines an interface for storing blobs in a blobstore
type Blobstore interface {
	// Upload stores the file found at source as the blob of the given node.
	Upload(node *node.Node, source string) error
	// Download returns a reader for the blob of the given node.
	// The caller is responsible for closing it.
	Download(node *node.Node) (io.ReadCloser, error)
	// Delete removes the blob of the given node from the store.
	Delete(node *node.Node) error
}
    68  
// Watcher observes a filesystem path for changes and feeds them back into
// the tree (implementations in this package: inotify, GPFS watch folder,
// GPFS file audit logging).
type Watcher interface {
	// Watch starts watching the given path; New invokes it in its own
	// goroutine.
	Watch(path string)
}
    72  
// scanItem is a single unit of work for the scan queue.
type scanItem struct {
	// Path is the filesystem path to (re)scan.
	Path string
	// ForceRescan requests a scan even for already-known paths
	// (see the assimilation code for exact semantics — TODO confirm).
	ForceRescan bool
	// Recurse indicates whether the scan should descend into subdirectories.
	Recurse bool
}
    78  
// Tree manages a hierarchical tree
type Tree struct {
	lookup     node.PathLookup       // resolves nodes, paths and metadata backend
	blobstore  Blobstore             // stores file content
	trashbin   *trashbin.Trashbin    // handles MoveToTrash on delete
	propagator propagator.Propagator // propagates changes towards the space root

	options *options.Options

	userMapper    usermapper.Mapper // switches effective uid/gid for space-scoped reads
	idCache       store.Store       // caches (spaceID, nodeID) -> path mappings
	watcher       Watcher           // filesystem event source (see New)
	scanQueue     chan scanItem     // work queue fed by the debouncer
	scanDebouncer *ScanDebouncer    // coalesces rapid events for the same path

	es  events.Stream // event stream; may be nil, then PublishEvent is a no-op
	log *zerolog.Logger
}
    97  
// PermissionCheckFunc defines a function used to check resource permissions
type PermissionCheckFunc func(rp *provider.ResourcePermissions) bool
   100  
// New returns a new instance of Tree
//
// The watcher implementation is selected via o.WatchType ("gpfswatchfolder",
// "gpfsfileauditlogging", or inotify by default). When o.WatchFS is enabled,
// background goroutines are started that watch the filesystem, work the scan
// queue and warm up the id cache for the whole root.
func New(lu node.PathLookup, bs Blobstore, um usermapper.Mapper, trashbin *trashbin.Trashbin, o *options.Options, es events.Stream, cache store.Store, log *zerolog.Logger) (*Tree, error) {
	// unbuffered: the debouncer callback blocks until the scan worker
	// accepts the item
	scanQueue := make(chan scanItem)
	t := &Tree{
		lookup:     lu,
		blobstore:  bs,
		userMapper: um,
		trashbin:   trashbin,
		options:    o,
		idCache:    cache,
		propagator: propagator.New(lu, &o.Options, log),
		scanQueue:  scanQueue,
		scanDebouncer: NewScanDebouncer(o.ScanDebounceDelay, func(item scanItem) {
			scanQueue <- item
		}),
		es:  es,
		log: log,
	}

	watchPath := o.WatchPath
	var err error
	switch o.WatchType {
	case "gpfswatchfolder":
		t.watcher, err = NewGpfsWatchFolderWatcher(t, strings.Split(o.WatchFolderKafkaBrokers, ","), log)
		if err != nil {
			return nil, err
		}
	case "gpfsfileauditlogging":
		t.watcher, err = NewGpfsFileAuditLoggingWatcher(t, o.WatchPath, log)
		if err != nil {
			return nil, err
		}
	default:
		// the inotify watcher observes the storage root directly
		t.watcher = NewInotifyWatcher(t, log)
		watchPath = o.Root
	}

	// Start watching for fs events and put them into the queue
	if o.WatchFS {
		go t.watcher.Watch(watchPath)
		go t.workScanQueue()
		go func() {
			// warmup is best effort; errors are intentionally ignored
			_ = t.WarmupIDCache(o.Root, true, false)
		}()
	}

	return t, nil
}
   149  
   150  func (t *Tree) PublishEvent(ev interface{}) {
   151  	if t.es == nil {
   152  		return
   153  	}
   154  
   155  	if err := events.Publish(context.Background(), t.es, ev); err != nil {
   156  		t.log.Error().Err(err).Interface("event", ev).Msg("failed to publish event")
   157  	}
   158  }
   159  
   160  // Setup prepares the tree structure
   161  func (t *Tree) Setup() error {
   162  	err := os.MkdirAll(t.options.Root, 0700)
   163  	if err != nil {
   164  		return err
   165  	}
   166  
   167  	err = os.MkdirAll(t.options.UploadDirectory, 0700)
   168  	if err != nil {
   169  		return err
   170  	}
   171  
   172  	return nil
   173  }
   174  
   175  // GetMD returns the metadata of a node in the tree
   176  func (t *Tree) GetMD(ctx context.Context, n *node.Node) (os.FileInfo, error) {
   177  	md, err := os.Stat(n.InternalPath())
   178  	if err != nil {
   179  		if errors.Is(err, fs.ErrNotExist) {
   180  			return nil, errtypes.NotFound(n.ID)
   181  		}
   182  		return nil, errors.Wrap(err, "tree: error stating "+n.ID)
   183  	}
   184  
   185  	return md, nil
   186  }
   187  
   188  // TouchFile creates a new empty file
   189  func (t *Tree) TouchFile(ctx context.Context, n *node.Node, markprocessing bool, mtime string) error {
   190  	if n.Exists {
   191  		if markprocessing {
   192  			return n.SetXattr(ctx, prefixes.StatusPrefix, []byte(node.ProcessingStatus))
   193  		}
   194  
   195  		return errtypes.AlreadyExists(n.ID)
   196  	}
   197  
   198  	parentPath := n.ParentPath()
   199  	nodePath := filepath.Join(parentPath, n.Name)
   200  
   201  	// lock the meta file
   202  	unlock, err := t.lookup.MetadataBackend().Lock(nodePath)
   203  	if err != nil {
   204  		return err
   205  	}
   206  	defer func() {
   207  		_ = unlock()
   208  	}()
   209  
   210  	if n.ID == "" {
   211  		n.ID = uuid.New().String()
   212  	}
   213  	n.SetType(provider.ResourceType_RESOURCE_TYPE_FILE)
   214  
   215  	// Set id in cache
   216  	if err := t.lookup.(*lookup.Lookup).CacheID(context.Background(), n.SpaceID, n.ID, nodePath); err != nil {
   217  		t.log.Error().Err(err).Str("spaceID", n.SpaceID).Str("id", n.ID).Str("path", nodePath).Msg("could not cache id")
   218  	}
   219  
   220  	if err := os.MkdirAll(filepath.Dir(nodePath), 0700); err != nil {
   221  		return errors.Wrap(err, "Decomposedfs: error creating node")
   222  	}
   223  	_, err = os.Create(nodePath)
   224  	if err != nil {
   225  		return errors.Wrap(err, "Decomposedfs: error creating node")
   226  	}
   227  
   228  	attributes := n.NodeMetadata(ctx)
   229  	attributes[prefixes.IDAttr] = []byte(n.ID)
   230  	if markprocessing {
   231  		attributes[prefixes.StatusPrefix] = []byte(node.ProcessingStatus)
   232  	}
   233  	if mtime != "" {
   234  		nodeMTime, err := utils.MTimeToTime(mtime)
   235  		if err != nil {
   236  			return err
   237  		}
   238  		err = os.Chtimes(nodePath, nodeMTime, nodeMTime)
   239  		if err != nil {
   240  			return err
   241  		}
   242  	}
   243  
   244  	err = n.SetXattrsWithContext(ctx, attributes, false)
   245  	if err != nil {
   246  		return err
   247  	}
   248  
   249  	return t.Propagate(ctx, n, 0)
   250  }
   251  
   252  // CreateDir creates a new directory entry in the tree
   253  func (t *Tree) CreateDir(ctx context.Context, n *node.Node) (err error) {
   254  	ctx, span := tracer.Start(ctx, "CreateDir")
   255  	defer span.End()
   256  	if n.Exists {
   257  		return errtypes.AlreadyExists(n.ID) // path?
   258  	}
   259  
   260  	// create a directory node
   261  	n.SetType(provider.ResourceType_RESOURCE_TYPE_CONTAINER)
   262  	if n.ID == "" {
   263  		n.ID = uuid.New().String()
   264  	}
   265  
   266  	err = t.createDirNode(ctx, n)
   267  	if err != nil {
   268  		return
   269  	}
   270  
   271  	return t.Propagate(ctx, n, 0)
   272  }
   273  
// Move replaces the target with the source. Both nodes must be in the same
// space. An existing target is removed (not trashed) before the source is
// renamed into place; afterwards the id cache is refreshed and the size
// change is propagated on both the old and the new ancestor chain.
func (t *Tree) Move(ctx context.Context, oldNode *node.Node, newNode *node.Node) (err error) {
	if oldNode.SpaceID != newNode.SpaceID {
		// WebDAV RFC https://www.rfc-editor.org/rfc/rfc4918#section-9.9.4 says to use
		// > 502 (Bad Gateway) - This may occur when the destination is on another
		// > server and the destination server refuses to accept the resource.
		// > This could also occur when the destination is on another sub-section
		// > of the same server namespace.
		// but we only have a not supported error
		return errtypes.NotSupported("cannot move across spaces")
	}
	// if target exists delete it without trashing it
	if newNode.Exists {
		// TODO make sure all children are deleted
		if err := os.RemoveAll(newNode.InternalPath()); err != nil {
			return errors.Wrap(err, "Decomposedfs: Move: error deleting target node "+newNode.ID)
		}
	}

	// we are moving the node to a new parent, any target has been removed
	// bring old node to the new parent

	// update target parentid and name
	attribs := node.Attributes{}
	attribs.SetString(prefixes.ParentidAttr, newNode.ParentID)
	attribs.SetString(prefixes.NameAttr, newNode.Name)
	if err := oldNode.SetXattrsWithContext(ctx, attribs, true); err != nil {
		return errors.Wrap(err, "Decomposedfs: could not update old node attributes")
	}

	// rename node
	err = os.Rename(
		filepath.Join(oldNode.ParentPath(), oldNode.Name),
		filepath.Join(newNode.ParentPath(), newNode.Name),
	)
	if err != nil {
		return errors.Wrap(err, "Decomposedfs: could not move child")
	}

	// update the id cache: the moved node keeps its id but gains a new path
	if newNode.ID == "" {
		newNode.ID = oldNode.ID
	}
	if err := t.lookup.(*lookup.Lookup).CacheID(ctx, newNode.SpaceID, newNode.ID, filepath.Join(newNode.ParentPath(), newNode.Name)); err != nil {
		t.log.Error().Err(err).Str("spaceID", newNode.SpaceID).Str("id", newNode.ID).Str("path", filepath.Join(newNode.ParentPath(), newNode.Name)).Msg("could not cache id")
	}

	// rename the lock (if it exists)
	if _, err := os.Stat(oldNode.LockFilePath()); err == nil {
		err = os.Rename(
			filepath.Join(oldNode.ParentPath(), oldNode.Name+".lock"),
			filepath.Join(newNode.ParentPath(), newNode.Name+".lock"),
		)
		if err != nil {
			return errors.Wrap(err, "Decomposedfs: could not move lock")
		}
	}

	// update id cache for the moved subtree. Every descendant's path changed,
	// so re-walk the new location.
	if oldNode.IsDir(ctx) {
		err = t.WarmupIDCache(filepath.Join(newNode.ParentPath(), newNode.Name), false, false)
		if err != nil {
			return err
		}
	}

	// the size diff is the current treesize or blobsize of the old/source node
	var sizeDiff int64
	if oldNode.IsDir(ctx) {
		treeSize, err := oldNode.GetTreeSize(ctx)
		if err != nil {
			return err
		}
		sizeDiff = int64(treeSize)
	} else {
		sizeDiff = oldNode.Blobsize
	}

	// shrink the old ancestor chain and grow the new one by the same amount
	err = t.Propagate(ctx, oldNode, -sizeDiff)
	if err != nil {
		return errors.Wrap(err, "Decomposedfs: Move: could not propagate old node")
	}
	err = t.Propagate(ctx, newNode, sizeDiff)
	if err != nil {
		return errors.Wrap(err, "Decomposedfs: Move: could not propagate new node")
	}
	return nil
}
   362  
// ListFolder lists the content of a folder node. Directory entries are read
// concurrently by up to options.MaxConcurrency workers; lock files and trash
// entries are skipped, as are entries without an id attribute and children
// the caller is denied access to.
func (t *Tree) ListFolder(ctx context.Context, n *node.Node) ([]*node.Node, error) {
	ctx, span := tracer.Start(ctx, "ListFolder")
	defer span.End()
	dir := n.InternalPath()

	_, subspan := tracer.Start(ctx, "os.Open")
	f, err := os.Open(dir)
	subspan.End()
	if err != nil {
		if errors.Is(err, fs.ErrNotExist) {
			return nil, errtypes.NotFound(dir)
		}
		return nil, errors.Wrap(err, "tree: error listing "+dir)
	}
	defer f.Close()

	_, subspan = tracer.Start(ctx, "f.Readdirnames")
	names, err := f.Readdirnames(0)
	subspan.End()
	if err != nil {
		return nil, err
	}

	// never spawn more workers than there are entries
	numWorkers := t.options.MaxConcurrency
	if len(names) < numWorkers {
		numWorkers = len(names)
	}
	work := make(chan string)
	results := make(chan *node.Node)

	g, ctx := errgroup.WithContext(ctx)

	// Distribute work
	g.Go(func() error {
		defer close(work)
		for _, name := range names {
			if isLockFile(name) || isTrash(name) {
				continue
			}

			select {
			case work <- name:
			case <-ctx.Done():
				return ctx.Err()
			}
		}
		return nil
	})

	// Spawn workers that'll concurrently work the queue
	for i := 0; i < numWorkers; i++ {
		g.Go(func() error {
			// switch user if necessary so reads happen with the gid of the space
			spaceGID, ok := ctx.Value(decomposedfs.CtxKeySpaceGID).(uint32)
			if ok {
				unscope, err := t.userMapper.ScopeUserByIds(-1, int(spaceGID))
				if err != nil {
					return errors.Wrap(err, "failed to scope user")
				}
				defer func() { _ = unscope() }()
			}

			for name := range work {
				path := filepath.Join(dir, name)
				nodeID, err := t.lookup.MetadataBackend().Get(ctx, path, prefixes.IDAttr)
				if err != nil {
					// entries without an id attribute are not (yet) known to
					// the tree and are silently skipped
					if metadata.IsAttrUnset(err) {
						continue
					}
					return err
				}

				child, err := node.ReadNode(ctx, t.lookup, n.SpaceID, string(nodeID), false, n.SpaceRoot, true)
				if err != nil {
					return err
				}

				// prevent listing denied resources
				if !child.IsDenied(ctx) {
					if child.SpaceRoot == nil {
						child.SpaceRoot = n.SpaceRoot
					}
					select {
					case results <- child:
					case <-ctx.Done():
						return ctx.Err()
					}
				}
			}
			return nil
		})
	}
	// Wait for things to settle down, then close results chan
	go func() {
		_ = g.Wait() // error is checked later
		close(results)
	}()

	retNodes := []*node.Node{}
	for n := range results {
		retNodes = append(retNodes, n)
	}

	// surface any producer/worker error after the results channel drained
	if err := g.Wait(); err != nil {
		return nil, err
	}

	return retNodes, nil
}
   473  
   474  // Delete deletes a node in the tree by moving it to the trash
   475  func (t *Tree) Delete(ctx context.Context, n *node.Node) error {
   476  	path := n.InternalPath()
   477  
   478  	if !strings.HasPrefix(path, t.options.Root) {
   479  		return errtypes.InternalError("invalid internal path")
   480  	}
   481  
   482  	// remove entry from cache immediately to avoid inconsistencies
   483  	defer func() {
   484  		if err := t.idCache.Delete(path); err != nil {
   485  			log.Error().Err(err).Str("path", path).Msg("could not delete id from cache")
   486  		}
   487  	}()
   488  
   489  	if appctx.DeletingSharedResourceFromContext(ctx) {
   490  		src := filepath.Join(n.ParentPath(), n.Name)
   491  		return os.RemoveAll(src)
   492  	}
   493  
   494  	var sizeDiff int64
   495  	if n.IsDir(ctx) {
   496  		treesize, err := n.GetTreeSize(ctx)
   497  		if err != nil {
   498  			return err // TODO calculate treesize if it is not set
   499  		}
   500  		sizeDiff = -int64(treesize)
   501  	} else {
   502  		sizeDiff = -n.Blobsize
   503  	}
   504  
   505  	// Remove lock file if it exists
   506  	if err := os.Remove(n.LockFilePath()); err != nil {
   507  		log.Error().Err(err).Str("path", n.LockFilePath()).Msg("could not remove lock file")
   508  	}
   509  
   510  	err := t.trashbin.MoveToTrash(ctx, n, path)
   511  	if err != nil {
   512  		return err
   513  	}
   514  
   515  	return t.Propagate(ctx, n, sizeDiff)
   516  }
   517  
// RestoreRecycleItemFunc returns a node and a function to restore it from the trash.
//
// It resolves the trash item identified by spaceid/key/trashPath and returns
// the deleted node, the parent of the restore target and a closure that
// performs the actual restore: relinking the node at the target path,
// renaming it back from its trash location, updating its name/parent
// attributes, removing the trash link and propagating the size change.
// If targetNode is nil the item's recorded origin is used as restore target.
func (t *Tree) RestoreRecycleItemFunc(ctx context.Context, spaceid, key, trashPath string, targetNode *node.Node) (*node.Node, *node.Node, func() error, error) {
	recycleNode, trashItem, deletedNodePath, origin, err := t.readRecycleItem(ctx, spaceid, key, trashPath)
	if err != nil {
		return nil, nil, nil, err
	}

	targetRef := &provider.Reference{
		ResourceId: &provider.ResourceId{SpaceId: spaceid, OpaqueId: spaceid},
		Path:       utils.MakeRelativePath(origin),
	}

	if targetNode == nil {
		targetNode, err = t.lookup.NodeFromResource(ctx, targetRef)
		if err != nil {
			return nil, nil, nil, err
		}
	}

	// restoring into a locked target is not allowed
	if err := targetNode.CheckLock(ctx); err != nil {
		return nil, nil, nil, err
	}

	parent, err := targetNode.Parent(ctx)
	if err != nil {
		return nil, nil, nil, err
	}

	fn := func() error {
		if targetNode.Exists {
			return errtypes.AlreadyExists("origin already exists")
		}

		// add the entry for the parent dir
		err = os.Symlink("../../../../../"+lookup.Pathify(recycleNode.ID, 4, 2), filepath.Join(targetNode.ParentPath(), targetNode.Name))
		if err != nil {
			return err
		}

		// rename to node only name, so it is picked up by id
		nodePath := recycleNode.InternalPath()

		// attempt to rename only if we're not in a subfolder
		if deletedNodePath != nodePath {
			err = os.Rename(deletedNodePath, nodePath)
			if err != nil {
				return err
			}
			err = t.lookup.MetadataBackend().Rename(deletedNodePath, nodePath)
			if err != nil {
				return err
			}
		}

		targetNode.Exists = true

		attrs := node.Attributes{}
		attrs.SetString(prefixes.NameAttr, targetNode.Name)
		if trashPath != "" {
			// set ParentidAttr to restorePath's node parent id
			attrs.SetString(prefixes.ParentidAttr, targetNode.ParentID)
		}

		if err = recycleNode.SetXattrsWithContext(ctx, attrs, true); err != nil {
			return errors.Wrap(err, "Decomposedfs: could not update recycle node")
		}

		// delete item link in trash
		deletePath := trashItem
		if trashPath != "" && trashPath != "/" {
			// the trash item is a symlink; resolve it so the sub-path can be
			// joined onto the real location
			resolvedTrashRoot, err := filepath.EvalSymlinks(trashItem)
			if err != nil {
				return errors.Wrap(err, "Decomposedfs: could not resolve trash root")
			}
			deletePath = filepath.Join(resolvedTrashRoot, trashPath)
		}
		// best effort: a leftover trash link is only logged, not fatal
		if err = os.Remove(deletePath); err != nil {
			log.Error().Err(err).Str("trashItem", trashItem).Msg("error deleting trash item")
		}

		// size diff to propagate is the restored tree size or blob size
		var sizeDiff int64
		if recycleNode.IsDir(ctx) {
			treeSize, err := recycleNode.GetTreeSize(ctx)
			if err != nil {
				return err
			}
			sizeDiff = int64(treeSize)
		} else {
			sizeDiff = recycleNode.Blobsize
		}
		return t.Propagate(ctx, targetNode, sizeDiff)
	}
	return recycleNode, parent, fn, nil
}
   612  
// PurgeRecycleItemFunc returns a node and a function to purge it from the trash.
//
// The returned closure permanently removes the node (content, metadata and
// blob) and then deletes its link in the trash.
func (t *Tree) PurgeRecycleItemFunc(ctx context.Context, spaceid, key string, path string) (*node.Node, func() error, error) {
	rn, trashItem, deletedNodePath, _, err := t.readRecycleItem(ctx, spaceid, key, path)
	if err != nil {
		return nil, nil, err
	}

	fn := func() error {
		if err := t.removeNode(ctx, deletedNodePath, rn); err != nil {
			return err
		}

		// delete item link in trash
		deletePath := trashItem
		if path != "" && path != "/" {
			// the trash item is a symlink; resolve it so the sub-path can be
			// joined onto the real location
			resolvedTrashRoot, err := filepath.EvalSymlinks(trashItem)
			if err != nil {
				return errors.Wrap(err, "Decomposedfs: could not resolve trash root")
			}
			deletePath = filepath.Join(resolvedTrashRoot, path)
		}
		// NOTE: `=` assigns to the err captured from readRecycleItem above
		if err = os.Remove(deletePath); err != nil {
			log.Error().Err(err).Str("deletePath", deletePath).Msg("error deleting trash item")
			return err
		}

		return nil
	}

	return rn, fn, nil
}
   644  
   645  func (t *Tree) removeNode(ctx context.Context, path string, n *node.Node) error {
   646  	// delete the actual node
   647  	if err := utils.RemoveItem(path); err != nil {
   648  		log.Error().Err(err).Str("path", path).Msg("error purging node")
   649  		return err
   650  	}
   651  
   652  	if err := t.lookup.MetadataBackend().Purge(ctx, path); err != nil {
   653  		log.Error().Err(err).Str("path", t.lookup.MetadataBackend().MetadataPath(path)).Msg("error purging node metadata")
   654  		return err
   655  	}
   656  
   657  	// delete blob from blobstore
   658  	if n.BlobID != "" {
   659  		if err := t.DeleteBlob(n); err != nil {
   660  			log.Error().Err(err).Str("blobID", n.BlobID).Msg("error purging nodes blob")
   661  			return err
   662  		}
   663  	}
   664  
   665  	// delete revisions
   666  	// posixfs doesn't do revisions yet
   667  
   668  	return nil
   669  }
   670  
   671  // Propagate propagates changes to the root of the tree
   672  func (t *Tree) Propagate(ctx context.Context, n *node.Node, sizeDiff int64) (err error) {
   673  	// We do not propagate size diffs here but rely on the assimilation to take care of the tree sizes instead
   674  	return t.propagator.Propagate(ctx, n, sizeDiff)
   675  }
   676  
// WriteBlob writes a blob to the blobstore by uploading the file at source
// as the content of the given node.
func (t *Tree) WriteBlob(node *node.Node, source string) error {
	return t.blobstore.Upload(node, source)
}
   681  
// ReadBlob reads a blob from the blobstore. The caller must close the
// returned reader.
func (t *Tree) ReadBlob(node *node.Node) (io.ReadCloser, error) {
	return t.blobstore.Download(node)
}
   686  
   687  // DeleteBlob deletes a blob from the blobstore
   688  func (t *Tree) DeleteBlob(node *node.Node) error {
   689  	if node == nil {
   690  		return fmt.Errorf("could not delete blob, nil node was given")
   691  	}
   692  	return t.blobstore.Delete(node)
   693  }
   694  
// BuildSpaceIDIndexEntry returns the entry for the space id index.
// For posixfs the entry is simply the node id; spaceID is not used here but
// kept for signature compatibility.
func (t *Tree) BuildSpaceIDIndexEntry(spaceID, nodeID string) string {
	return nodeID
}
   699  
// ResolveSpaceIDIndexEntry returns the node id for the space id index entry.
// The entry already is the node id (see BuildSpaceIDIndexEntry), so both
// values are passed through unchanged.
func (t *Tree) ResolveSpaceIDIndexEntry(spaceid, entry string) (string, string, error) {
	return spaceid, entry, nil
}
   704  
// InitNewNode initializes a new node.
//
// It creates the parent directory structure, locks the node's metadata and
// creates the (empty) node file, which carries the mtime of the resource.
// Note that a non-nil unlock func is returned even alongside AlreadyExists
// and quota errors so the caller can release the metadata lock; only when
// taking the lock itself fails is unlock nil.
func (t *Tree) InitNewNode(ctx context.Context, n *node.Node, fsize uint64) (metadata.UnlockFunc, error) {
	_, span := tracer.Start(ctx, "InitNewNode")
	defer span.End()
	// create folder structure (if needed)
	if err := os.MkdirAll(filepath.Dir(n.InternalPath()), 0700); err != nil {
		return nil, err
	}

	// create and write lock new node metadata
	unlock, err := t.lookup.MetadataBackend().Lock(n.InternalPath())
	if err != nil {
		return nil, err
	}

	// we also need to touch the actual node file here it stores the mtime of the resource
	// O_EXCL makes this fail if the node file already exists
	h, err := os.OpenFile(n.InternalPath(), os.O_CREATE|os.O_EXCL, 0600)
	if err != nil {
		if os.IsExist(err) {
			return unlock, errtypes.AlreadyExists(n.InternalPath())
		}
		return unlock, err
	}
	h.Close()

	// verify the space has room for fsize additional bytes
	if _, err := node.CheckQuota(ctx, n.SpaceRoot, false, 0, fsize); err != nil {
		return unlock, err
	}

	return unlock, nil
}
   736  
   737  // TODO check if node exists?
   738  func (t *Tree) createDirNode(ctx context.Context, n *node.Node) (err error) {
   739  	ctx, span := tracer.Start(ctx, "createDirNode")
   740  	defer span.End()
   741  
   742  	idcache := t.lookup.(*lookup.Lookup).IDCache
   743  	// create a directory node
   744  	parentPath, ok := idcache.Get(ctx, n.SpaceID, n.ParentID)
   745  	if !ok {
   746  		return errtypes.NotFound(n.ParentID)
   747  	}
   748  	path := filepath.Join(parentPath, n.Name)
   749  
   750  	// lock the meta file
   751  	unlock, err := t.lookup.MetadataBackend().Lock(path)
   752  	if err != nil {
   753  		return err
   754  	}
   755  	defer func() {
   756  		_ = unlock()
   757  	}()
   758  
   759  	if err := os.MkdirAll(path, 0700); err != nil {
   760  		return errors.Wrap(err, "Decomposedfs: error creating node")
   761  	}
   762  
   763  	if err := idcache.Set(ctx, n.SpaceID, n.ID, path); err != nil {
   764  		log.Error().Err(err).Str("spaceID", n.SpaceID).Str("id", n.ID).Str("path", path).Msg("could not cache id")
   765  	}
   766  
   767  	attributes := n.NodeMetadata(ctx)
   768  	attributes[prefixes.IDAttr] = []byte(n.ID)
   769  	attributes[prefixes.TreesizeAttr] = []byte("0") // initialize as empty, TODO why bother? if it is not set we could treat it as 0?
   770  	if t.options.TreeTimeAccounting || t.options.TreeSizeAccounting {
   771  		attributes[prefixes.PropagationAttr] = []byte("1") // mark the node for propagation
   772  	}
   773  	return n.SetXattrsWithContext(ctx, attributes, false)
   774  }
   775  
   776  var nodeIDRegep = regexp.MustCompile(`.*/nodes/([^.]*).*`)
   777  
   778  // TODO refactor the returned params into Node properties? would make all the path transformations go away...
   779  func (t *Tree) readRecycleItem(ctx context.Context, spaceID, key, path string) (recycleNode *node.Node, trashItem string, deletedNodePath string, origin string, err error) {
   780  	if key == "" {
   781  		return nil, "", "", "", errtypes.InternalError("key is empty")
   782  	}
   783  
   784  	backend := t.lookup.MetadataBackend()
   785  	var nodeID string
   786  
   787  	trashItem = filepath.Join(t.lookup.InternalRoot(), "spaces", lookup.Pathify(spaceID, 1, 2), "trash", lookup.Pathify(key, 4, 2))
   788  	resolvedTrashItem, err := filepath.EvalSymlinks(trashItem)
   789  	if err != nil {
   790  		return
   791  	}
   792  	deletedNodePath, err = filepath.EvalSymlinks(filepath.Join(resolvedTrashItem, path))
   793  	if err != nil {
   794  		return
   795  	}
   796  	nodeID = nodeIDRegep.ReplaceAllString(deletedNodePath, "$1")
   797  	nodeID = strings.ReplaceAll(nodeID, "/", "")
   798  
   799  	recycleNode = node.New(spaceID, nodeID, "", "", 0, "", provider.ResourceType_RESOURCE_TYPE_INVALID, nil, t.lookup)
   800  	recycleNode.SpaceRoot, err = node.ReadNode(ctx, t.lookup, spaceID, spaceID, false, nil, false)
   801  	if err != nil {
   802  		return
   803  	}
   804  	recycleNode.SetType(t.lookup.TypeFromPath(ctx, deletedNodePath))
   805  
   806  	var attrBytes []byte
   807  	if recycleNode.Type(ctx) == provider.ResourceType_RESOURCE_TYPE_FILE {
   808  		// lookup blobID in extended attributes
   809  		if attrBytes, err = backend.Get(ctx, deletedNodePath, prefixes.BlobIDAttr); err == nil {
   810  			recycleNode.BlobID = string(attrBytes)
   811  		} else {
   812  			return
   813  		}
   814  
   815  		// lookup blobSize in extended attributes
   816  		if recycleNode.Blobsize, err = backend.GetInt64(ctx, deletedNodePath, prefixes.BlobsizeAttr); err != nil {
   817  			return
   818  		}
   819  	}
   820  
   821  	// lookup parent id in extended attributes
   822  	if attrBytes, err = backend.Get(ctx, deletedNodePath, prefixes.ParentidAttr); err == nil {
   823  		recycleNode.ParentID = string(attrBytes)
   824  	} else {
   825  		return
   826  	}
   827  
   828  	// lookup name in extended attributes
   829  	if attrBytes, err = backend.Get(ctx, deletedNodePath, prefixes.NameAttr); err == nil {
   830  		recycleNode.Name = string(attrBytes)
   831  	} else {
   832  		return
   833  	}
   834  
   835  	// get origin node, is relative to space root
   836  	origin = "/"
   837  
   838  	// lookup origin path in extended attributes
   839  	if attrBytes, err = backend.Get(ctx, resolvedTrashItem, prefixes.TrashOriginAttr); err == nil {
   840  		origin = filepath.Join(string(attrBytes), path)
   841  	} else {
   842  		log.Error().Err(err).Str("trashItem", trashItem).Str("deletedNodePath", deletedNodePath).Msg("could not read origin path, restoring to /")
   843  	}
   844  
   845  	return
   846  }
   847  
   848  func isLockFile(path string) bool {
   849  	return strings.HasSuffix(path, ".lock") || strings.HasSuffix(path, ".flock") || strings.HasSuffix(path, ".mlock")
   850  }
   851  
   852  func isTrash(path string) bool {
   853  	return strings.HasSuffix(path, ".trashinfo") || strings.HasSuffix(path, ".trashitem")
   854  }
   855  
   856  func (t *Tree) isUpload(path string) bool {
   857  	return strings.HasPrefix(path, t.options.UploadDirectory)
   858  }