github.com/cs3org/reva/v2@v2.27.7/pkg/storage/utils/decomposedfs/decomposedfs.go (about)

     1  // Copyright 2018-2021 CERN
     2  //
     3  // Licensed under the Apache License, Version 2.0 (the "License");
     4  // you may not use this file except in compliance with the License.
     5  // You may obtain a copy of the License at
     6  //
     7  //     http://www.apache.org/licenses/LICENSE-2.0
     8  //
     9  // Unless required by applicable law or agreed to in writing, software
    10  // distributed under the License is distributed on an "AS IS" BASIS,
    11  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    12  // See the License for the specific language governing permissions and
    13  // limitations under the License.
    14  //
    15  // In applying this license, CERN does not waive the privileges and immunities
    16  // granted to it by virtue of its status as an Intergovernmental Organization
    17  // or submit itself to any jurisdiction.
    18  
    19  package decomposedfs
    20  
    21  import (
    22  	"context"
    23  	"fmt"
    24  	"io"
    25  	"math"
    26  	"net/url"
    27  	"path"
    28  	"path/filepath"
    29  	"strconv"
    30  	"strings"
    31  	"time"
    32  
    33  	user "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1"
    34  	rpcv1beta1 "github.com/cs3org/go-cs3apis/cs3/rpc/v1beta1"
    35  	provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1"
    36  	"github.com/jellydator/ttlcache/v2"
    37  	"github.com/pkg/errors"
    38  	"github.com/rs/zerolog"
    39  	tusd "github.com/tus/tusd/v2/pkg/handler"
    40  	microstore "go-micro.dev/v4/store"
    41  	"go.opentelemetry.io/otel"
    42  	"go.opentelemetry.io/otel/trace"
    43  	"golang.org/x/sync/errgroup"
    44  
    45  	ctxpkg "github.com/cs3org/reva/v2/pkg/ctx"
    46  	"github.com/cs3org/reva/v2/pkg/errtypes"
    47  	"github.com/cs3org/reva/v2/pkg/events"
    48  	"github.com/cs3org/reva/v2/pkg/logger"
    49  	"github.com/cs3org/reva/v2/pkg/rgrpc/todo/pool"
    50  	"github.com/cs3org/reva/v2/pkg/rhttp/datatx/metrics"
    51  	"github.com/cs3org/reva/v2/pkg/storage"
    52  	"github.com/cs3org/reva/v2/pkg/storage/utils/chunking"
    53  	"github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/aspects"
    54  	"github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/lookup"
    55  	"github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/metadata"
    56  	"github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/migrator"
    57  	"github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/node"
    58  	"github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/options"
    59  	"github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/permissions"
    60  	"github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/spaceidindex"
    61  	"github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/timemanager"
    62  	"github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/trashbin"
    63  	"github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/tree"
    64  	"github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/upload"
    65  	"github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/usermapper"
    66  	"github.com/cs3org/reva/v2/pkg/storage/utils/filelocks"
    67  	"github.com/cs3org/reva/v2/pkg/storage/utils/templates"
    68  	"github.com/cs3org/reva/v2/pkg/storagespace"
    69  	"github.com/cs3org/reva/v2/pkg/store"
    70  	"github.com/cs3org/reva/v2/pkg/utils"
    71  )
    72  
// CtxKey is a dedicated type for context keys used by this package to avoid
// collisions with context keys defined elsewhere.
type CtxKey int

const (
	// CtxKeySpaceGID is the context key under which a space GID is stored.
	CtxKeySpaceGID CtxKey = iota
)
    78  
var (
	// tracer is the package-level OpenTelemetry tracer, initialized in init().
	tracer trace.Tracer

	// _registeredEvents lists the event types the async postprocessing
	// consumer (see Postprocessing) subscribes to on the event stream.
	_registeredEvents = []events.Unmarshaller{
		events.PostprocessingFinished{},
		events.PostprocessingStepFinished{},
		events.RestartPostprocessing{},
	}
)
    88  
// init creates the package-level tracer used by all Decomposedfs methods.
func init() {
	tracer = otel.Tracer("github.com/cs3org/reva/pkg/storage/utils/decomposedfs")
}
    92  
// Session is the interface that OcisSession implements. By combining tus.Upload,
// storage.UploadSession and custom functions we can reuse the same struct throughout
// the whole upload lifecycle.
//
// Some functions that are only used by decomposedfs are not yet part of this interface.
// They might be added after more refactoring.
type Session interface {
	tusd.Upload
	storage.UploadSession
	upload.Session
	// LockID returns the lock id associated with this upload session, if any.
	LockID() string
}
   105  
// SessionStore manages the lifecycle of upload sessions.
type SessionStore interface {
	// New creates a new upload session.
	New(ctx context.Context) *upload.OcisSession
	// List returns all currently known upload sessions.
	List(ctx context.Context) ([]*upload.OcisSession, error)
	// Get returns the upload session with the given id.
	Get(ctx context.Context, id string) (*upload.OcisSession, error)
	// Cleanup removes session state. The flags control whether node metadata
	// is reverted, whether the uploaded bytes are kept, and whether the
	// postprocessing marker is removed from the node.
	Cleanup(ctx context.Context, session upload.Session, revertNodeMetadata, keepUpload, unmarkPostprocessing bool)
}
   112  
// Decomposedfs provides the base for decomposed filesystem implementations
type Decomposedfs struct {
	lu           node.PathLookup         // resolves references and ids to nodes / paths
	tp           node.Tree               // tree operations (create dir, touch file, propagate, ...)
	trashbin     trashbin.Trashbin       // trash/recycle-bin implementation
	o            *options.Options        // parsed driver options
	p            permissions.Permissions // permission assembly and checks
	um           usermapper.Mapper       // user mapping; NullMapper when none is configured (see New)
	chunkHandler *chunking.ChunkHandler  // chunked-upload handling, rooted at <root>/uploads
	stream       events.Stream           // event stream; required when async uploads are enabled
	sessionStore SessionStore            // upload session bookkeeping

	// UserCache is a TTL cache; presumably caches user lookups — usage is in
	// other files of this package.
	UserCache       *ttlcache.Cache
	userSpaceIndex  *spaceidindex.Index // "by-user-id" index of spaces
	groupSpaceIndex *spaceidindex.Index // "by-group-id" index of spaces
	spaceTypeIndex  *spaceidindex.Index // "by-type" index of spaces

	log *zerolog.Logger
}
   132  
   133  // NewDefault returns an instance with default components
   134  func NewDefault(m map[string]interface{}, bs tree.Blobstore, es events.Stream, log *zerolog.Logger) (storage.FS, error) {
   135  	if log == nil {
   136  		log = &zerolog.Logger{}
   137  	}
   138  
   139  	o, err := options.New(m)
   140  	if err != nil {
   141  		return nil, err
   142  	}
   143  
   144  	var lu *lookup.Lookup
   145  	switch o.MetadataBackend {
   146  	case "xattrs":
   147  		lu = lookup.New(metadata.NewXattrsBackend(o.Root, o.FileMetadataCache), o, &timemanager.Manager{})
   148  	case "messagepack":
   149  		lu = lookup.New(metadata.NewMessagePackBackend(o.Root, o.FileMetadataCache), o, &timemanager.Manager{})
   150  	default:
   151  		return nil, fmt.Errorf("unknown metadata backend %s, only 'messagepack' or 'xattrs' (default) supported", o.MetadataBackend)
   152  	}
   153  
   154  	tp := tree.New(lu, bs, o, store.Create(
   155  		store.Store(o.IDCache.Store),
   156  		store.TTL(o.IDCache.TTL),
   157  		store.Size(o.IDCache.Size),
   158  		microstore.Nodes(o.IDCache.Nodes...),
   159  		microstore.Database(o.IDCache.Database),
   160  		microstore.Table(o.IDCache.Table),
   161  		store.DisablePersistence(o.IDCache.DisablePersistence),
   162  		store.Authentication(o.IDCache.AuthUsername, o.IDCache.AuthPassword),
   163  	), log)
   164  
   165  	permissionsSelector, err := pool.PermissionsSelector(o.PermissionsSVC, pool.WithTLSMode(o.PermTLSMode))
   166  	if err != nil {
   167  		return nil, err
   168  	}
   169  
   170  	aspects := aspects.Aspects{
   171  		Lookup:            lu,
   172  		Tree:              tp,
   173  		Permissions:       permissions.NewPermissions(node.NewPermissions(lu), permissionsSelector),
   174  		EventStream:       es,
   175  		DisableVersioning: o.DisableVersioning,
   176  		Trashbin:          &DecomposedfsTrashbin{},
   177  	}
   178  
   179  	return New(o, aspects, log)
   180  }
   181  
// New returns an implementation of the storage.FS interface that talks to
// a local filesystem.
//
// The aspects bundle the pluggable components (lookup, tree, permissions,
// trashbin, user mapper, event stream); o carries the parsed driver options.
// A nil log is replaced with a zero-value logger. Initialization order
// matters: tree setup, migrations, indexes, session store, trashbin, and
// finally the optional async postprocessing consumers.
func New(o *options.Options, aspects aspects.Aspects, log *zerolog.Logger) (storage.FS, error) {
	if log == nil {
		log = &zerolog.Logger{}
	}

	// set up the tree before anything else touches the storage
	err := aspects.Tree.Setup()
	if err != nil {
		log.Error().Err(err).Msg("could not setup tree")
		return nil, errors.Wrap(err, "could not setup tree")
	}

	// Run migrations & return
	m := migrator.New(aspects.Lookup, log)
	err = m.RunMigrations()
	if err != nil {
		log.Error().Err(err).Msg("could not migrate tree")
		return nil, errors.Wrap(err, "could not migrate tree")
	}

	// optional tuning of the file-lock retry behavior
	if o.MaxAcquireLockCycles != 0 {
		filelocks.SetMaxLockCycles(o.MaxAcquireLockCycles)
	}

	if o.LockCycleDurationFactor != 0 {
		filelocks.SetLockCycleDurationFactor(o.LockCycleDurationFactor)
	}
	// indexes mapping users, groups and space types to space ids
	userSpaceIndex := spaceidindex.New(filepath.Join(o.Root, "indexes"), "by-user-id")
	err = userSpaceIndex.Init()
	if err != nil {
		return nil, err
	}
	groupSpaceIndex := spaceidindex.New(filepath.Join(o.Root, "indexes"), "by-group-id")
	err = groupSpaceIndex.Init()
	if err != nil {
		return nil, err
	}
	spaceTypeIndex := spaceidindex.New(filepath.Join(o.Root, "indexes"), "by-type")
	err = spaceTypeIndex.Init()
	if err != nil {
		return nil, err
	}

	if aspects.Trashbin == nil {
		return nil, errors.New("need trashbin")
	}
	// set a null usermapper if we don't have one
	if aspects.UserMapper == nil {
		aspects.UserMapper = &usermapper.NullMapper{}
	}

	fs := &Decomposedfs{
		tp:              aspects.Tree,
		lu:              aspects.Lookup,
		trashbin:        aspects.Trashbin,
		o:               o,
		p:               aspects.Permissions,
		um:              aspects.UserMapper,
		chunkHandler:    chunking.NewChunkHandler(filepath.Join(o.Root, "uploads")),
		stream:          aspects.EventStream,
		UserCache:       ttlcache.NewCache(),
		userSpaceIndex:  userSpaceIndex,
		groupSpaceIndex: groupSpaceIndex,
		spaceTypeIndex:  spaceTypeIndex,
		log:             log,
	}
	// session store and trashbin both need the (mostly) constructed fs
	fs.sessionStore = upload.NewSessionStore(fs, aspects, o.Root, o.AsyncFileUploads, o.Tokens, log)
	if err = fs.trashbin.Setup(fs); err != nil {
		return nil, err
	}

	if o.AsyncFileUploads {
		if fs.stream == nil {
			log.Error().Msg("need event stream for async file processing")
			return nil, errors.New("need nats for async file processing")
		}

		// subscribe to postprocessing events with the "dcfs" consumer group
		ch, err := events.Consume(fs.stream, "dcfs", _registeredEvents...)
		if err != nil {
			return nil, err
		}

		if o.Events.NumConsumers <= 0 {
			o.Events.NumConsumers = 1
		}

		// NOTE(review): these goroutines run for the lifetime of the process;
		// they end when the event channel is closed — confirm Shutdown is not
		// expected to stop them.
		for i := 0; i < o.Events.NumConsumers; i++ {
			go fs.Postprocessing(ch)
		}
	}

	return fs, nil
}
   276  
// Postprocessing starts the postprocessing result collector. It consumes
// events from ch until the channel is closed and reacts to the event types
// listed in _registeredEvents: it finalizes or reverts uploads, restarts
// postprocessing on request, and records virus-scan results.
func (fs *Decomposedfs) Postprocessing(ch <-chan events.Event) {
	ctx := context.TODO() // we should pass the trace id in the event and initialize the trace provider here
	ctx, span := tracer.Start(ctx, "Postprocessing")
	defer span.End()
	log := logger.New()
	for event := range ch {
		switch ev := event.Event.(type) {
		case events.PostprocessingFinished:
			// the postprocessing pipeline reported a final outcome for an upload
			sublog := log.With().Str("event", "PostprocessingFinished").Str("uploadid", ev.UploadID).Logger()
			session, err := fs.sessionStore.Get(ctx, ev.UploadID)
			if err != nil {
				sublog.Error().Err(err).Msg("Failed to get upload")
				continue // NOTE: since we can't get the upload, we can't delete the blob
			}

			// NOTE(review): ctx is reassigned here and reused by later loop
			// iterations — confirm session context leakage across events is intended.
			ctx = session.Context(ctx)

			n, err := session.Node(ctx)
			if err != nil {
				sublog.Error().Err(err).Msg("could not read node")
				continue
			}
			sublog = log.With().Str("spaceid", session.SpaceID()).Str("nodeid", session.NodeID()).Logger()
			if !n.Exists {
				sublog.Debug().Msg("node no longer exists")
				fs.sessionStore.Cleanup(ctx, session, false, false, false)
				continue
			}

			// flags steering cleanup below, derived from the outcome
			var (
				failed             bool
				revertNodeMetadata bool
				keepUpload         bool
			)
			unmarkPostprocessing := true

			switch ev.Outcome {
			default:
				// unknown outcomes are treated like an abort
				sublog.Error().Str("outcome", string(ev.Outcome)).Msg("unknown postprocessing outcome - aborting")
				fallthrough
			case events.PPOutcomeAbort:
				failed = true
				revertNodeMetadata = true
				keepUpload = true
				metrics.UploadSessionsAborted.Inc()
			case events.PPOutcomeContinue:
				if err := session.Finalize(ctx); err != nil {
					sublog.Error().Err(err).Msg("could not finalize upload")
					failed = true
					revertNodeMetadata = false
					keepUpload = true
					// keep postprocessing status so the upload is not deleted during housekeeping
					unmarkPostprocessing = false
				} else {
					metrics.UploadSessionsFinalized.Inc()
				}
			case events.PPOutcomeDelete:
				failed = true
				revertNodeMetadata = true
				metrics.UploadSessionsDeleted.Inc()
			}

			// getParent lazily resolves the parent node, logging on failure
			getParent := func() *node.Node {
				p, err := n.Parent(ctx)
				if err != nil {
					sublog.Error().Err(err).Msg("could not read parent")
					return nil
				}
				return p
			}

			now := time.Now()
			if failed {
				// if no other upload session is in progress (processing id != session id) or has finished (processing id == "")
				latestSession, err := n.ProcessingID(ctx)
				if err != nil {
					sublog.Error().Err(err).Msg("reading node for session failed")
				}
				if latestSession == session.ID() {
					// propagate reverted sizeDiff after failed postprocessing
					if err := fs.tp.Propagate(ctx, n, -session.SizeDiff()); err != nil {
						sublog.Error().Err(err).Msg("could not propagate tree size change")
					}
				}
			} else if p := getParent(); p != nil {
				// update parent tmtime to propagate etag change after successful postprocessing
				_ = p.SetTMTime(ctx, &now)
				if err := fs.tp.Propagate(ctx, p, 0); err != nil {
					sublog.Error().Err(err).Msg("could not propagate etag change")
				}
			}

			fs.sessionStore.Cleanup(ctx, session, revertNodeMetadata, keepUpload, unmarkPostprocessing)

			// an upload that landed on a "versionsPath" is a new version of an
			// existing file, not a new file
			var isVersion bool
			if session.NodeExists() {
				info, err := session.GetInfo(ctx)
				if err == nil && info.MetaData["versionsPath"] != "" {
					isVersion = true
				}
			}

			// tell interested services (e.g. search, notifications) the upload is done
			if err := events.Publish(
				ctx,
				fs.stream,
				events.UploadReady{
					UploadID:      ev.UploadID,
					Failed:        failed,
					ExecutingUser: ev.ExecutingUser,
					Filename:      ev.Filename,
					FileRef: &provider.Reference{
						ResourceId: &provider.ResourceId{
							StorageId: session.ProviderID(),
							SpaceId:   session.SpaceID(),
							OpaqueId:  session.SpaceID(),
						},
						Path: utils.MakeRelativePath(filepath.Join(session.Dir(), session.Filename())),
					},
					Timestamp:         utils.TimeToTS(now),
					SpaceOwner:        n.SpaceOwnerOrManager(ctx),
					IsVersion:         isVersion,
					ImpersonatingUser: ev.ImpersonatingUser,
				},
			); err != nil {
				sublog.Error().Err(err).Msg("Failed to publish UploadReady event")
			}
		case events.RestartPostprocessing:
			// re-trigger postprocessing for an existing upload session
			sublog := log.With().Str("event", "RestartPostprocessing").Str("uploadid", ev.UploadID).Logger()
			session, err := fs.sessionStore.Get(ctx, ev.UploadID)
			if err != nil {
				sublog.Error().Err(err).Msg("Failed to get upload")
				continue
			}
			n, err := session.Node(ctx)
			if err != nil {
				sublog.Error().Err(err).Msg("could not read node")
				continue
			}
			sublog = log.With().Str("spaceid", session.SpaceID()).Str("nodeid", session.NodeID()).Logger()
			s, err := session.URL(ctx)
			if err != nil {
				sublog.Error().Err(err).Msg("could not create url")
				continue
			}

			metrics.UploadSessionsRestarted.Inc()

			// restart postprocessing
			if err := events.Publish(ctx, fs.stream, events.BytesReceived{
				UploadID:      session.ID(),
				URL:           s,
				SpaceOwner:    n.SpaceOwnerOrManager(ctx),
				ExecutingUser: &user.User{Id: &user.UserId{OpaqueId: "postprocessing-restart"}}, // send nil instead?
				ResourceID:    &provider.ResourceId{SpaceId: n.SpaceID, OpaqueId: n.ID},
				Filename:      session.Filename(),
				Filesize:      uint64(session.Size()),
			}); err != nil {
				sublog.Error().Err(err).Msg("Failed to publish BytesReceived event")
			}
		case events.PostprocessingStepFinished:
			// a single step of the pipeline finished; only antivirus results
			// are persisted here
			sublog := log.With().Str("event", "PostprocessingStepFinished").Str("uploadid", ev.UploadID).Logger()
			if ev.FinishedStep != events.PPStepAntivirus {
				// atm we are only interested in antivirus results
				continue
			}

			res := ev.Result.(events.VirusscanResult)
			if res.ErrorMsg != "" {
				// scan failed somehow
				// Should we handle this here?
				continue
			}
			sublog = log.With().Str("scan_description", res.Description).Bool("infected", res.Infected).Logger()

			var n *node.Node
			switch ev.UploadID {
			case "":
				// uploadid is empty -> this was an on-demand scan
				/* ON DEMAND SCANNING NOT SUPPORTED ATM
				ctx := ctxpkg.ContextSetUser(context.Background(), ev.ExecutingUser)
				ref := &provider.Reference{ResourceId: ev.ResourceID}

				no, err := fs.lu.NodeFromResource(ctx, ref)
				if err != nil {
					log.Error().Err(err).Interface("resourceID", ev.ResourceID).Msg("Failed to get node after scan")
					continue

				}
				n = no
				if ev.Outcome == events.PPOutcomeDelete {
					// antivir wants us to delete the file. We must obey and need to

					// check if there a previous versions existing
					revs, err := fs.ListRevisions(ctx, ref)
					if len(revs) == 0 {
						if err != nil {
							log.Error().Err(err).Interface("resourceID", ev.ResourceID).Msg("Failed to list revisions. Fallback to delete file")
						}

						// no versions -> trash file
						err := fs.Delete(ctx, ref)
						if err != nil {
							log.Error().Err(err).Interface("resourceID", ev.ResourceID).Msg("Failed to delete infected resource")
							continue
						}

						// now purge it from the recycle bin
						if err := fs.PurgeRecycleItem(ctx, &provider.Reference{ResourceId: &provider.ResourceId{SpaceId: n.SpaceID, OpaqueId: n.SpaceID}}, n.ID, "/"); err != nil {
							log.Error().Err(err).Interface("resourceID", ev.ResourceID).Msg("Failed to purge infected resource from trash")
						}

						// remove cache entry in gateway
						fs.cache.RemoveStatContext(ctx, ev.ExecutingUser.GetId(), &provider.ResourceId{SpaceId: n.SpaceID, OpaqueId: n.ID})
						continue
					}

					// we have versions - find the newest
					versions := make(map[uint64]string) // remember all versions - we need them later
					var nv uint64
					for _, v := range revs {
						versions[v.Mtime] = v.Key
						if v.Mtime > nv {
							nv = v.Mtime
						}
					}

					// restore newest version
					if err := fs.RestoreRevision(ctx, ref, versions[nv]); err != nil {
						log.Error().Err(err).Interface("resourceID", ev.ResourceID).Str("revision", versions[nv]).Msg("Failed to restore revision")
						continue
					}

					// now find infected version
					revs, err = fs.ListRevisions(ctx, ref)
					if err != nil {
						log.Error().Err(err).Interface("resourceID", ev.ResourceID).Msg("Error listing revisions after restore")
					}

					for _, v := range revs {
						// we looking for a version that was previously not there
						if _, ok := versions[v.Mtime]; ok {
							continue
						}

						if err := fs.DeleteRevision(ctx, ref, v.Key); err != nil {
							log.Error().Err(err).Interface("resourceID", ev.ResourceID).Str("revision", v.Key).Msg("Failed to delete revision")
						}
					}

					// remove cache entry in gateway
					fs.cache.RemoveStatContext(ctx, ev.ExecutingUser.GetId(), &provider.ResourceId{SpaceId: n.SpaceID, OpaqueId: n.ID})
					continue
				}
				*/
			default:
				// uploadid is not empty -> this is an async upload
				session, err := fs.sessionStore.Get(ctx, ev.UploadID)
				if err != nil {
					sublog.Error().Err(err).Msg("Failed to get upload")
					continue
				}

				n, err = session.Node(ctx)
				if err != nil {
					sublog.Error().Err(err).Msg("Failed to get node after scan")
					continue
				}
				sublog = log.With().Str("spaceid", session.SpaceID()).Str("nodeid", session.NodeID()).Logger()

				// persist the scan result on the session as well as on the node below
				session.SetScanData(res.Description, res.Scandate)
				if err := session.Persist(ctx); err != nil {
					sublog.Error().Err(err).Msg("Failed to persist scan results")
				}
			}

			if err := n.SetScanData(ctx, res.Description, res.Scandate); err != nil {
				sublog.Error().Err(err).Msg("Failed to set scan results")
				continue
			}

			metrics.UploadSessionsScanned.Inc()
		default:
			log.Error().Interface("event", ev).Msg("Unknown event")
		}
	}
}
   564  
// Shutdown shuts down the storage
func (fs *Decomposedfs) Shutdown(ctx context.Context) error {
	// NOTE(review): nothing is released here — postprocessing consumers and
	// caches keep running; confirm this no-op is intended.
	return nil
}
   569  
   570  // GetQuota returns the quota available
   571  // TODO Document in the cs3 should we return quota or free space?
   572  func (fs *Decomposedfs) GetQuota(ctx context.Context, ref *provider.Reference) (total uint64, inUse uint64, remaining uint64, err error) {
   573  	ctx, span := tracer.Start(ctx, "GetQuota")
   574  	defer span.End()
   575  	var n *node.Node
   576  	if ref == nil {
   577  		err = errtypes.BadRequest("no space given")
   578  		return 0, 0, 0, err
   579  	}
   580  	if n, err = fs.lu.NodeFromResource(ctx, ref); err != nil {
   581  		return 0, 0, 0, err
   582  	}
   583  
   584  	if !n.Exists {
   585  		err = errtypes.NotFound(filepath.Join(n.ParentID, n.Name))
   586  		return 0, 0, 0, err
   587  	}
   588  
   589  	rp, err := fs.p.AssemblePermissions(ctx, n)
   590  	switch {
   591  	case err != nil:
   592  		return 0, 0, 0, err
   593  	case !rp.GetQuota && !fs.p.ListAllSpaces(ctx):
   594  		f, _ := storagespace.FormatReference(ref)
   595  		if rp.Stat {
   596  			return 0, 0, 0, errtypes.PermissionDenied(f)
   597  		}
   598  		return 0, 0, 0, errtypes.NotFound(f)
   599  	}
   600  
   601  	// FIXME move treesize & quota to fieldmask
   602  	ri, err := n.AsResourceInfo(ctx, rp, []string{"treesize", "quota"}, []string{}, true)
   603  	if err != nil {
   604  		return 0, 0, 0, err
   605  	}
   606  
   607  	quotaStr := node.QuotaUnknown
   608  	if ri.Opaque != nil && ri.Opaque.Map != nil && ri.Opaque.Map["quota"] != nil && ri.Opaque.Map["quota"].Decoder == "plain" {
   609  		quotaStr = string(ri.Opaque.Map["quota"].Value)
   610  	}
   611  
   612  	return fs.calculateTotalUsedRemaining(quotaStr, ri.Size)
   613  }
   614  
   615  func (fs *Decomposedfs) calculateTotalUsedRemaining(quotaStr string, inUse uint64) (uint64, uint64, uint64, error) {
   616  	var err error
   617  	var total uint64
   618  
   619  	remaining := uint64(math.MaxUint64)
   620  	switch quotaStr {
   621  	case node.QuotaUncalculated, node.QuotaUnknown:
   622  		// best we can do is return current total
   623  		// TODO indicate unlimited total? -> in opaque data?
   624  	case node.QuotaUnlimited:
   625  		total = 0
   626  	default:
   627  		total, err = strconv.ParseUint(quotaStr, 10, 64)
   628  		if err != nil {
   629  			return 0, 0, 0, err
   630  		}
   631  
   632  		switch {
   633  		case total > inUse:
   634  			remaining = total - inUse
   635  		case total <= inUse:
   636  			remaining = 0
   637  		}
   638  
   639  	}
   640  	return total, inUse, remaining, nil
   641  }
   642  
   643  // CreateHome creates a new home node for the given user
   644  func (fs *Decomposedfs) CreateHome(ctx context.Context) (err error) {
   645  	ctx, span := tracer.Start(ctx, "CreateHome")
   646  	defer span.End()
   647  	if fs.o.UserLayout == "" {
   648  		return errtypes.NotSupported("Decomposedfs: CreateHome() home supported disabled")
   649  	}
   650  
   651  	u := ctxpkg.ContextMustGetUser(ctx)
   652  	res, err := fs.CreateStorageSpace(ctx, &provider.CreateStorageSpaceRequest{
   653  		Type:  _spaceTypePersonal,
   654  		Owner: u,
   655  	})
   656  	if err != nil {
   657  		return err
   658  	}
   659  	if res.Status.Code != rpcv1beta1.Code_CODE_OK {
   660  		return errtypes.NewErrtypeFromStatus(res.Status)
   661  	}
   662  	return nil
   663  }
   664  
   665  // GetHome is called to look up the home path for a user
   666  // It is NOT supposed to return the internal path but the external path
   667  func (fs *Decomposedfs) GetHome(ctx context.Context) (string, error) {
   668  	ctx, span := tracer.Start(ctx, "GetHome")
   669  	defer span.End()
   670  	if fs.o.UserLayout == "" {
   671  		return "", errtypes.NotSupported("Decomposedfs: GetHome() home supported disabled")
   672  	}
   673  	u := ctxpkg.ContextMustGetUser(ctx)
   674  	layout := templates.WithUser(u, fs.o.UserLayout)
   675  	return filepath.Join(fs.o.Root, layout), nil // TODO use a namespace?
   676  }
   677  
   678  // GetPathByID returns the fn pointed by the file id, without the internal namespace
   679  func (fs *Decomposedfs) GetPathByID(ctx context.Context, id *provider.ResourceId) (string, error) {
   680  	ctx, span := tracer.Start(ctx, "GetPathByID")
   681  	defer span.End()
   682  	n, err := fs.lu.NodeFromID(ctx, id)
   683  	if err != nil {
   684  		return "", err
   685  	}
   686  	rp, err := fs.p.AssemblePermissions(ctx, n)
   687  	switch {
   688  	case err != nil:
   689  		return "", err
   690  	case !rp.GetPath:
   691  		f := storagespace.FormatResourceID(id)
   692  		if rp.Stat {
   693  			return "", errtypes.PermissionDenied(f)
   694  		}
   695  		return "", errtypes.NotFound(f)
   696  	}
   697  
   698  	hp := func(n *node.Node) bool {
   699  		perms, err := fs.p.AssemblePermissions(ctx, n)
   700  		if err != nil {
   701  			return false
   702  		}
   703  		return perms.GetPath
   704  	}
   705  	return fs.lu.Path(ctx, n, hp)
   706  }
   707  
   708  // CreateDir creates the specified directory
   709  func (fs *Decomposedfs) CreateDir(ctx context.Context, ref *provider.Reference) (err error) {
   710  	ctx, span := tracer.Start(ctx, "CreateDir")
   711  	defer span.End()
   712  
   713  	name := path.Base(ref.Path)
   714  	if name == "" || name == "." || name == "/" {
   715  		return errtypes.BadRequest("Invalid path: " + ref.Path)
   716  	}
   717  
   718  	parentRef := &provider.Reference{
   719  		ResourceId: ref.ResourceId,
   720  		Path:       path.Dir(ref.Path),
   721  	}
   722  
   723  	// verify parent exists
   724  	var n *node.Node
   725  	if n, err = fs.lu.NodeFromResource(ctx, parentRef); err != nil {
   726  		if e, ok := err.(errtypes.NotFound); ok {
   727  			return errtypes.PreconditionFailed(e.Error())
   728  		}
   729  		return
   730  	}
   731  	// TODO check if user has access to root / space
   732  	if !n.Exists {
   733  		return errtypes.PreconditionFailed(parentRef.Path)
   734  	}
   735  
   736  	rp, err := fs.p.AssemblePermissions(ctx, n)
   737  	switch {
   738  	case err != nil:
   739  		return err
   740  	case !rp.CreateContainer:
   741  		f, _ := storagespace.FormatReference(ref)
   742  		if rp.Stat {
   743  			return errtypes.PermissionDenied(f)
   744  		}
   745  		return errtypes.NotFound(f)
   746  	}
   747  
   748  	// Set space owner in context
   749  	storagespace.ContextSendSpaceOwnerID(ctx, n.SpaceOwnerOrManager(ctx))
   750  
   751  	// check lock
   752  	if err := n.CheckLock(ctx); err != nil {
   753  		return err
   754  	}
   755  
   756  	// verify child does not exist, yet
   757  	if n, err = n.Child(ctx, name); err != nil {
   758  		return
   759  	}
   760  	if n.Exists {
   761  		return errtypes.AlreadyExists(ref.Path)
   762  	}
   763  
   764  	if err = fs.tp.CreateDir(ctx, n); err != nil {
   765  		return
   766  	}
   767  
   768  	return
   769  }
   770  
   771  // TouchFile as defined in the storage.FS interface
   772  func (fs *Decomposedfs) TouchFile(ctx context.Context, ref *provider.Reference, markprocessing bool, mtime string) error {
   773  	ctx, span := tracer.Start(ctx, "TouchFile")
   774  	defer span.End()
   775  	parentRef := &provider.Reference{
   776  		ResourceId: ref.ResourceId,
   777  		Path:       path.Dir(ref.Path),
   778  	}
   779  
   780  	// verify parent exists
   781  	parent, err := fs.lu.NodeFromResource(ctx, parentRef)
   782  	if err != nil {
   783  		return errtypes.InternalError(err.Error())
   784  	}
   785  	if !parent.Exists {
   786  		return errtypes.NotFound(parentRef.Path)
   787  	}
   788  
   789  	n, err := fs.lu.NodeFromResource(ctx, ref)
   790  	if err != nil {
   791  		return errtypes.InternalError(err.Error())
   792  	}
   793  
   794  	rp, err := fs.p.AssemblePermissions(ctx, n)
   795  	switch {
   796  	case err != nil:
   797  		return err
   798  	case !rp.InitiateFileUpload:
   799  		f, _ := storagespace.FormatReference(ref)
   800  		if rp.Stat {
   801  			return errtypes.PermissionDenied(f)
   802  		}
   803  		return errtypes.NotFound(f)
   804  	}
   805  
   806  	// Set space owner in context
   807  	storagespace.ContextSendSpaceOwnerID(ctx, n.SpaceOwnerOrManager(ctx))
   808  
   809  	// check lock
   810  	if err := n.CheckLock(ctx); err != nil {
   811  		return err
   812  	}
   813  	return fs.tp.TouchFile(ctx, n, markprocessing, mtime)
   814  }
   815  
// CreateReference creates a reference as a node folder with the target stored in extended attributes
// There is no difference between the /Shares folder and normal nodes because the storage is not supposed to be accessible
// without the storage provider. In effect everything is a shadow namespace.
// To mimic the eos and owncloud driver we only allow references as children of the "/Shares" folder
// FIXME: This comment should explain briefly what a reference is in this context.
func (fs *Decomposedfs) CreateReference(ctx context.Context, p string, targetURI *url.URL) (err error) {
	// not implemented for decomposedfs; callers receive a NotSupported error
	return errtypes.NotSupported("not implemented")
}
   824  
   825  // Move moves a resource from one reference to another
   826  func (fs *Decomposedfs) Move(ctx context.Context, oldRef, newRef *provider.Reference) (err error) {
   827  	ctx, span := tracer.Start(ctx, "Move")
   828  	defer span.End()
   829  	var oldNode, newNode *node.Node
   830  	if oldNode, err = fs.lu.NodeFromResource(ctx, oldRef); err != nil {
   831  		return
   832  	}
   833  
   834  	if !oldNode.Exists {
   835  		err = errtypes.NotFound(filepath.Join(oldNode.ParentID, oldNode.Name))
   836  		return
   837  	}
   838  
   839  	orp, err := fs.p.AssemblePermissions(ctx, oldNode)
   840  	switch {
   841  	case err != nil:
   842  		return err
   843  	case !orp.Move:
   844  		f, _ := storagespace.FormatReference(oldRef)
   845  		if orp.Stat {
   846  			return errtypes.PermissionDenied(f)
   847  		}
   848  		return errtypes.NotFound(f)
   849  	}
   850  
   851  	if newNode, err = fs.lu.NodeFromResource(ctx, newRef); err != nil {
   852  		return
   853  	}
   854  	if newNode.Exists {
   855  		err = errtypes.AlreadyExists(filepath.Join(newNode.ParentID, newNode.Name))
   856  		return
   857  	}
   858  
   859  	nrp, err := fs.p.AssemblePermissions(ctx, newNode)
   860  	switch {
   861  	case err != nil:
   862  		return err
   863  	case oldNode.IsDir(ctx) && !nrp.CreateContainer:
   864  		f, _ := storagespace.FormatReference(newRef)
   865  		if nrp.Stat {
   866  			return errtypes.PermissionDenied(f)
   867  		}
   868  		return errtypes.NotFound(f)
   869  	case !oldNode.IsDir(ctx) && !nrp.InitiateFileUpload:
   870  		f, _ := storagespace.FormatReference(newRef)
   871  		if nrp.Stat {
   872  			return errtypes.PermissionDenied(f)
   873  		}
   874  		return errtypes.NotFound(f)
   875  	}
   876  
   877  	// Set space owner in context
   878  	storagespace.ContextSendSpaceOwnerID(ctx, newNode.SpaceOwnerOrManager(ctx))
   879  
   880  	// check lock on source
   881  	if err := oldNode.CheckLock(ctx); err != nil {
   882  		return err
   883  	}
   884  
   885  	if err := fs.tp.Move(ctx, oldNode, newNode); err != nil {
   886  		return err
   887  	}
   888  
   889  	fs.publishEvent(ctx, fs.moveEvent(ctx, oldRef, newRef, oldNode, newNode, orp, nrp))
   890  
   891  	return nil
   892  }
   893  
   894  // GetMD returns the metadata for the specified resource
   895  func (fs *Decomposedfs) GetMD(ctx context.Context, ref *provider.Reference, mdKeys []string, fieldMask []string) (ri *provider.ResourceInfo, err error) {
   896  	ctx, span := tracer.Start(ctx, "GetMD")
   897  	defer span.End()
   898  	var node *node.Node
   899  	if node, err = fs.lu.NodeFromResource(ctx, ref); err != nil {
   900  		return
   901  	}
   902  
   903  	if !node.Exists {
   904  		err = errtypes.NotFound(filepath.Join(node.ParentID, node.Name))
   905  		return
   906  	}
   907  
   908  	rp, err := fs.p.AssemblePermissions(ctx, node)
   909  	switch {
   910  	case err != nil:
   911  		return nil, err
   912  	case !rp.Stat:
   913  		f, _ := storagespace.FormatReference(ref)
   914  		return nil, errtypes.NotFound(f)
   915  	}
   916  
   917  	md, err := node.AsResourceInfo(ctx, rp, mdKeys, fieldMask, utils.IsRelativeReference(ref))
   918  	if err != nil {
   919  		return nil, err
   920  	}
   921  
   922  	addSpace := len(fieldMask) == 0
   923  	for _, p := range fieldMask {
   924  		if p == "space" || p == "*" {
   925  			addSpace = true
   926  			break
   927  		}
   928  	}
   929  	if addSpace {
   930  		if md.Space, err = fs.StorageSpaceFromNode(ctx, node, true); err != nil {
   931  			return nil, err
   932  		}
   933  	}
   934  
   935  	return md, nil
   936  }
   937  
   938  // ListFolder returns a list of resources in the specified folder
   939  func (fs *Decomposedfs) ListFolder(ctx context.Context, ref *provider.Reference, mdKeys []string, fieldMask []string) ([]*provider.ResourceInfo, error) {
   940  	ctx, span := tracer.Start(ctx, "ListFolder")
   941  	defer span.End()
   942  	n, err := fs.lu.NodeFromResource(ctx, ref)
   943  	if err != nil {
   944  		return nil, err
   945  	}
   946  
   947  	if !n.Exists {
   948  		return nil, errtypes.NotFound(filepath.Join(n.ParentID, n.Name))
   949  	}
   950  
   951  	rp, err := fs.p.AssemblePermissions(ctx, n)
   952  	switch {
   953  	case err != nil:
   954  		return nil, err
   955  	case !rp.ListContainer:
   956  		f, _ := storagespace.FormatReference(ref)
   957  		if rp.Stat {
   958  			return nil, errtypes.PermissionDenied(f)
   959  		}
   960  		return nil, errtypes.NotFound(f)
   961  	}
   962  
   963  	children, err := fs.tp.ListFolder(ctx, n)
   964  	if err != nil {
   965  		return nil, err
   966  	}
   967  
   968  	numWorkers := fs.o.MaxConcurrency
   969  	if len(children) < numWorkers {
   970  		numWorkers = len(children)
   971  	}
   972  	work := make(chan *node.Node, len(children))
   973  	results := make(chan *provider.ResourceInfo, len(children))
   974  
   975  	g, ctx := errgroup.WithContext(ctx)
   976  
   977  	// Distribute work
   978  	g.Go(func() error {
   979  		defer close(work)
   980  		for _, child := range children {
   981  			select {
   982  			case work <- child:
   983  			case <-ctx.Done():
   984  				return ctx.Err()
   985  			}
   986  		}
   987  		return nil
   988  	})
   989  
   990  	// Spawn workers that'll concurrently work the queue
   991  	for i := 0; i < numWorkers; i++ {
   992  		g.Go(func() error {
   993  			for child := range work {
   994  				np := rp
   995  				// add this childs permissions
   996  				pset, _ := child.PermissionSet(ctx)
   997  				node.AddPermissions(np, pset)
   998  				ri, err := child.AsResourceInfo(ctx, np, mdKeys, fieldMask, utils.IsRelativeReference(ref))
   999  				if err != nil {
  1000  					return errtypes.InternalError(err.Error())
  1001  				}
  1002  				select {
  1003  				case results <- ri:
  1004  				case <-ctx.Done():
  1005  					return ctx.Err()
  1006  				}
  1007  			}
  1008  			return nil
  1009  		})
  1010  	}
  1011  
  1012  	// Wait for things to settle down, then close results chan
  1013  	go func() {
  1014  		_ = g.Wait() // error is checked later
  1015  		close(results)
  1016  	}()
  1017  
  1018  	finfos := make([]*provider.ResourceInfo, len(children))
  1019  	i := 0
  1020  	for fi := range results {
  1021  		finfos[i] = fi
  1022  		i++
  1023  	}
  1024  
  1025  	if err := g.Wait(); err != nil {
  1026  		return nil, err
  1027  	}
  1028  
  1029  	return finfos, nil
  1030  }
  1031  
  1032  // Delete deletes the specified resource
  1033  func (fs *Decomposedfs) Delete(ctx context.Context, ref *provider.Reference) (err error) {
  1034  	ctx, span := tracer.Start(ctx, "Delete")
  1035  	defer span.End()
  1036  	var node *node.Node
  1037  	if node, err = fs.lu.NodeFromResource(ctx, ref); err != nil {
  1038  		return
  1039  	}
  1040  	if !node.Exists {
  1041  		return errtypes.NotFound(filepath.Join(node.ParentID, node.Name))
  1042  	}
  1043  
  1044  	rp, err := fs.p.AssemblePermissions(ctx, node)
  1045  	switch {
  1046  	case err != nil:
  1047  		return err
  1048  	case !rp.Delete:
  1049  		f, _ := storagespace.FormatReference(ref)
  1050  		if rp.Stat {
  1051  			return errtypes.PermissionDenied(f)
  1052  		}
  1053  		return errtypes.NotFound(f)
  1054  	}
  1055  
  1056  	// Set space owner in context
  1057  	storagespace.ContextSendSpaceOwnerID(ctx, node.SpaceOwnerOrManager(ctx))
  1058  
  1059  	if err := node.CheckLock(ctx); err != nil {
  1060  		return err
  1061  	}
  1062  
  1063  	return fs.tp.Delete(ctx, node)
  1064  }
  1065  
  1066  // Download returns a reader to the specified resource
  1067  func (fs *Decomposedfs) Download(ctx context.Context, ref *provider.Reference, openReaderFunc func(md *provider.ResourceInfo) bool) (*provider.ResourceInfo, io.ReadCloser, error) {
  1068  	ctx, span := tracer.Start(ctx, "Download")
  1069  	defer span.End()
  1070  	// check if we are trying to download a revision
  1071  	// TODO the CS3 api should allow initiating a revision download
  1072  	if ref.ResourceId != nil && strings.Contains(ref.ResourceId.OpaqueId, node.RevisionIDDelimiter) {
  1073  		return fs.DownloadRevision(ctx, ref, ref.ResourceId.OpaqueId, openReaderFunc)
  1074  	}
  1075  
  1076  	n, err := fs.lu.NodeFromResource(ctx, ref)
  1077  	if err != nil {
  1078  		return nil, nil, err
  1079  	}
  1080  
  1081  	if !n.Exists {
  1082  		err = errtypes.NotFound(filepath.Join(n.ParentID, n.Name))
  1083  		return nil, nil, err
  1084  	}
  1085  
  1086  	rp, err := fs.p.AssemblePermissions(ctx, n)
  1087  	switch {
  1088  	case err != nil:
  1089  		return nil, nil, err
  1090  	case !rp.InitiateFileDownload:
  1091  		f, _ := storagespace.FormatReference(ref)
  1092  		if rp.Stat {
  1093  			return nil, nil, errtypes.PermissionDenied(f)
  1094  		}
  1095  		return nil, nil, errtypes.NotFound(f)
  1096  	}
  1097  
  1098  	ri, err := n.AsResourceInfo(ctx, rp, nil, []string{"size", "mimetype", "etag"}, true)
  1099  	if err != nil {
  1100  		return nil, nil, err
  1101  	}
  1102  	var reader io.ReadCloser
  1103  	if openReaderFunc(ri) {
  1104  		reader, err = fs.tp.ReadBlob(n)
  1105  		if err != nil {
  1106  			return nil, nil, errors.Wrap(err, "Decomposedfs: error download blob '"+n.ID+"'")
  1107  		}
  1108  	}
  1109  	return ri, reader, nil
  1110  }
  1111  
  1112  // GetLock returns an existing lock on the given reference
  1113  func (fs *Decomposedfs) GetLock(ctx context.Context, ref *provider.Reference) (*provider.Lock, error) {
  1114  	ctx, span := tracer.Start(ctx, "GetLock")
  1115  	defer span.End()
  1116  	node, err := fs.lu.NodeFromResource(ctx, ref)
  1117  	if err != nil {
  1118  		return nil, errors.Wrap(err, "Decomposedfs: error resolving ref")
  1119  	}
  1120  
  1121  	if !node.Exists {
  1122  		err = errtypes.NotFound(filepath.Join(node.ParentID, node.Name))
  1123  		return nil, err
  1124  	}
  1125  
  1126  	rp, err := fs.p.AssemblePermissions(ctx, node)
  1127  	switch {
  1128  	case err != nil:
  1129  		return nil, err
  1130  	case !rp.InitiateFileDownload:
  1131  		f, _ := storagespace.FormatReference(ref)
  1132  		if rp.Stat {
  1133  			return nil, errtypes.PermissionDenied(f)
  1134  		}
  1135  		return nil, errtypes.NotFound(f)
  1136  	}
  1137  
  1138  	return node.ReadLock(ctx, false)
  1139  }
  1140  
  1141  // SetLock puts a lock on the given reference
  1142  func (fs *Decomposedfs) SetLock(ctx context.Context, ref *provider.Reference, lock *provider.Lock) error {
  1143  	ctx, span := tracer.Start(ctx, "SetLock")
  1144  	defer span.End()
  1145  	node, err := fs.lu.NodeFromResource(ctx, ref)
  1146  	if err != nil {
  1147  		return errors.Wrap(err, "Decomposedfs: error resolving ref")
  1148  	}
  1149  
  1150  	if !node.Exists {
  1151  		return errtypes.NotFound(filepath.Join(node.ParentID, node.Name))
  1152  	}
  1153  
  1154  	rp, err := fs.p.AssemblePermissions(ctx, node)
  1155  	switch {
  1156  	case err != nil:
  1157  		return err
  1158  	case !rp.InitiateFileUpload:
  1159  		f, _ := storagespace.FormatReference(ref)
  1160  		if rp.Stat {
  1161  			return errtypes.PermissionDenied(f)
  1162  		}
  1163  		return errtypes.NotFound(f)
  1164  	}
  1165  
  1166  	return node.SetLock(ctx, lock)
  1167  }
  1168  
  1169  // RefreshLock refreshes an existing lock on the given reference
  1170  func (fs *Decomposedfs) RefreshLock(ctx context.Context, ref *provider.Reference, lock *provider.Lock, existingLockID string) error {
  1171  	ctx, span := tracer.Start(ctx, "RefreshLock")
  1172  	defer span.End()
  1173  	if lock.LockId == "" {
  1174  		return errtypes.BadRequest("missing lockid")
  1175  	}
  1176  
  1177  	node, err := fs.lu.NodeFromResource(ctx, ref)
  1178  	if err != nil {
  1179  		return errors.Wrap(err, "Decomposedfs: error resolving ref")
  1180  	}
  1181  
  1182  	if !node.Exists {
  1183  		return errtypes.NotFound(filepath.Join(node.ParentID, node.Name))
  1184  	}
  1185  
  1186  	rp, err := fs.p.AssemblePermissions(ctx, node)
  1187  	switch {
  1188  	case err != nil:
  1189  		return err
  1190  	case !rp.InitiateFileUpload:
  1191  		f, _ := storagespace.FormatReference(ref)
  1192  		if rp.Stat {
  1193  			return errtypes.PermissionDenied(f)
  1194  		}
  1195  		return errtypes.NotFound(f)
  1196  	}
  1197  
  1198  	return node.RefreshLock(ctx, lock, existingLockID)
  1199  }
  1200  
  1201  // Unlock removes an existing lock from the given reference
  1202  func (fs *Decomposedfs) Unlock(ctx context.Context, ref *provider.Reference, lock *provider.Lock) error {
  1203  	ctx, span := tracer.Start(ctx, "Unlock")
  1204  	defer span.End()
  1205  	if lock.LockId == "" {
  1206  		return errtypes.BadRequest("missing lockid")
  1207  	}
  1208  
  1209  	node, err := fs.lu.NodeFromResource(ctx, ref)
  1210  	if err != nil {
  1211  		return errors.Wrap(err, "Decomposedfs: error resolving ref")
  1212  	}
  1213  
  1214  	if !node.Exists {
  1215  		return errtypes.NotFound(filepath.Join(node.ParentID, node.Name))
  1216  	}
  1217  
  1218  	rp, err := fs.p.AssemblePermissions(ctx, node)
  1219  	switch {
  1220  	case err != nil:
  1221  		return err
  1222  	case !rp.InitiateFileUpload: // TODO do we need a dedicated permission?
  1223  		f, _ := storagespace.FormatReference(ref)
  1224  		if rp.Stat {
  1225  			return errtypes.PermissionDenied(f)
  1226  		}
  1227  		return errtypes.NotFound(f)
  1228  	}
  1229  
  1230  	return node.Unlock(ctx, lock)
  1231  }
  1232  
// ListRecycle lists the content of the trashbin for the given reference,
// delegating to the configured trashbin implementation.
func (fs *Decomposedfs) ListRecycle(ctx context.Context, ref *provider.Reference, key, relativePath string) ([]*provider.RecycleItem, error) {
	return fs.trashbin.ListRecycle(ctx, ref, key, relativePath)
}
// RestoreRecycleItem restores the trashed item identified by key and
// relativePath to restoreRef, delegating to the configured trashbin
// implementation.
func (fs *Decomposedfs) RestoreRecycleItem(ctx context.Context, ref *provider.Reference, key, relativePath string, restoreRef *provider.Reference) error {
	return fs.trashbin.RestoreRecycleItem(ctx, ref, key, relativePath, restoreRef)
}
// PurgeRecycleItem permanently removes the trashed item identified by key and
// relativePath, delegating to the configured trashbin implementation.
func (fs *Decomposedfs) PurgeRecycleItem(ctx context.Context, ref *provider.Reference, key, relativePath string) error {
	return fs.trashbin.PurgeRecycleItem(ctx, ref, key, relativePath)
}
// EmptyRecycle empties the trashbin for the given reference, delegating to
// the configured trashbin implementation.
func (fs *Decomposedfs) EmptyRecycle(ctx context.Context, ref *provider.Reference) error {
	return fs.trashbin.EmptyRecycle(ctx, ref)
}
  1245  
  1246  func (fs *Decomposedfs) getNodePath(ctx context.Context, n *node.Node, perms *provider.ResourcePermissions) (string, error) {
  1247  	hp := func(n *node.Node) bool {
  1248  		return perms.GetGetPath()
  1249  	}
  1250  	return fs.lu.Path(ctx, n, hp)
  1251  }
  1252  
  1253  func (fs *Decomposedfs) refFromNode(ctx context.Context, n *node.Node, storageId string, perms *provider.ResourcePermissions) (*provider.Reference, error) {
  1254  	var err error
  1255  	if perms == nil {
  1256  		perms, err = fs.p.AssemblePermissions(ctx, n)
  1257  		if err != nil {
  1258  			return nil, err
  1259  		}
  1260  	}
  1261  	path, err := fs.getNodePath(ctx, n, perms)
  1262  	if err != nil {
  1263  		return nil, err
  1264  	}
  1265  	return &provider.Reference{
  1266  		ResourceId: &provider.ResourceId{
  1267  			StorageId: storageId,
  1268  			OpaqueId:  n.SpaceID,
  1269  			SpaceId:   n.SpaceID,
  1270  		},
  1271  		Path: path,
  1272  	}, nil
  1273  }