github.com/cs3org/reva/v2@v2.27.7/pkg/storage/fs/s3/s3.go (about)

     1  // Copyright 2018-2021 CERN
     2  //
     3  // Licensed under the Apache License, Version 2.0 (the "License");
     4  // you may not use this file except in compliance with the License.
     5  // You may obtain a copy of the License at
     6  //
     7  //     http://www.apache.org/licenses/LICENSE-2.0
     8  //
     9  // Unless required by applicable law or agreed to in writing, software
    10  // distributed under the License is distributed on an "AS IS" BASIS,
    11  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    12  // See the License for the specific language governing permissions and
    13  // limitations under the License.
    14  //
    15  // In applying this license, CERN does not waive the privileges and immunities
    16  // granted to it by virtue of its status as an Intergovernmental Organization
    17  // or submit itself to any jurisdiction.
    18  
    19  package s3
    20  
    21  import (
    22  	"context"
    23  	"fmt"
    24  	"io"
    25  	"net/http"
    26  	"net/url"
    27  	"path"
    28  	"strings"
    29  	"time"
    30  
    31  	"github.com/aws/aws-sdk-go/aws"
    32  	"github.com/aws/aws-sdk-go/aws/awserr"
    33  	"github.com/aws/aws-sdk-go/aws/credentials"
    34  	"github.com/aws/aws-sdk-go/aws/session"
    35  	"github.com/aws/aws-sdk-go/service/s3"
    36  	"github.com/aws/aws-sdk-go/service/s3/s3manager"
    37  	provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1"
    38  	types "github.com/cs3org/go-cs3apis/cs3/types/v1beta1"
    39  	"github.com/cs3org/reva/v2/pkg/appctx"
    40  	"github.com/cs3org/reva/v2/pkg/errtypes"
    41  	"github.com/cs3org/reva/v2/pkg/events"
    42  	"github.com/cs3org/reva/v2/pkg/mime"
    43  	"github.com/cs3org/reva/v2/pkg/storage"
    44  	"github.com/cs3org/reva/v2/pkg/storage/fs/registry"
    45  	"github.com/mitchellh/mapstructure"
    46  	"github.com/pkg/errors"
    47  	"github.com/rs/zerolog"
    48  )
    49  
// init registers the s3 driver with the storage filesystem registry under
// the "s3" key so it can be selected via configuration.
func init() {
	registry.Register("s3", New)
}
    53  
// config holds the s3 driver options as decoded from the generic storage
// provider configuration map.
type config struct {
	Region    string `mapstructure:"region"`     // AWS region; New falls back to "us-east-1" when empty
	AccessKey string `mapstructure:"access_key"` // static credential: access key id
	SecretKey string `mapstructure:"secret_key"` // static credential: secret access key
	Endpoint  string `mapstructure:"endpoint"`   // S3(-compatible) endpoint URL
	Bucket    string `mapstructure:"bucket"`     // bucket all objects are stored in
	Prefix    string `mapstructure:"prefix"`     // key prefix acting as the root of this storage
}
    62  
    63  func parseConfig(m map[string]interface{}) (*config, error) {
    64  	c := &config{}
    65  	if err := mapstructure.Decode(m, c); err != nil {
    66  		err = errors.Wrap(err, "error decoding conf")
    67  		return nil, err
    68  	}
    69  	return c, nil
    70  }
    71  
    72  // New returns an implementation to of the storage.FS interface that talk to
    73  // a s3 api.
    74  func New(m map[string]interface{}, _ events.Stream, _ *zerolog.Logger) (storage.FS, error) {
    75  	c, err := parseConfig(m)
    76  	if err != nil {
    77  		return nil, err
    78  	}
    79  
    80  	awsConfig := aws.NewConfig().
    81  		WithHTTPClient(http.DefaultClient).
    82  		WithMaxRetries(aws.UseServiceDefaultRetries).
    83  		WithLogger(aws.NewDefaultLogger()).
    84  		WithLogLevel(aws.LogOff).
    85  		WithSleepDelay(time.Sleep).
    86  		WithCredentials(credentials.NewStaticCredentials(c.AccessKey, c.SecretKey, "")).
    87  		WithEndpoint(c.Endpoint).
    88  		WithS3ForcePathStyle(true).
    89  		WithDisableSSL(true)
    90  
    91  	if c.Region != "" {
    92  		awsConfig.WithRegion(c.Region)
    93  	} else {
    94  		awsConfig.WithRegion("us-east-1")
    95  	}
    96  
    97  	sess, err := session.NewSession(awsConfig)
    98  	if err != nil {
    99  		return nil, err
   100  	}
   101  	if sess == nil {
   102  		return nil, errors.New("creating the S3 session")
   103  	}
   104  
   105  	s3Client := s3.New(sess)
   106  
   107  	return &s3FS{client: s3Client, config: c}, nil
   108  }
   109  
// Shutdown releases resources held by the filesystem. The s3 driver keeps no
// background state, so this is a no-op.
func (fs *s3FS) Shutdown(ctx context.Context) error {
	return nil
}
   113  
   114  func (fs *s3FS) addRoot(p string) string {
   115  	np := path.Join(fs.config.Prefix, p)
   116  	return np
   117  }
   118  
   119  func (fs *s3FS) resolve(ctx context.Context, ref *provider.Reference) (string, error) {
   120  	if strings.HasPrefix(ref.Path, "/") {
   121  		return fs.addRoot(ref.GetPath()), nil
   122  	}
   123  
   124  	if ref.ResourceId != nil && ref.ResourceId.OpaqueId != "" {
   125  		fn := path.Join("/", strings.TrimPrefix(ref.ResourceId.OpaqueId, "fileid-"))
   126  		fn = fs.addRoot(fn)
   127  		return fn, nil
   128  	}
   129  
   130  	// reference is invalid
   131  	return "", fmt.Errorf("invalid reference %+v", ref)
   132  }
   133  
   134  func (fs *s3FS) removeRoot(np string) string {
   135  	p := strings.TrimPrefix(np, fs.config.Prefix)
   136  	if p == "" {
   137  		p = "/"
   138  	}
   139  	return p
   140  }
   141  
// s3FS implements storage.FS on top of a single S3 bucket.
type s3FS struct {
	client *s3.S3  // low-level S3 API client
	config *config // driver configuration (bucket, prefix, credentials, ...)
}
   146  
// permissionSet returns the permission set for the current user.
// The full (owner) permission set is granted unconditionally; no per-resource
// ACLs are evaluated.
func (fs *s3FS) permissionSet(ctx context.Context) *provider.ResourcePermissions {
	// TODO fix permissions for share recipients by traversing reading acls up to the root? cache acls for the parent node and reuse it
	return &provider.ResourcePermissions{
		// owner has all permissions
		AddGrant:             true,
		CreateContainer:      true,
		Delete:               true,
		GetPath:              true,
		GetQuota:             true,
		InitiateFileDownload: true,
		InitiateFileUpload:   true,
		ListContainer:        true,
		ListFileVersions:     true,
		ListGrants:           true,
		ListRecycle:          true,
		Move:                 true,
		PurgeRecycle:         true,
		RemoveGrant:          true,
		RestoreFileVersion:   true,
		RestoreRecycleItem:   true,
		Stat:                 true,
		UpdateGrant:          true,
	}
}
   172  
// normalizeObject converts an s3 listing Object into a CS3 ResourceInfo.
// fn is the object key; it is re-rooted and stripped of the configured prefix
// before being exposed as the resource path.
// NOTE(review): o.Key, o.ETag, o.Size and o.LastModified are dereferenced
// without nil checks — assumes the SDK always populates them; confirm for
// third-party S3 implementations.
func (fs *s3FS) normalizeObject(ctx context.Context, o *s3.Object, fn string) *provider.ResourceInfo {
	fn = fs.removeRoot(path.Join("/", fn))
	// a trailing slash in the key marks a "directory" placeholder object
	isDir := strings.HasSuffix(*o.Key, "/")
	md := &provider.ResourceInfo{
		Id: &provider.ResourceId{
			// the opaque id is the path without the leading slash
			OpaqueId: "fileid-" + strings.TrimPrefix(fn, "/"),
		},
		Path:          fn,
		Type:          getResourceType(isDir),
		Etag:          *o.ETag,
		MimeType:      mime.Detect(isDir, fn),
		PermissionSet: fs.permissionSet(ctx),
		Size:          uint64(*o.Size),
		Mtime: &types.Timestamp{
			Seconds: uint64(o.LastModified.Unix()),
		},
	}
	appctx.GetLogger(ctx).Debug().
		Interface("object", o).
		Interface("metadata", md).
		Msg("normalized Object")
	return md
}
   196  
   197  func getResourceType(isDir bool) provider.ResourceType {
   198  	if isDir {
   199  		return provider.ResourceType_RESOURCE_TYPE_CONTAINER
   200  	}
   201  	return provider.ResourceType_RESOURCE_TYPE_CONTAINER
   202  }
   203  
// normalizeHead converts a HeadObject response into a CS3 ResourceInfo.
// fn is the object key; it is re-rooted and stripped of the configured
// prefix first.
// NOTE(review): o.ETag, o.ContentLength and o.LastModified are dereferenced
// without nil checks — assumes the SDK populates them on success; confirm
// for third-party S3 implementations.
func (fs *s3FS) normalizeHead(ctx context.Context, o *s3.HeadObjectOutput, fn string) *provider.ResourceInfo {
	fn = fs.removeRoot(path.Join("/", fn))
	// a trailing slash in the (normalized) key marks a directory
	isDir := strings.HasSuffix(fn, "/")
	md := &provider.ResourceInfo{
		Id:            &provider.ResourceId{OpaqueId: "fileid-" + strings.TrimPrefix(fn, "/")},
		Path:          fn,
		Type:          getResourceType(isDir),
		Etag:          *o.ETag,
		MimeType:      mime.Detect(isDir, fn),
		PermissionSet: fs.permissionSet(ctx),
		Size:          uint64(*o.ContentLength),
		Mtime: &types.Timestamp{
			Seconds: uint64(o.LastModified.Unix()),
		},
	}
	appctx.GetLogger(ctx).Debug().
		Interface("head", o).
		Interface("metadata", md).
		Msg("normalized Head")
	return md
}
// normalizeCommonPrefix converts a listing CommonPrefix (a sub directory)
// into a CS3 ResourceInfo. S3 prefixes carry no metadata of their own, so the
// etag is a placeholder and size/mtime are reported as zero.
func (fs *s3FS) normalizeCommonPrefix(ctx context.Context, p *s3.CommonPrefix) *provider.ResourceInfo {
	fn := fs.removeRoot(path.Join("/", *p.Prefix))
	md := &provider.ResourceInfo{
		Id:            &provider.ResourceId{OpaqueId: "fileid-" + strings.TrimPrefix(fn, "/")},
		Path:          fn,
		Type:          getResourceType(true),
		Etag:          "TODO(labkode)",
		MimeType:      mime.Detect(true, fn),
		PermissionSet: fs.permissionSet(ctx),
		Size:          0,
		Mtime: &types.Timestamp{
			Seconds: 0,
		},
	}
	appctx.GetLogger(ctx).Debug().
		Interface("prefix", p).
		Interface("metadata", md).
		Msg("normalized CommonPrefix")
	return md
}
   245  
   246  // GetPathByID returns the path pointed by the file id
   247  // In this implementation the file id is that path of the file without the first slash
   248  // thus the file id always points to the filename
   249  func (fs *s3FS) GetPathByID(ctx context.Context, id *provider.ResourceId) (string, error) {
   250  	return path.Join("/", strings.TrimPrefix(id.OpaqueId, "fileid-")), nil
   251  }
   252  
// AddGrant is not supported by the s3 driver; it always returns NotSupported.
func (fs *s3FS) AddGrant(ctx context.Context, ref *provider.Reference, g *provider.Grant) error {
	return errtypes.NotSupported("s3: operation not supported")
}
   256  
// DenyGrant is not supported by the s3 driver; it always returns NotSupported.
func (fs *s3FS) DenyGrant(ctx context.Context, ref *provider.Reference, g *provider.Grantee) error {
	return errtypes.NotSupported("s3: operation not supported")
}
   260  
// ListGrants is not supported by the s3 driver; it always returns NotSupported.
func (fs *s3FS) ListGrants(ctx context.Context, ref *provider.Reference) ([]*provider.Grant, error) {
	return nil, errtypes.NotSupported("s3: operation not supported")
}
   264  
// RemoveGrant is not supported by the s3 driver; it always returns NotSupported.
func (fs *s3FS) RemoveGrant(ctx context.Context, ref *provider.Reference, g *provider.Grant) error {
	return errtypes.NotSupported("s3: operation not supported")
}
   268  
// UpdateGrant is not supported by the s3 driver; it always returns NotSupported.
func (fs *s3FS) UpdateGrant(ctx context.Context, ref *provider.Reference, g *provider.Grant) error {
	return errtypes.NotSupported("s3: operation not supported")
}
   272  
// GetQuota reports no quota information: total, used and remaining are always
// returned as zero with no error.
func (fs *s3FS) GetQuota(ctx context.Context, ref *provider.Reference) (uint64, uint64, uint64, error) {
	return 0, 0, 0, nil
}
   276  
// SetArbitraryMetadata is not supported by the s3 driver; it always returns NotSupported.
func (fs *s3FS) SetArbitraryMetadata(ctx context.Context, ref *provider.Reference, md *provider.ArbitraryMetadata) error {
	return errtypes.NotSupported("s3: operation not supported")
}
   280  
// UnsetArbitraryMetadata is not supported by the s3 driver; it always returns NotSupported.
func (fs *s3FS) UnsetArbitraryMetadata(ctx context.Context, ref *provider.Reference, keys []string) error {
	return errtypes.NotSupported("s3: operation not supported")
}
   284  
// GetLock returns an existing lock on the given reference.
// Locking is not implemented by the s3 driver.
func (fs *s3FS) GetLock(ctx context.Context, ref *provider.Reference) (*provider.Lock, error) {
	return nil, errtypes.NotSupported("unimplemented")
}
   289  
// SetLock puts a lock on the given reference.
// Locking is not implemented by the s3 driver.
func (fs *s3FS) SetLock(ctx context.Context, ref *provider.Reference, lock *provider.Lock) error {
	return errtypes.NotSupported("unimplemented")
}
   294  
// RefreshLock refreshes an existing lock on the given reference.
// Locking is not implemented by the s3 driver.
func (fs *s3FS) RefreshLock(ctx context.Context, ref *provider.Reference, lock *provider.Lock, existingLockID string) error {
	return errtypes.NotSupported("unimplemented")
}
   299  
// Unlock removes an existing lock from the given reference.
// Locking is not implemented by the s3 driver.
func (fs *s3FS) Unlock(ctx context.Context, ref *provider.Reference, lock *provider.Lock) error {
	return errtypes.NotSupported("unimplemented")
}
   304  
// CreateReference is not supported by the s3 driver; it always returns NotSupported.
func (fs *s3FS) CreateReference(ctx context.Context, path string, targetURI *url.URL) error {
	// TODO(jfd):implement
	return errtypes.NotSupported("s3: operation not supported")
}
   309  
   310  func (fs *s3FS) GetHome(ctx context.Context) (string, error) {
   311  	return "", errtypes.NotSupported("eos: not supported")
   312  }
   313  
// CreateHome is not supported by the s3 driver; it always returns NotSupported.
func (fs *s3FS) CreateHome(ctx context.Context) error {
	return errtypes.NotSupported("s3fs: not supported")
}
   317  
   318  func (fs *s3FS) CreateDir(ctx context.Context, ref *provider.Reference) error {
   319  	log := appctx.GetLogger(ctx)
   320  
   321  	fn, err := fs.resolve(ctx, ref)
   322  	if err != nil {
   323  		return nil
   324  	}
   325  
   326  	fn = fs.addRoot(fn) + "/" // append / to indicate folder // TODO only if fn does not end in /
   327  
   328  	input := &s3.PutObjectInput{
   329  		Bucket:        aws.String(fs.config.Bucket),
   330  		Key:           aws.String(fn),
   331  		ContentType:   aws.String("application/octet-stream"),
   332  		ContentLength: aws.Int64(0),
   333  	}
   334  
   335  	result, err := fs.client.PutObject(input)
   336  	if err != nil {
   337  		log.Error().Err(err)
   338  		if aerr, ok := err.(awserr.Error); ok {
   339  			if aerr.Code() == s3.ErrCodeNoSuchBucket {
   340  				return errtypes.NotFound(ref.Path)
   341  			}
   342  		}
   343  		// FIXME we also need already exists error, webdav expects 405 MethodNotAllowed
   344  		return errors.Wrap(err, "s3fs: error creating dir "+ref.Path)
   345  	}
   346  
   347  	log.Debug().Interface("result", result) // todo cache etag?
   348  	return nil
   349  }
   350  
   351  // TouchFile as defined in the storage.FS interface
   352  func (fs *s3FS) TouchFile(ctx context.Context, ref *provider.Reference, markprocessing bool, mtime string) error {
   353  	return fmt.Errorf("unimplemented: TouchFile")
   354  }
   355  
   356  func (fs *s3FS) Delete(ctx context.Context, ref *provider.Reference) error {
   357  	log := appctx.GetLogger(ctx)
   358  
   359  	fn, err := fs.resolve(ctx, ref)
   360  	if err != nil {
   361  		return errors.Wrap(err, "error resolving ref")
   362  	}
   363  
   364  	// first we need to find out if fn is a dir or a file
   365  
   366  	_, err = fs.client.HeadObject(&s3.HeadObjectInput{
   367  		Bucket: aws.String(fs.config.Bucket),
   368  		Key:    aws.String(fn),
   369  	})
   370  	if err != nil {
   371  		log.Error().Err(err)
   372  		if aerr, ok := err.(awserr.Error); ok {
   373  			switch aerr.Code() {
   374  			case s3.ErrCodeNoSuchBucket:
   375  			case s3.ErrCodeNoSuchKey:
   376  				return errtypes.NotFound(fn)
   377  			}
   378  		}
   379  		// it might be a directory, so we can batch delete the prefix + /
   380  		iter := s3manager.NewDeleteListIterator(fs.client, &s3.ListObjectsInput{
   381  			Bucket: aws.String(fs.config.Bucket),
   382  			Prefix: aws.String(fn + "/"),
   383  		})
   384  		batcher := s3manager.NewBatchDeleteWithClient(fs.client)
   385  		if err := batcher.Delete(aws.BackgroundContext(), iter); err != nil {
   386  			return err
   387  		}
   388  		// ok, we are done
   389  		return nil
   390  	}
   391  
   392  	// we found an object, let's get rid of it
   393  	result, err := fs.client.DeleteObject(&s3.DeleteObjectInput{
   394  		Bucket: aws.String(fs.config.Bucket),
   395  		Key:    aws.String(fn),
   396  	})
   397  	if err != nil {
   398  		log.Error().Err(err)
   399  		if aerr, ok := err.(awserr.Error); ok {
   400  			switch aerr.Code() {
   401  			case s3.ErrCodeNoSuchBucket:
   402  			case s3.ErrCodeNoSuchKey:
   403  				return errtypes.NotFound(fn)
   404  			}
   405  		}
   406  		return errors.Wrap(err, "s3fs: error deleting "+fn)
   407  	}
   408  
   409  	log.Debug().Interface("result", result)
   410  	return nil
   411  }
   412  
// CreateStorageSpace creates a storage space.
// Not implemented by the s3 driver; always returns NotSupported.
func (fs *s3FS) CreateStorageSpace(ctx context.Context, req *provider.CreateStorageSpaceRequest) (*provider.CreateStorageSpaceResponse, error) {
	return nil, errtypes.NotSupported("unimplemented: CreateStorageSpace")
}
   417  
   418  func (fs *s3FS) moveObject(ctx context.Context, oldKey string, newKey string) error {
   419  
   420  	// Copy
   421  	// TODO double check CopyObject can deal with >5GB files.
   422  	// Docs say we need to use multipart upload: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectCOPY.html
   423  	_, err := fs.client.CopyObject(&s3.CopyObjectInput{
   424  		Bucket:     aws.String(fs.config.Bucket),
   425  		CopySource: aws.String("/" + fs.config.Bucket + oldKey),
   426  		Key:        aws.String(newKey),
   427  	})
   428  	if aerr, ok := err.(awserr.Error); ok {
   429  		if aerr.Code() == s3.ErrCodeNoSuchBucket {
   430  			return errtypes.NotFound(oldKey)
   431  		}
   432  		return err
   433  	}
   434  	// TODO cache etag and mtime?
   435  
   436  	// Delete
   437  	_, err = fs.client.DeleteObject(&s3.DeleteObjectInput{
   438  		Bucket: aws.String(fs.config.Bucket),
   439  		Key:    aws.String(oldKey),
   440  	})
   441  	if aerr, ok := err.(awserr.Error); ok {
   442  		switch aerr.Code() {
   443  		case s3.ErrCodeNoSuchBucket:
   444  		case s3.ErrCodeNoSuchKey:
   445  			return errtypes.NotFound(oldKey)
   446  		}
   447  		return err
   448  	}
   449  	return nil
   450  }
   451  
   452  func (fs *s3FS) Move(ctx context.Context, oldRef, newRef *provider.Reference) error {
   453  	log := appctx.GetLogger(ctx)
   454  
   455  	fn, err := fs.resolve(ctx, oldRef)
   456  	if err != nil {
   457  		return errors.Wrap(err, "error resolving ref")
   458  	}
   459  
   460  	newName, err := fs.resolve(ctx, newRef)
   461  	if err != nil {
   462  		return errors.Wrap(err, "error resolving ref")
   463  	}
   464  
   465  	// first we need to find out if fn is a dir or a file
   466  
   467  	_, err = fs.client.HeadObject(&s3.HeadObjectInput{
   468  		Bucket: aws.String(fs.config.Bucket),
   469  		Key:    aws.String(fn),
   470  	})
   471  	if err != nil {
   472  		log.Error().Err(err)
   473  		if aerr, ok := err.(awserr.Error); ok {
   474  			switch aerr.Code() {
   475  			case s3.ErrCodeNoSuchBucket:
   476  			case s3.ErrCodeNoSuchKey:
   477  				return errtypes.NotFound(fn)
   478  			}
   479  		}
   480  
   481  		// move directory
   482  		input := &s3.ListObjectsV2Input{
   483  			Bucket: aws.String(fs.config.Bucket),
   484  			Prefix: aws.String(fn + "/"),
   485  		}
   486  		isTruncated := true
   487  
   488  		for isTruncated {
   489  			output, err := fs.client.ListObjectsV2(input)
   490  			if err != nil {
   491  				return errors.Wrap(err, "s3FS: error listing "+fn)
   492  			}
   493  
   494  			for _, o := range output.Contents {
   495  				log.Debug().
   496  					Interface("object", *o).
   497  					Str("fn", fn).
   498  					Msg("found Object")
   499  
   500  				err := fs.moveObject(ctx, *o.Key, strings.Replace(*o.Key, fn+"/", newName+"/", 1))
   501  				if err != nil {
   502  					return err
   503  				}
   504  			}
   505  
   506  			input.ContinuationToken = output.NextContinuationToken
   507  			isTruncated = *output.IsTruncated
   508  		}
   509  		// ok, we are done
   510  		return nil
   511  	}
   512  
   513  	// move single object
   514  	err = fs.moveObject(ctx, fn, newName)
   515  	if err != nil {
   516  		return err
   517  	}
   518  	return nil
   519  }
   520  
   521  func (fs *s3FS) GetMD(ctx context.Context, ref *provider.Reference, mdKeys []string, fieldMask []string) (*provider.ResourceInfo, error) {
   522  	log := appctx.GetLogger(ctx)
   523  
   524  	fn, err := fs.resolve(ctx, ref)
   525  	if err != nil {
   526  		return nil, errors.Wrap(err, "error resolving ref")
   527  	}
   528  
   529  	// first try a head, works for files
   530  	log.Debug().
   531  		Str("fn", fn).
   532  		Msg("trying HEAD")
   533  
   534  	input := &s3.HeadObjectInput{
   535  		Bucket: aws.String(fs.config.Bucket),
   536  		Key:    aws.String(fn),
   537  	}
   538  	output, err := fs.client.HeadObject(input)
   539  	if err != nil {
   540  		log.Error().Err(err)
   541  		if aerr, ok := err.(awserr.Error); ok {
   542  			switch aerr.Code() {
   543  			case s3.ErrCodeNoSuchBucket:
   544  			case s3.ErrCodeNoSuchKey:
   545  				return nil, errtypes.NotFound(fn)
   546  			}
   547  		}
   548  		log.Debug().
   549  			Str("fn", fn).
   550  			Msg("trying to list prefix")
   551  		// try by listing parent to find directory
   552  		input := &s3.ListObjectsV2Input{
   553  			Bucket:    aws.String(fs.config.Bucket),
   554  			Prefix:    aws.String(fn),
   555  			Delimiter: aws.String("/"), // limit to a single directory
   556  		}
   557  		isTruncated := true
   558  
   559  		for isTruncated {
   560  			output, err := fs.client.ListObjectsV2(input)
   561  			if err != nil {
   562  				return nil, errors.Wrap(err, "s3FS: error listing "+fn)
   563  			}
   564  
   565  			for i := range output.CommonPrefixes {
   566  				log.Debug().
   567  					Interface("object", output.CommonPrefixes[i]).
   568  					Str("fn", fn).
   569  					Msg("found CommonPrefix")
   570  				if *output.CommonPrefixes[i].Prefix == fn+"/" {
   571  					return fs.normalizeCommonPrefix(ctx, output.CommonPrefixes[i]), nil
   572  				}
   573  			}
   574  
   575  			input.ContinuationToken = output.NextContinuationToken
   576  			isTruncated = *output.IsTruncated
   577  		}
   578  		return nil, errtypes.NotFound(fn)
   579  	}
   580  
   581  	return fs.normalizeHead(ctx, output, fn), nil
   582  }
   583  
   584  func (fs *s3FS) ListFolder(ctx context.Context, ref *provider.Reference, mdKeys, fieldMask []string) ([]*provider.ResourceInfo, error) {
   585  	fn, err := fs.resolve(ctx, ref)
   586  	if err != nil {
   587  		return nil, errors.Wrap(err, "error resolving ref")
   588  	}
   589  
   590  	input := &s3.ListObjectsV2Input{
   591  		Bucket:    aws.String(fs.config.Bucket),
   592  		Prefix:    aws.String(fn + "/"),
   593  		Delimiter: aws.String("/"), // limit to a single directory
   594  	}
   595  	isTruncated := true
   596  
   597  	finfos := []*provider.ResourceInfo{}
   598  
   599  	for isTruncated {
   600  		output, err := fs.client.ListObjectsV2(input)
   601  		if err != nil {
   602  			return nil, errors.Wrap(err, "s3FS: error listing "+fn)
   603  		}
   604  
   605  		for i := range output.CommonPrefixes {
   606  			finfos = append(finfos, fs.normalizeCommonPrefix(ctx, output.CommonPrefixes[i]))
   607  		}
   608  
   609  		for i := range output.Contents {
   610  			finfos = append(finfos, fs.normalizeObject(ctx, output.Contents[i], *output.Contents[i].Key))
   611  		}
   612  
   613  		input.ContinuationToken = output.NextContinuationToken
   614  		isTruncated = *output.IsTruncated
   615  	}
   616  	// TODO sort fileinfos?
   617  	return finfos, nil
   618  }
   619  
   620  func (fs *s3FS) Download(ctx context.Context, ref *provider.Reference, openReaderfunc func(*provider.ResourceInfo) bool) (*provider.ResourceInfo, io.ReadCloser, error) {
   621  	log := appctx.GetLogger(ctx)
   622  
   623  	fn, err := fs.resolve(ctx, ref)
   624  	if err != nil {
   625  		return nil, nil, errors.Wrap(err, "error resolving ref")
   626  	}
   627  
   628  	ri, err := fs.GetMD(ctx, ref, nil, []string{"size", "mimetype", "etag"})
   629  	if err != nil {
   630  		return nil, nil, errors.Wrap(err, "error getting metadata")
   631  	}
   632  
   633  	if !openReaderfunc(ri) {
   634  		return ri, nil, nil
   635  	}
   636  
   637  	// use GetObject instead of s3manager.Downloader:
   638  	// the result.Body is a ReadCloser, which allows streaming
   639  	// TODO double check we are not caching bytes in memory
   640  	r, err := fs.client.GetObject(&s3.GetObjectInput{
   641  		Bucket: aws.String(fs.config.Bucket),
   642  		Key:    aws.String(fn),
   643  	})
   644  	if err != nil {
   645  		log.Error().Err(err)
   646  		if aerr, ok := err.(awserr.Error); ok {
   647  			switch aerr.Code() {
   648  			case s3.ErrCodeNoSuchBucket:
   649  			case s3.ErrCodeNoSuchKey:
   650  				return nil, nil, errtypes.NotFound(fn)
   651  			}
   652  		}
   653  		return nil, nil, errors.Wrap(err, "s3fs: error deleting "+fn)
   654  	}
   655  	return ri, r.Body, nil
   656  }
   657  
// ListRevisions is not supported by the s3 driver; it always returns NotSupported.
func (fs *s3FS) ListRevisions(ctx context.Context, ref *provider.Reference) ([]*provider.FileVersion, error) {
	return nil, errtypes.NotSupported("list revisions")
}
   661  
// DownloadRevision is not supported by the s3 driver; it always returns NotSupported.
func (fs *s3FS) DownloadRevision(ctx context.Context, ref *provider.Reference, revisionKey string, openReaderfunc func(*provider.ResourceInfo) bool) (*provider.ResourceInfo, io.ReadCloser, error) {
	return nil, nil, errtypes.NotSupported("download revision")
}
   665  
// RestoreRevision is not supported by the s3 driver; it always returns NotSupported.
func (fs *s3FS) RestoreRevision(ctx context.Context, ref *provider.Reference, revisionKey string) error {
	return errtypes.NotSupported("restore revision")
}
   669  
// PurgeRecycleItem is not supported by the s3 driver; it always returns NotSupported.
func (fs *s3FS) PurgeRecycleItem(ctx context.Context, ref *provider.Reference, key, relativePath string) error {
	return errtypes.NotSupported("purge recycle item")
}
   673  
// EmptyRecycle is not supported by the s3 driver; it always returns NotSupported.
func (fs *s3FS) EmptyRecycle(ctx context.Context, ref *provider.Reference) error {
	return errtypes.NotSupported("empty recycle")
}
   677  
// ListRecycle is not supported by the s3 driver; it always returns NotSupported.
func (fs *s3FS) ListRecycle(ctx context.Context, ref *provider.Reference, key, relativePath string) ([]*provider.RecycleItem, error) {
	return nil, errtypes.NotSupported("list recycle")
}
   681  
// RestoreRecycleItem is not supported by the s3 driver; it always returns NotSupported.
func (fs *s3FS) RestoreRecycleItem(ctx context.Context, ref *provider.Reference, key, relativePath string, restoreRef *provider.Reference) error {
	return errtypes.NotSupported("restore recycle")
}
   685  
// ListStorageSpaces is not supported by the s3 driver; it always returns NotSupported.
func (fs *s3FS) ListStorageSpaces(ctx context.Context, filter []*provider.ListStorageSpacesRequest_Filter, unrestricted bool) ([]*provider.StorageSpace, error) {
	return nil, errtypes.NotSupported("list storage spaces")
}
   689  
// UpdateStorageSpace updates a storage space.
// Not implemented by the s3 driver; always returns NotSupported.
func (fs *s3FS) UpdateStorageSpace(ctx context.Context, req *provider.UpdateStorageSpaceRequest) (*provider.UpdateStorageSpaceResponse, error) {
	return nil, errtypes.NotSupported("update storage space")
}
   694  
// DeleteStorageSpace deletes a storage space.
// Not implemented by the s3 driver; always returns NotSupported.
func (fs *s3FS) DeleteStorageSpace(ctx context.Context, req *provider.DeleteStorageSpaceRequest) error {
	return errtypes.NotSupported("delete storage space")
}
   698  }