github.com/shindo/goofys@v0.24.1-0.20210326210429-9e930f0b2d5c/internal/backend_s3.go

// Copyright 2019 Ka-Hing Cheung
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package internal

import (
	. "github.com/kahing/goofys/api/common"

	"fmt"
	"net/http"
	"net/url"
	"strconv"
	"strings"
	"sync/atomic"
	"syscall"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/corehandlers"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/service/s3"

	"github.com/jacobsa/fuse"
)

type S3Backend struct {
	*s3.S3
	cap Capabilities

	bucket    string
	awsConfig *aws.Config
	flags     *FlagStorage
	config    *S3Config
	sseType   string

	aws      bool
	gcs      bool
	v2Signer bool

	readSem semaphore
}

func NewS3(bucket string, flags *FlagStorage, config *S3Config) (*S3Backend, error) {
	awsConfig, err := config.ToAwsConfig(flags)
	if err != nil {
		return nil, err
	}
	s := &S3Backend{
		bucket:    bucket,
		awsConfig: awsConfig,
		flags:     flags,
		config:    config,
		cap: Capabilities{
			Name:             "s3",
			MaxMultipartSize: 5 * 1024 * 1024 * 1024,
		},
	}

	if flags.DebugS3 {
		awsConfig.LogLevel = aws.LogLevel(aws.LogDebug | aws.LogDebugWithRequestErrors)
	}

	if flags.ReadConcurrency > 0 {
		s.readSem = make(semaphore, flags.ReadConcurrency)
	}

	if config.UseKMS {
		// SSE header string for KMS server-side encryption (SSE-KMS)
		s.sseType = s3.ServerSideEncryptionAwsKms
	} else if config.UseSSE {
		// SSE header string for non-KMS server-side encryption (SSE-S3)
		s.sseType = s3.ServerSideEncryptionAes256
	}

	s.newS3()
	return s, nil
}
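
// A minimal construction sketch (hypothetical caller; the FlagStorage and
// S3Config values are assumed to be populated elsewhere, e.g. from command
// line flags). Init, defined below, probes the bucket and tolerates the
// probe key not existing:
//
//	backend, err := NewS3("my-bucket", flags, config)
//	if err == nil {
//		err = backend.Init("probe-key")
//	}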

func (s *S3Backend) Bucket() string {
	return s.bucket
}

func (s *S3Backend) Capabilities() *Capabilities {
	return &s.cap
}

func addAcceptEncoding(req *request.Request) {
	if req.HTTPRequest.Method == "GET" {
		// we need "Accept-Encoding: identity" so that objects
		// with content-encoding won't be automatically
		// decompressed, but we don't want to sign it because GCS
		// doesn't like it
		req.HTTPRequest.Header.Set("Accept-Encoding", "identity")
	}
}

func addRequestPayer(req *request.Request) {
	// "Requester Pays" is only applicable to these methods
	// see https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html
	if req.HTTPRequest.Method == "GET" || req.HTTPRequest.Method == "HEAD" || req.HTTPRequest.Method == "POST" {
		req.HTTPRequest.Header.Set("x-amz-request-payer", "requester")
	}
}

func (s *S3Backend) setV2Signer(handlers *request.Handlers) {
	handlers.Sign.Clear()
	handlers.Sign.PushBack(SignV2)
	handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)
}

func (s *S3Backend) newS3() {
	s.S3 = s3.New(s.config.Session, s.awsConfig)
	if s.config.RequesterPays {
		s.S3.Handlers.Build.PushBack(addRequestPayer)
	}
	if s.v2Signer {
		s.setV2Signer(&s.S3.Handlers)
	}
	s.S3.Handlers.Sign.PushBack(addAcceptEncoding)
}

func (s *S3Backend) detectBucketLocationByHEAD() (err error, isAws bool) {
	u := url.URL{
		Scheme: "https",
		Host:   "s3.amazonaws.com",
		Path:   s.bucket,
	}

	if s.awsConfig.Endpoint != nil {
		endpoint, err := url.Parse(*s.awsConfig.Endpoint)
		if err != nil {
			return err, false
		}

		u.Scheme = endpoint.Scheme
		u.Host = endpoint.Host
	}

	var req *http.Request
	var resp *http.Response

	req, err = http.NewRequest("HEAD", u.String(), nil)
	if err != nil {
		return
	}

	allowFails := 3
	for i := 0; i < allowFails; i++ {
		resp, err = http.DefaultTransport.RoundTrip(req)
		if err != nil {
			return
		}
		if resp.StatusCode < 500 {
			break
		} else if resp.StatusCode == 503 && resp.Status == "503 Slow Down" {
			time.Sleep(time.Duration(i+1) * time.Second)
			// allow infinite retries for 503 slow down
			allowFails += 1
		}
	}

	region := resp.Header["X-Amz-Bucket-Region"]
	server := resp.Header["Server"]

	s3Log.Debugf("HEAD %v = %v %v", u.String(), resp.StatusCode, region)
	if region == nil {
		for k, v := range resp.Header {
			s3Log.Debugf("%v = %v", k, v)
		}
	}
	if server != nil && server[0] == "AmazonS3" {
		isAws = true
	}

	switch resp.StatusCode {
	case 200:
		// note that this only happens if the bucket is in us-east-1
		if len(s.config.Profile) == 0 {
			s.awsConfig.Credentials = credentials.AnonymousCredentials
			s3Log.Infof("anonymous bucket detected")
		}
	case 400:
		err = fuse.EINVAL
	case 403:
		err = syscall.EACCES
	case 404:
		err = syscall.ENXIO
	case 405:
		err = syscall.ENOTSUP
	default:
		err = awserr.New(strconv.Itoa(resp.StatusCode), resp.Status, nil)
	}

	if len(region) != 0 {
		if region[0] != *s.awsConfig.Region {
			s3Log.Infof("Switching from region '%v' to '%v'",
				*s.awsConfig.Region, region[0])
			s.awsConfig.Region = &region[0]
		}

		// we detected a region, so this is AWS and the error is irrelevant
		err = nil
	}

	return
}
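
// An illustrative exchange (hypothetical bucket and region values) showing
// why even an error response can pin down the region: a 403 that carries
// X-Amz-Bucket-Region still tells us where the bucket lives and that the
// endpoint is AWS, which is why the error is cleared above.
//
//	HEAD https://s3.amazonaws.com/mybucket
//	< HTTP/1.1 403 Forbidden
//	< X-Amz-Bucket-Region: us-west-2
//	< Server: AmazonS3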

func (s *S3Backend) testBucket(key string) (err error) {
	_, err = s.HeadBlob(&HeadBlobInput{Key: key})
	if err != nil {
		if err == fuse.ENOENT {
			err = nil
		}
	}

	return
}

func (s *S3Backend) fallbackV2Signer() (err error) {
	if s.v2Signer {
		return fuse.EINVAL
	}

	s3Log.Infoln("Falling back to v2 signer")
	s.v2Signer = true
	s.newS3()
	return
}

func (s *S3Backend) Init(key string) error {
	var isAws bool
	var err error

	if !s.config.RegionSet {
		err, isAws = s.detectBucketLocationByHEAD()
		if err == nil {
			// we detected a region header, this is probably AWS S3,
			// or we can use anonymous access, or both
			s.newS3()
			s.aws = isAws
		} else if err == syscall.ENXIO {
			return fmt.Errorf("bucket %v does not exist", s.bucket)
		} else {
			// this is NOT AWS, we expect the request to fail with 403 if this is not
			// an anonymous bucket
			if err != syscall.EACCES {
				s3Log.Errorf("Unable to access '%v': %v", s.bucket, err)
			}
		}
	}

	// try again with the credentials to make sure
	err = s.testBucket(key)
	if err != nil {
		if !isAws {
			// EMC returns 403 because it doesn't support v4 signing
			// swift3, ceph-s3 returns 400
			// Amplidata just gives up and returns 500
			if err == syscall.EACCES || err == fuse.EINVAL || err == syscall.EAGAIN {
				err = s.fallbackV2Signer()
				if err != nil {
					return err
				}
				err = s.testBucket(key)
			}
		}

		if err != nil {
			return err
		}
	}

	return nil
}

func (s *S3Backend) ListObjectsV2(params *s3.ListObjectsV2Input) (*s3.ListObjectsV2Output, string, error) {
	if s.aws {
		req, resp := s.S3.ListObjectsV2Request(params)
		err := req.Send()
		if err != nil {
			return nil, "", err
		}
		return resp, s.getRequestId(req), nil
	} else {
		v1 := s3.ListObjectsInput{
			Bucket:       params.Bucket,
			Delimiter:    params.Delimiter,
			EncodingType: params.EncodingType,
			MaxKeys:      params.MaxKeys,
			Prefix:       params.Prefix,
			RequestPayer: params.RequestPayer,
		}
		if params.StartAfter != nil {
			v1.Marker = params.StartAfter
		} else {
			v1.Marker = params.ContinuationToken
		}

		objs, err := s.S3.ListObjects(&v1)
		if err != nil {
			return nil, "", err
		}

		count := int64(len(objs.Contents))
		v2Objs := s3.ListObjectsV2Output{
			CommonPrefixes:        objs.CommonPrefixes,
			Contents:              objs.Contents,
			ContinuationToken:     objs.Marker,
			Delimiter:             objs.Delimiter,
			EncodingType:          objs.EncodingType,
			IsTruncated:           objs.IsTruncated,
			KeyCount:              &count,
			MaxKeys:               objs.MaxKeys,
			Name:                  objs.Name,
			NextContinuationToken: objs.NextMarker,
			Prefix:                objs.Prefix,
			StartAfter:            objs.Marker,
		}

		return &v2Objs, "", nil
	}
}
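
// On non-AWS endpoints the listing above is served by the v1 API: a
// caller's ContinuationToken is passed through as the v1 Marker, and the
// response's NextMarker comes back as NextContinuationToken, so pagination
// upstream works the same either way. For example (hypothetical keys):
//
//	out, _, err := s.ListObjectsV2(&s3.ListObjectsV2Input{
//		Bucket:            aws.String("my-bucket"),
//		ContinuationToken: aws.String("photos/0999.jpg"), // becomes v1 Marker
//	})
//	// out.NextContinuationToken carries the v1 NextMarker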

func metadataToLower(m map[string]*string) map[string]*string {
	if m != nil {
		var toDelete []string
		for k, v := range m {
			lower := strings.ToLower(k)
			if lower != k {
				m[lower] = v
				toDelete = append(toDelete, k)
			}
		}
		for _, k := range toDelete {
			delete(m, k)
		}
	}
	return m
}
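
// metadataToLower mutates the map in place; for example a map with the key
// "Uid" comes back as {"uid": ...}. Lowercasing keeps user metadata keys
// consistent regardless of how the server or SDK cased them.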

func (s *S3Backend) getRequestId(r *request.Request) string {
	return r.HTTPResponse.Header.Get("x-amz-request-id") + ": " +
		r.HTTPResponse.Header.Get("x-amz-id-2")
}

func (s *S3Backend) HeadBlob(param *HeadBlobInput) (*HeadBlobOutput, error) {
	head := s3.HeadObjectInput{Bucket: &s.bucket,
		Key: &param.Key,
	}
	if s.config.SseC != "" {
		head.SSECustomerAlgorithm = PString("AES256")
		head.SSECustomerKey = &s.config.SseC
		head.SSECustomerKeyMD5 = &s.config.SseCDigest
	}

	req, resp := s.S3.HeadObjectRequest(&head)
	err := req.Send()
	if err != nil {
		return nil, mapAwsError(err)
	}
	return &HeadBlobOutput{
		BlobItemOutput: BlobItemOutput{
			Key:          &param.Key,
			ETag:         resp.ETag,
			LastModified: resp.LastModified,
			Size:         uint64(*resp.ContentLength),
			StorageClass: resp.StorageClass,
		},
		ContentType: resp.ContentType,
		Metadata:    metadataToLower(resp.Metadata),
		IsDirBlob:   strings.HasSuffix(param.Key, "/"),
		RequestId:   s.getRequestId(req),
	}, nil
}

func (s *S3Backend) ListBlobs(param *ListBlobsInput) (*ListBlobsOutput, error) {
	var maxKeys *int64

	if param.MaxKeys != nil {
		maxKeys = aws.Int64(int64(*param.MaxKeys))
	}

	resp, reqId, err := s.ListObjectsV2(&s3.ListObjectsV2Input{
		Bucket:            &s.bucket,
		Prefix:            param.Prefix,
		Delimiter:         param.Delimiter,
		MaxKeys:           maxKeys,
		StartAfter:        param.StartAfter,
		ContinuationToken: param.ContinuationToken,
	})
	if err != nil {
		return nil, mapAwsError(err)
	}

	prefixes := make([]BlobPrefixOutput, 0)
	items := make([]BlobItemOutput, 0)

	for _, p := range resp.CommonPrefixes {
		prefixes = append(prefixes, BlobPrefixOutput{Prefix: p.Prefix})
	}
	for _, i := range resp.Contents {
		items = append(items, BlobItemOutput{
			Key:          i.Key,
			ETag:         i.ETag,
			LastModified: i.LastModified,
			Size:         uint64(*i.Size),
			StorageClass: i.StorageClass,
		})
	}

	return &ListBlobsOutput{
		Prefixes:              prefixes,
		Items:                 items,
		NextContinuationToken: resp.NextContinuationToken,
		IsTruncated:           *resp.IsTruncated,
		RequestId:             reqId,
	}, nil
}

func (s *S3Backend) DeleteBlob(param *DeleteBlobInput) (*DeleteBlobOutput, error) {
	req, _ := s.DeleteObjectRequest(&s3.DeleteObjectInput{
		Bucket: &s.bucket,
		Key:    &param.Key,
	})
	err := req.Send()
	if err != nil {
		return nil, mapAwsError(err)
	}
	return &DeleteBlobOutput{s.getRequestId(req)}, nil
}

func (s *S3Backend) DeleteBlobs(param *DeleteBlobsInput) (*DeleteBlobsOutput, error) {
	num_objs := len(param.Items)

	var items s3.Delete
	var objs = make([]*s3.ObjectIdentifier, num_objs)

	for i := range param.Items {
		objs[i] = &s3.ObjectIdentifier{Key: &param.Items[i]}
	}

	// attach the list of objects to delete to the Delete request
	items.SetObjects(objs)

	req, _ := s.DeleteObjectsRequest(&s3.DeleteObjectsInput{
		Bucket: &s.bucket,
		Delete: &items,
	})
	err := req.Send()
	if err != nil {
		return nil, mapAwsError(err)
	}

	return &DeleteBlobsOutput{s.getRequestId(req)}, nil
}
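
// Note that S3's DeleteObjects API accepts at most 1000 keys per request;
// DeleteBlobs sends the batch as-is, so callers are expected to stay under
// that limit.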

func (s *S3Backend) RenameBlob(param *RenameBlobInput) (*RenameBlobOutput, error) {
	return nil, syscall.ENOTSUP
}

func (s *S3Backend) mpuCopyPart(from string, to string, mpuId string, bytes string, part int64,
	sem semaphore, srcEtag *string, etag **string, errout *error) {

	defer sem.P(1)

	// XXX use CopySourceIfUnmodifiedSince to ensure that
	// we are copying from the same object
	params := &s3.UploadPartCopyInput{
		Bucket:            &s.bucket,
		Key:               &to,
		CopySource:        aws.String(pathEscape(from)),
		UploadId:          &mpuId,
		CopySourceRange:   &bytes,
		CopySourceIfMatch: srcEtag,
		PartNumber:        &part,
	}
	if s.config.SseC != "" {
		params.SSECustomerAlgorithm = PString("AES256")
		params.SSECustomerKey = &s.config.SseC
		params.SSECustomerKeyMD5 = &s.config.SseCDigest
		params.CopySourceSSECustomerAlgorithm = PString("AES256")
		params.CopySourceSSECustomerKey = &s.config.SseC
		params.CopySourceSSECustomerKeyMD5 = &s.config.SseCDigest
	}

	s3Log.Debug(params)

	resp, err := s.UploadPartCopy(params)
	if err != nil {
		s3Log.Errorf("UploadPartCopy %v = %v", params, err)
		*errout = mapAwsError(err)
		return
	}

	*etag = resp.CopyPartResult.ETag
	return
}

func sizeToParts(size int64) (int, int64) {
	const MAX_S3_MPU_SIZE = 5 * 1024 * 1024 * 1024 * 1024
	if size > MAX_S3_MPU_SIZE {
		panic(fmt.Sprintf("object size: %v exceeds maximum S3 MPU size: %v", size, MAX_S3_MPU_SIZE))
	}

	// Use the maximum number of parts to allow the most server-side copy
	// parallelism.
	const MAX_PARTS = 10 * 1000
	const MIN_PART_SIZE = 50 * 1024 * 1024
	partSize := MaxInt64(size/(MAX_PARTS-1), MIN_PART_SIZE)

	nParts := int(size / partSize)
	if size%partSize != 0 {
		nParts++
	}

	return nParts, partSize
}
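
// sizeToPartsExample is an illustrative sketch (not called anywhere; the
// numbers follow directly from MAX_PARTS and MIN_PART_SIZE above): a copy
// of 5GiB+1 bytes gets 103 parts of 50MiB each, and the part size only
// grows beyond 50MiB once the object exceeds roughly 488GiB, i.e.
// MIN_PART_SIZE * (MAX_PARTS-1).
func sizeToPartsExample() {
	nParts, partSize := sizeToParts(5*1024*1024*1024 + 1)
	fmt.Printf("nParts=%v partSize=%v\n", nParts, partSize)
	// prints: nParts=103 partSize=52428800
}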

func (s *S3Backend) mpuCopyParts(size int64, from string, to string, mpuId string,
	srcEtag *string, etags []*string, partSize int64, err *error) {

	rangeFrom := int64(0)
	rangeTo := int64(0)

	MAX_CONCURRENCY := MinInt(100, len(etags))
	sem := make(semaphore, MAX_CONCURRENCY)
	sem.P(MAX_CONCURRENCY)

	for i := int64(1); rangeTo < size; i++ {
		rangeFrom = rangeTo
		rangeTo = i * partSize
		if rangeTo > size {
			rangeTo = size
		}
		bytes := fmt.Sprintf("bytes=%v-%v", rangeFrom, rangeTo-1)

		sem.V(1)
		go s.mpuCopyPart(from, to, mpuId, bytes, i, sem, srcEtag, &etags[i-1], err)
	}

	sem.V(MAX_CONCURRENCY)
}
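
// boundedFanOutSketch is a minimal sketch of the concurrency pattern used
// by mpuCopyParts, assuming the channel-backed semaphore used above (P
// sends tokens into the channel, V receives them, as the initial
// P(MAX_CONCURRENCY) fill implies): fill the pool, take a token per
// goroutine, return it on completion, then drain the pool to wait for
// everything to finish.
func boundedFanOutSketch(work []func()) {
	concurrency := MinInt(4, len(work))
	sem := make(semaphore, concurrency)
	sem.P(concurrency) // fill the pool with tokens
	for _, w := range work {
		sem.V(1) // take a token; blocks while `concurrency` goroutines are in flight
		go func(w func()) {
			defer sem.P(1) // put the token back when done
			w()
		}(w)
	}
	sem.V(concurrency) // take back every token, i.e. wait for all goroutines
}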

func (s *S3Backend) copyObjectMultipart(size int64, from string, to string, mpuId string,
	srcEtag *string, metadata map[string]*string, storageClass *string) (requestId string, err error) {
	nParts, partSize := sizeToParts(size)
	etags := make([]*string, nParts)

	if mpuId == "" {
		params := &s3.CreateMultipartUploadInput{
			Bucket:       &s.bucket,
			Key:          &to,
			StorageClass: storageClass,
			ContentType:  s.flags.GetMimeType(to),
			Metadata:     metadataToLower(metadata),
		}

		if s.config.UseSSE {
			params.ServerSideEncryption = &s.sseType
			if s.config.UseKMS && s.config.KMSKeyID != "" {
				params.SSEKMSKeyId = &s.config.KMSKeyID
			}
		} else if s.config.SseC != "" {
			params.SSECustomerAlgorithm = PString("AES256")
			params.SSECustomerKey = &s.config.SseC
			params.SSECustomerKeyMD5 = &s.config.SseCDigest
		}

		if s.config.ACL != "" {
			params.ACL = &s.config.ACL
		}

		resp, err := s.CreateMultipartUpload(params)
		if err != nil {
			return "", mapAwsError(err)
		}

		mpuId = *resp.UploadId
	}

	s.mpuCopyParts(size, from, to, mpuId, srcEtag, etags, partSize, &err)

	if err != nil {
		return
	} else {
		parts := make([]*s3.CompletedPart, nParts)
		for i := 0; i < nParts; i++ {
			parts[i] = &s3.CompletedPart{
				ETag:       etags[i],
				PartNumber: aws.Int64(int64(i + 1)),
			}
		}

		params := &s3.CompleteMultipartUploadInput{
			Bucket:   &s.bucket,
			Key:      &to,
			UploadId: &mpuId,
			MultipartUpload: &s3.CompletedMultipartUpload{
				Parts: parts,
			},
		}

		s3Log.Debug(params)

		req, _ := s.CompleteMultipartUploadRequest(params)
		err = req.Send()
		if err != nil {
			s3Log.Errorf("Complete MPU %v = %v", params, err)
			err = mapAwsError(err)
		} else {
			requestId = s.getRequestId(req)
		}
	}

	return
}

func (s *S3Backend) CopyBlob(param *CopyBlobInput) (*CopyBlobOutput, error) {
	metadataDirective := s3.MetadataDirectiveCopy
	if param.Metadata != nil {
		metadataDirective = s3.MetadataDirectiveReplace
	}

	COPY_LIMIT := uint64(5 * 1024 * 1024 * 1024)

	if param.Size == nil || param.ETag == nil || (*param.Size > COPY_LIMIT &&
		(param.Metadata == nil || param.StorageClass == nil)) {

		params := &HeadBlobInput{Key: param.Source}
		resp, err := s.HeadBlob(params)
		if err != nil {
			return nil, err
		}

		param.Size = &resp.Size
		param.ETag = resp.ETag
		if param.Metadata == nil {
			param.Metadata = resp.Metadata
		}
		param.StorageClass = resp.StorageClass
	}

	if param.StorageClass == nil {
		if *param.Size < 128*1024 && s.config.StorageClass == "STANDARD_IA" {
			param.StorageClass = PString("STANDARD")
		} else {
			param.StorageClass = &s.config.StorageClass
		}
	}

	from := s.bucket + "/" + param.Source

	if !s.gcs && *param.Size > COPY_LIMIT {
		reqId, err := s.copyObjectMultipart(int64(*param.Size), from, param.Destination, "", param.ETag, param.Metadata, param.StorageClass)
		if err != nil {
			return nil, err
		}
		return &CopyBlobOutput{reqId}, nil
	}

	params := &s3.CopyObjectInput{
		Bucket:            &s.bucket,
		CopySource:        aws.String(pathEscape(from)),
		Key:               &param.Destination,
		StorageClass:      param.StorageClass,
		ContentType:       s.flags.GetMimeType(param.Destination),
		Metadata:          metadataToLower(param.Metadata),
		MetadataDirective: &metadataDirective,
	}

	s3Log.Debug(params)

	if s.config.UseSSE {
		params.ServerSideEncryption = &s.sseType
		if s.config.UseKMS && s.config.KMSKeyID != "" {
			params.SSEKMSKeyId = &s.config.KMSKeyID
		}
	} else if s.config.SseC != "" {
		params.SSECustomerAlgorithm = PString("AES256")
		params.SSECustomerKey = &s.config.SseC
		params.SSECustomerKeyMD5 = &s.config.SseCDigest
		params.CopySourceSSECustomerAlgorithm = PString("AES256")
		params.CopySourceSSECustomerKey = &s.config.SseC
		params.CopySourceSSECustomerKeyMD5 = &s.config.SseCDigest
	}

	if s.config.ACL != "" {
		params.ACL = &s.config.ACL
	}

	req, _ := s.CopyObjectRequest(params)
	// make a shallow copy of the client so we can change the
	// timeout only for this request but still re-use the
	// connection pool
	c := *(req.Config.HTTPClient)
	req.Config.HTTPClient = &c
	req.Config.HTTPClient.Timeout = 15 * time.Minute
	err := req.Send()
	if err != nil {
		s3Log.Errorf("CopyObject %v = %v", params, err)
		return nil, mapAwsError(err)
	}

	return &CopyBlobOutput{s.getRequestId(req)}, nil
}

func (s *S3Backend) GetBlob(param *GetBlobInput) (*GetBlobOutput, error) {
	get := s3.GetObjectInput{
		Bucket: &s.bucket,
		Key:    &param.Key,
	}

	if s.config.SseC != "" {
		get.SSECustomerAlgorithm = PString("AES256")
		get.SSECustomerKey = &s.config.SseC
		get.SSECustomerKeyMD5 = &s.config.SseCDigest
	}

	if param.Start != 0 || param.Count != 0 {
		var bytes string
		if param.Count != 0 {
			bytes = fmt.Sprintf("bytes=%v-%v", param.Start, param.Start+param.Count-1)
		} else {
			bytes = fmt.Sprintf("bytes=%v-", param.Start)
		}
		get.Range = &bytes
	}
	// TODO handle IfMatch

	if s.readSem != nil {
		s.readSem.P(1)
		defer s.readSem.V(1)
	}

	req, resp := s.GetObjectRequest(&get)
	err := req.Send()
	if err != nil {
		return nil, mapAwsError(err)
	}

	return &GetBlobOutput{
		HeadBlobOutput: HeadBlobOutput{
			BlobItemOutput: BlobItemOutput{
				Key:          &param.Key,
				ETag:         resp.ETag,
				LastModified: resp.LastModified,
				Size:         uint64(*resp.ContentLength),
				StorageClass: resp.StorageClass,
			},
			ContentType: resp.ContentType,
			Metadata:    metadataToLower(resp.Metadata),
		},
		Body:      resp.Body,
		RequestId: s.getRequestId(req),
	}, nil
}
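
// For example (hypothetical key), a ranged read of 50 bytes at offset 100
// sends the header "Range: bytes=100-149":
//
//	out, err := s.GetBlob(&GetBlobInput{Key: "a/b", Start: 100, Count: 50})
//
// A caveat implied by the check above: Start=0 with Count=0 fetches the
// whole object rather than zero bytes.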

func getDate(resp *http.Response) *time.Time {
	date := resp.Header.Get("Date")
	if date != "" {
		t, err := http.ParseTime(date)
		if err == nil {
			return &t
		}
		s3Log.Warnf("invalid date for %v: %v",
			resp.Request.URL.Path, date)
	}
	return nil
}

func (s *S3Backend) PutBlob(param *PutBlobInput) (*PutBlobOutput, error) {
	storageClass := s.config.StorageClass
	if param.Size != nil && *param.Size < 128*1024 && storageClass == "STANDARD_IA" {
		storageClass = "STANDARD"
	}

	put := &s3.PutObjectInput{
		Bucket:       &s.bucket,
		Key:          &param.Key,
		Metadata:     metadataToLower(param.Metadata),
		Body:         param.Body,
		StorageClass: &storageClass,
		ContentType:  param.ContentType,
	}

	if s.config.UseSSE {
		put.ServerSideEncryption = &s.sseType
		if s.config.UseKMS && s.config.KMSKeyID != "" {
			put.SSEKMSKeyId = &s.config.KMSKeyID
		}
	} else if s.config.SseC != "" {
		put.SSECustomerAlgorithm = PString("AES256")
		put.SSECustomerKey = &s.config.SseC
		put.SSECustomerKeyMD5 = &s.config.SseCDigest
	}

	if s.config.ACL != "" {
		put.ACL = &s.config.ACL
	}

	req, resp := s.PutObjectRequest(put)
	err := req.Send()
	if err != nil {
		return nil, mapAwsError(err)
	}

	return &PutBlobOutput{
		ETag:         resp.ETag,
		LastModified: getDate(req.HTTPResponse),
		StorageClass: &storageClass,
		RequestId:    s.getRequestId(req),
	}, nil
}

func (s *S3Backend) MultipartBlobBegin(param *MultipartBlobBeginInput) (*MultipartBlobCommitInput, error) {
	mpu := s3.CreateMultipartUploadInput{
		Bucket:       &s.bucket,
		Key:          &param.Key,
		StorageClass: &s.config.StorageClass,
		ContentType:  param.ContentType,
	}

	if s.config.UseSSE {
		mpu.ServerSideEncryption = &s.sseType
		if s.config.UseKMS && s.config.KMSKeyID != "" {
			mpu.SSEKMSKeyId = &s.config.KMSKeyID
		}
	} else if s.config.SseC != "" {
		mpu.SSECustomerAlgorithm = PString("AES256")
		mpu.SSECustomerKey = &s.config.SseC
		mpu.SSECustomerKeyMD5 = &s.config.SseCDigest
	}

	if s.config.ACL != "" {
		mpu.ACL = &s.config.ACL
	}

	resp, err := s.CreateMultipartUpload(&mpu)
	if err != nil {
		s3Log.Errorf("CreateMultipartUpload %v = %v", param.Key, err)
		return nil, mapAwsError(err)
	}

	return &MultipartBlobCommitInput{
		Key:      &param.Key,
		Metadata: metadataToLower(param.Metadata),
		UploadId: resp.UploadId,
		Parts:    make([]*string, 10000), // at most 10K parts
	}, nil
}

func (s *S3Backend) MultipartBlobAdd(param *MultipartBlobAddInput) (*MultipartBlobAddOutput, error) {
	en := &param.Commit.Parts[param.PartNumber-1]
	atomic.AddUint32(&param.Commit.NumParts, 1)

	params := s3.UploadPartInput{
		Bucket:     &s.bucket,
		Key:        param.Commit.Key,
		PartNumber: aws.Int64(int64(param.PartNumber)),
		UploadId:   param.Commit.UploadId,
		Body:       param.Body,
	}
	if s.config.SseC != "" {
		params.SSECustomerAlgorithm = PString("AES256")
		params.SSECustomerKey = &s.config.SseC
		params.SSECustomerKeyMD5 = &s.config.SseCDigest
	}
	s3Log.Debug(params)

	req, resp := s.UploadPartRequest(&params)
	err := req.Send()
	if err != nil {
		return nil, mapAwsError(err)
	}

	if *en != nil {
		panic(fmt.Sprintf("etag for part %v already set: %v", param.PartNumber, **en))
	}
	*en = resp.ETag

	return &MultipartBlobAddOutput{s.getRequestId(req)}, nil
}

func (s *S3Backend) MultipartBlobCommit(param *MultipartBlobCommitInput) (*MultipartBlobCommitOutput, error) {
	parts := make([]*s3.CompletedPart, param.NumParts)
	for i := uint32(0); i < param.NumParts; i++ {
		parts[i] = &s3.CompletedPart{
			ETag:       param.Parts[i],
			PartNumber: aws.Int64(int64(i + 1)),
		}
	}

	mpu := s3.CompleteMultipartUploadInput{
		Bucket:   &s.bucket,
		Key:      param.Key,
		UploadId: param.UploadId,
		MultipartUpload: &s3.CompletedMultipartUpload{
			Parts: parts,
		},
	}

	s3Log.Debug(mpu)

	req, resp := s.CompleteMultipartUploadRequest(&mpu)
	err := req.Send()
	if err != nil {
		return nil, mapAwsError(err)
	}

	s3Log.Debug(resp)

	return &MultipartBlobCommitOutput{
		ETag:         resp.ETag,
		LastModified: getDate(req.HTTPResponse),
		RequestId:    s.getRequestId(req),
	}, nil
}

func (s *S3Backend) MultipartBlobAbort(param *MultipartBlobCommitInput) (*MultipartBlobAbortOutput, error) {
	mpu := s3.AbortMultipartUploadInput{
		Bucket:   &s.bucket,
		Key:      param.Key,
		UploadId: param.UploadId,
	}
	req, _ := s.AbortMultipartUploadRequest(&mpu)
	err := req.Send()
	if err != nil {
		return nil, mapAwsError(err)
	}
	return &MultipartBlobAbortOutput{s.getRequestId(req)}, nil
}
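
// multipartUploadSketch is a minimal, illustrative sketch (hypothetical
// caller, not used by goofys) of the begin/add/commit protocol above. It
// relies only on fields referenced elsewhere in this file; parts are
// numbered from 1 and each Add slots its ETag into Commit.Parts.
func (s *S3Backend) multipartUploadSketch(key string, chunks []*strings.Reader) error {
	commit, err := s.MultipartBlobBegin(&MultipartBlobBeginInput{Key: key})
	if err != nil {
		return err
	}
	for i, chunk := range chunks {
		_, err = s.MultipartBlobAdd(&MultipartBlobAddInput{
			Commit:     commit,
			PartNumber: uint32(i + 1), // 1-based part numbers
			Body:       chunk,
		})
		if err != nil {
			// abort so the partial upload doesn't linger
			// (MultipartExpire below would eventually reap it anyway)
			_, _ = s.MultipartBlobAbort(commit)
			return err
		}
	}
	_, err = s.MultipartBlobCommit(commit)
	return err
}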

func (s *S3Backend) MultipartExpire(param *MultipartExpireInput) (*MultipartExpireOutput, error) {
	mpu, err := s.ListMultipartUploads(&s3.ListMultipartUploadsInput{
		Bucket: &s.bucket,
	})
	if err != nil {
		return nil, mapAwsError(err)
	}
	s3Log.Debug(mpu)

	now := time.Now()
	for _, upload := range mpu.Uploads {
		expireTime := upload.Initiated.Add(48 * time.Hour)

		if !expireTime.After(now) {
			params := &s3.AbortMultipartUploadInput{
				Bucket:   &s.bucket,
				Key:      upload.Key,
				UploadId: upload.UploadId,
			}
			resp, err := s.AbortMultipartUpload(params)
			s3Log.Debug(resp)

			if mapAwsError(err) == syscall.EACCES {
				break
			}
		} else {
			s3Log.Debugf("Keeping MPU Key=%v Id=%v", *upload.Key, *upload.UploadId)
		}
	}

	return &MultipartExpireOutput{}, nil
}

func (s *S3Backend) RemoveBucket(param *RemoveBucketInput) (*RemoveBucketOutput, error) {
	_, err := s.DeleteBucket(&s3.DeleteBucketInput{Bucket: &s.bucket})
	if err != nil {
		return nil, mapAwsError(err)
	}
	return &RemoveBucketOutput{}, nil
}

func (s *S3Backend) MakeBucket(param *MakeBucketInput) (*MakeBucketOutput, error) {
	_, err := s.CreateBucket(&s3.CreateBucketInput{
		Bucket: &s.bucket,
		ACL:    &s.config.ACL,
	})
	if err != nil {
		return nil, mapAwsError(err)
	}

	if s.config.BucketOwner != "" {
		var owner s3.Tag
		owner.SetKey("Owner")
		owner.SetValue(s.config.BucketOwner)

		param := s3.PutBucketTaggingInput{
			Bucket: &s.bucket,
			Tagging: &s3.Tagging{
				TagSet: []*s3.Tag{&owner},
			},
		}

		for i := 0; i < 10; i++ {
			_, err = s.PutBucketTagging(&param)
			err = mapAwsError(err)
			if err == nil {
				// stop retrying on success; note that a bare `break`
				// inside the switch below would only exit the switch,
				// not this loop
				break
			}
			switch err {
			case syscall.ENXIO, syscall.EINTR:
				s3Log.Infof("waiting for bucket")
				time.Sleep((time.Duration(i) + 1) * 2 * time.Second)
			default:
				s3Log.Errorf("Failed to tag bucket %v: %v", s.bucket, err)
				return nil, err
			}
		}
	}

	return &MakeBucketOutput{}, err
}

func (s *S3Backend) Delegate() interface{} {
	return s
}