github.com/mika/distribution@v2.2.2-0.20160108133430-a75790e3d8e0+incompatible/registry/storage/driver/s3/s3.go

// Package s3 provides a storagedriver.StorageDriver implementation to
// store blobs in Amazon S3 cloud storage.
//
// This package leverages the AdRoll/goamz client library for interfacing with
// S3.
//
// Because S3 is a key-value store, the Stat call does not support last-modification
// time for directories (directories are an abstraction for key-value stores).
//
// Keep in mind that S3 guarantees only eventual consistency, so do not assume
// that a successful write means immediate access to the data written (although
// in most regions a new object put has guaranteed read-after-write). The only true
// guarantee is that once you call Stat and receive a certain file size, that much of
// the file is already accessible.
package s3

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
	"reflect"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/AdRoll/goamz/aws"
	"github.com/AdRoll/goamz/s3"
	"github.com/Sirupsen/logrus"

	"github.com/docker/distribution/context"
	storagedriver "github.com/docker/distribution/registry/storage/driver"
	"github.com/docker/distribution/registry/storage/driver/base"
	"github.com/docker/distribution/registry/storage/driver/factory"
)

const driverName = "s3"

// minChunkSize defines the minimum multipart upload chunk size.
// The S3 API requires multipart upload chunks to be at least 5MB.
const minChunkSize = 5 << 20

// defaultChunkSize is twice the minimum chunk size, i.e. 10MB.
const defaultChunkSize = 2 * minChunkSize

// listMax is the largest number of objects you can request from S3 in a list call.
const listMax = 1000

// DriverParameters encapsulates all of the driver parameters after all values have been set.
type DriverParameters struct {
	AccessKey     string
	SecretKey     string
	Bucket        string
	Region        aws.Region
	Encrypt       bool
	Secure        bool
	V4Auth        bool
	ChunkSize     int64
	RootDirectory string
}

func init() {
	factory.Register(driverName, &s3DriverFactory{})
}

// s3DriverFactory implements the factory.StorageDriverFactory interface
type s3DriverFactory struct{}

func (factory *s3DriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) {
	return FromParameters(parameters)
}

type driver struct {
	S3            *s3.S3
	Bucket        *s3.Bucket
	ChunkSize     int64
	Encrypt       bool
	RootDirectory string

	pool  sync.Pool // pool []byte buffers used for WriteStream
	zeros []byte    // shared, zero-valued buffer used for WriteStream
}

type baseEmbed struct {
	base.Base
}

// Driver is a storagedriver.StorageDriver implementation backed by Amazon S3.
// Objects are stored at absolute keys in the provided bucket.
type Driver struct {
	baseEmbed
}

// FromParameters constructs a new Driver with a given parameters map.
// Required parameters:
// - accesskey
// - secretkey
// - region
// - bucket
// - encrypt
func FromParameters(parameters map[string]interface{}) (*Driver, error) {
	// Providing no values for these is valid in case the user is authenticating
	// with an IAM role on an EC2 instance (in which case the instance credentials
	// will be summoned when GetAuth is called)
	accessKey, ok := parameters["accesskey"]
	if !ok {
		accessKey = ""
	}
	secretKey, ok := parameters["secretkey"]
	if !ok {
		secretKey = ""
	}

	regionName, ok := parameters["region"]
	if !ok || fmt.Sprint(regionName) == "" {
		return nil, fmt.Errorf("No region parameter provided")
	}
	region := aws.GetRegion(fmt.Sprint(regionName))
	if region.Name == "" {
		return nil, fmt.Errorf("Invalid region provided: %v", region)
	}

	bucket, ok := parameters["bucket"]
	if !ok || fmt.Sprint(bucket) == "" {
		return nil, fmt.Errorf("No bucket parameter provided")
	}

	encryptBool := false
	encrypt, ok := parameters["encrypt"]
	if ok {
		encryptBool, ok = encrypt.(bool)
		if !ok {
			return nil, fmt.Errorf("The encrypt parameter should be a boolean")
		}
	}

	secureBool := true
	secure, ok := parameters["secure"]
	if ok {
		secureBool, ok = secure.(bool)
		if !ok {
			return nil, fmt.Errorf("The secure parameter should be a boolean")
		}
	}

	v4AuthBool := false
	v4Auth, ok := parameters["v4auth"]
	if ok {
		v4AuthBool, ok = v4Auth.(bool)
		if !ok {
			return nil, fmt.Errorf("The v4auth parameter should be a boolean")
		}
	}

	chunkSize := int64(defaultChunkSize)
	chunkSizeParam, ok := parameters["chunksize"]
	if ok {
		switch v := chunkSizeParam.(type) {
		case string:
			vv, err := strconv.ParseInt(v, 0, 64)
			if err != nil {
				return nil, fmt.Errorf("chunksize parameter must be an integer, %v invalid", chunkSizeParam)
			}
			chunkSize = vv
		case int64:
			chunkSize = v
		case int, uint, int32, uint32, uint64:
			chunkSize = reflect.ValueOf(v).Convert(reflect.TypeOf(chunkSize)).Int()
		default:
			return nil, fmt.Errorf("invalid value for chunksize: %#v", chunkSizeParam)
		}

		if chunkSize < minChunkSize {
			return nil, fmt.Errorf("The chunksize %#v parameter should be a number that is larger than or equal to %d", chunkSize, minChunkSize)
		}
	}

	rootDirectory, ok := parameters["rootdirectory"]
	if !ok {
		rootDirectory = ""
	}

	params := DriverParameters{
		fmt.Sprint(accessKey),
		fmt.Sprint(secretKey),
		fmt.Sprint(bucket),
		region,
		encryptBool,
		secureBool,
		v4AuthBool,
		chunkSize,
		fmt.Sprint(rootDirectory),
	}

	return New(params)
}

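// exampleFromParameters is an illustrative sketch, not part of the original
// driver: it shows the shape of the parameters map that FromParameters
// expects, based on the parsing code above. The credential, region, and
// bucket values are hypothetical placeholders.
func exampleFromParameters() (*Driver, error) {
	return FromParameters(map[string]interface{}{
		"accesskey": "AKIAEXAMPLE",    // optional when EC2 instance credentials are used
		"secretkey": "example-secret", // optional when EC2 instance credentials are used
		"region":    "us-east-1",
		"bucket":    "example-bucket",
		"encrypt":   true,
		"chunksize": int64(10 << 20), // optional; must be >= minChunkSize (5MB)
	})
}
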
// New constructs a new Driver with the given AWS credentials, region, encryption flag, and
// bucketName
func New(params DriverParameters) (*Driver, error) {
	auth, err := aws.GetAuth(params.AccessKey, params.SecretKey, "", time.Time{})
	if err != nil {
		return nil, fmt.Errorf("unable to resolve aws credentials, please ensure that 'accesskey' and 'secretkey' are properly set or the credentials are available in $HOME/.aws/credentials: %v", err)
	}

	if !params.Secure {
		params.Region.S3Endpoint = strings.Replace(params.Region.S3Endpoint, "https", "http", 1)
	}

	s3obj := s3.New(auth, params.Region)
	bucket := s3obj.Bucket(params.Bucket)

	if params.V4Auth {
		s3obj.Signature = aws.V4Signature
	} else {
		if params.Region.Name == "eu-central-1" {
			return nil, fmt.Errorf("The eu-central-1 region only works with v4 authentication")
		}
	}

	// TODO Currently multipart uploads have no timestamps, so this would be unwise
	// if you initiated a new s3driver while another one is running on the same bucket.
	// multis, _, err := bucket.ListMulti("", "")
	// if err != nil {
	// 	return nil, err
	// }

	// for _, multi := range multis {
	// 	err := multi.Abort()
	// 	//TODO appropriate to do this error checking?
	// 	if err != nil {
	// 		return nil, err
	// 	}
	// }

	d := &driver{
		S3:            s3obj,
		Bucket:        bucket,
		ChunkSize:     params.ChunkSize,
		Encrypt:       params.Encrypt,
		RootDirectory: params.RootDirectory,
		zeros:         make([]byte, params.ChunkSize),
	}

	d.pool.New = func() interface{} {
		return make([]byte, d.ChunkSize)
	}

	return &Driver{
		baseEmbed: baseEmbed{
			Base: base.Base{
				StorageDriver: d,
			},
		},
	}, nil
}

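// exampleNew is an illustrative sketch, not part of the original driver: it
// constructs a Driver directly via New with a fully populated
// DriverParameters. The credential, bucket, and root-directory values are
// hypothetical placeholders.
func exampleNew() (*Driver, error) {
	return New(DriverParameters{
		AccessKey:     "AKIAEXAMPLE",
		SecretKey:     "example-secret",
		Bucket:        "example-bucket",
		Region:        aws.GetRegion("us-east-1"),
		Encrypt:       true,
		Secure:        true,
		V4Auth:        false,
		ChunkSize:     defaultChunkSize,
		RootDirectory: "/registry",
	})
}
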
// Implement the storagedriver.StorageDriver interface

func (d *driver) Name() string {
	return driverName
}

// GetContent retrieves the content stored at "path" as a []byte.
func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) {
	content, err := d.Bucket.Get(d.s3Path(path))
	if err != nil {
		return nil, parseError(path, err)
	}
	return content, nil
}

// PutContent stores the []byte content at a location designated by "path".
func (d *driver) PutContent(ctx context.Context, path string, contents []byte) error {
	return parseError(path, d.Bucket.Put(d.s3Path(path), contents, d.getContentType(), getPermissions(), d.getOptions()))
}

// ReadStream retrieves an io.ReadCloser for the content stored at "path" with a
// given byte offset.
func (d *driver) ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error) {
	headers := make(http.Header)
	headers.Add("Range", "bytes="+strconv.FormatInt(offset, 10)+"-")

	resp, err := d.Bucket.GetResponseWithHeaders(d.s3Path(path), headers)
	if err != nil {
		if s3Err, ok := err.(*s3.Error); ok && s3Err.Code == "InvalidRange" {
			return ioutil.NopCloser(bytes.NewReader(nil)), nil
		}

		return nil, parseError(path, err)
	}
	return resp.Body, nil
}

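// exampleReadFrom is an illustrative sketch, not part of the original driver:
// it drains the remainder of an object starting at a byte offset and closes
// the stream when done. The path is a hypothetical placeholder.
func exampleReadFrom(ctx context.Context, d *driver, offset int64) ([]byte, error) {
	rc, err := d.ReadStream(ctx, "/example/blob", offset)
	if err != nil {
		return nil, err
	}
	defer rc.Close()
	return ioutil.ReadAll(rc)
}
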
// WriteStream stores the contents of the provided io.Reader at a
// location designated by the given path. The driver will know it has
// received the full contents when the reader returns io.EOF. The number
// of successfully READ bytes will be returned, even if an error is
// returned. May be used to resume writing a stream by providing a nonzero
// offset. Writing at an offset past the current size zero-fills the gap
// between the current end of the file and the offset before the new data
// is written.
func (d *driver) WriteStream(ctx context.Context, path string, offset int64, reader io.Reader) (totalRead int64, err error) {
	partNumber := 1
	bytesRead := 0
	var putErrChan chan error
	parts := []s3.Part{}
	var part s3.Part
	done := make(chan struct{}) // stopgap to free up waiting goroutines

	multi, err := d.Bucket.InitMulti(d.s3Path(path), d.getContentType(), getPermissions(), d.getOptions())
	if err != nil {
		return 0, err
	}

	buf := d.getbuf()

	// We never want to leave a dangling multipart upload, our only consistent state is
	// when there is a whole object at path. This is in order to remain consistent with
	// the stat call.
	//
	// Note that if the machine dies before executing the defer, we will be left with a dangling
	// multipart upload, which will eventually be cleaned up, but we will lose all of the progress
	// made prior to the machine crashing.
	defer func() {
		if putErrChan != nil {
			if putErr := <-putErrChan; putErr != nil {
				err = putErr
			}
		}

		if len(parts) > 0 {
			if multi == nil {
				// Parts should be empty if the multi is not initialized
				panic("Unreachable")
			} else {
				if multi.Complete(parts) != nil {
					multi.Abort()
				}
			}
		}

		d.putbuf(buf) // needs to be here to pick up new buf value
		close(done)   // free up any waiting goroutines
	}()
	// Fills from 0 to total from current
	fromSmallCurrent := func(total int64) error {
		current, err := d.ReadStream(ctx, path, 0)
		if err != nil {
			return err
		}
		defer current.Close() // don't leak the response body

		bytesRead = 0
		for int64(bytesRead) < total {
			// The loop should very rarely enter a second iteration
			nn, err := current.Read(buf[bytesRead:total])
			bytesRead += nn
			if err != nil {
				if err != io.EOF {
					return err
				}

				break
			}

		}
		return nil
	}

	// Fills from parameter to chunkSize from reader
	fromReader := func(from int64) error {
		bytesRead = 0
		for from+int64(bytesRead) < d.ChunkSize {
			nn, err := reader.Read(buf[from+int64(bytesRead):])
			totalRead += int64(nn)
			bytesRead += nn

			if err != nil {
				if err != io.EOF {
					return err
				}

				break
			}
		}

		if putErrChan == nil {
			putErrChan = make(chan error)
		} else {
			if putErr := <-putErrChan; putErr != nil {
				putErrChan = nil
				return putErr
			}
		}

		go func(bytesRead int, from int64, buf []byte) {
			defer d.putbuf(buf) // this buffer gets dropped after this call

			// DRAGONS(stevvooe): There are a few things one might want to know
			// about this section. First, the putErrChan is expecting an error
			// and a nil or just a nil to come through the channel. This is
			// covered by the silly defer below. The other aspect is the s3
			// retry backoff to deal with RequestTimeout errors. Even though
			// the underlying s3 library should handle it, it doesn't seem to
			// be part of the shouldRetry function (see AdRoll/goamz/s3).
			defer func() {
				select {
				case putErrChan <- nil: // for some reason, we do this no matter what.
				case <-done:
					return // ensure we don't leak the goroutine
				}
			}()

			if bytesRead <= 0 {
				return
			}

			var err error
			var part s3.Part

		loop:
			for retries := 0; retries < 5; retries++ {
				part, err = multi.PutPart(int(partNumber), bytes.NewReader(buf[0:int64(bytesRead)+from]))
				if err == nil {
					break // success!
				}

				// NOTE(stevvooe): This retry code tries to only retry under
				// conditions where the s3 package does not. We may add s3
				// error codes to the below if we see others bubble up in the
				// application. Right now, the most troubling is
				// RequestTimeout, which seems to be triggered only when a tcp
				// connection to s3 slows to a crawl. If the RequestTimeout
				// ends up getting added to the s3 library and we don't see
				// other errors, this retry loop can be removed.
				switch err := err.(type) {
				case *s3.Error:
					switch err.Code {
					case "RequestTimeout":
						// allow retries on only this error.
					default:
						break loop
					}
				}

				backoff := 100 * time.Millisecond * time.Duration(retries+1)
				logrus.Errorf("error putting part, retrying after %v: %v", backoff.String(), err)
				time.Sleep(backoff)
			}

			if err != nil {
				logrus.Errorf("error putting part, aborting: %v", err)
				select {
				case putErrChan <- err:
				case <-done:
					return // don't leak the goroutine
				}
			}

			// parts and partNumber are safe, because this function is the
			// only one modifying them and we force it to be executed
			// serially.
			parts = append(parts, part)
			partNumber++
		}(bytesRead, from, buf)

		buf = d.getbuf() // use a new buffer for the next call
		return nil
	}

	if offset > 0 {
		resp, err := d.Bucket.Head(d.s3Path(path), nil)
		if err != nil {
			if s3Err, ok := err.(*s3.Error); !ok || s3Err.Code != "NoSuchKey" {
				return 0, err
			}
		}

		currentLength := int64(0)
		if err == nil {
			currentLength = resp.ContentLength
		}

		if currentLength >= offset {
			if offset < d.ChunkSize {
				// chunkSize > currentLength >= offset
				if err = fromSmallCurrent(offset); err != nil {
					return totalRead, err
				}

				if err = fromReader(offset); err != nil {
					return totalRead, err
				}

				if totalRead+offset < d.ChunkSize {
					return totalRead, nil
				}
			} else {
				// currentLength >= offset >= chunkSize
				_, part, err = multi.PutPartCopy(partNumber,
					s3.CopyOptions{CopySourceOptions: "bytes=0-" + strconv.FormatInt(offset-1, 10)},
					d.Bucket.Name+"/"+d.s3Path(path))
				if err != nil {
					return 0, err
				}

				parts = append(parts, part)
				partNumber++
			}
		} else {
			// Fills between parameters with 0s but only when to - from <= chunkSize
			fromZeroFillSmall := func(from, to int64) error {
				bytesRead = 0
				for from+int64(bytesRead) < to {
					nn, err := bytes.NewReader(d.zeros).Read(buf[from+int64(bytesRead) : to])
					bytesRead += nn
					if err != nil {
						return err
					}
				}

				return nil
			}

			// Fills between parameters with 0s, making new parts
			fromZeroFillLarge := func(from, to int64) error {
				bytesRead64 := int64(0)
				for to-(from+bytesRead64) >= d.ChunkSize {
					part, err := multi.PutPart(int(partNumber), bytes.NewReader(d.zeros))
					if err != nil {
						return err
					}
					bytesRead64 += d.ChunkSize

					parts = append(parts, part)
					partNumber++
				}

				return fromZeroFillSmall(0, (to-from)%d.ChunkSize)
			}

			// currentLength < offset
			if currentLength < d.ChunkSize {
				if offset < d.ChunkSize {
					// chunkSize > offset > currentLength
					if err = fromSmallCurrent(currentLength); err != nil {
						return totalRead, err
					}

					if err = fromZeroFillSmall(currentLength, offset); err != nil {
						return totalRead, err
					}

					if err = fromReader(offset); err != nil {
						return totalRead, err
					}

					if totalRead+offset < d.ChunkSize {
						return totalRead, nil
					}
				} else {
					// offset >= chunkSize > currentLength
					if err = fromSmallCurrent(currentLength); err != nil {
						return totalRead, err
					}

					if err = fromZeroFillSmall(currentLength, d.ChunkSize); err != nil {
						return totalRead, err
					}

					part, err = multi.PutPart(int(partNumber), bytes.NewReader(buf))
					if err != nil {
						return totalRead, err
					}

					parts = append(parts, part)
					partNumber++

					// Zero fill from chunkSize up to offset, then some reader
					if err = fromZeroFillLarge(d.ChunkSize, offset); err != nil {
						return totalRead, err
					}

					if err = fromReader(offset % d.ChunkSize); err != nil {
						return totalRead, err
					}

					if totalRead+(offset%d.ChunkSize) < d.ChunkSize {
						return totalRead, nil
					}
				}
			} else {
				// offset > currentLength >= chunkSize
				_, part, err = multi.PutPartCopy(partNumber,
					s3.CopyOptions{},
					d.Bucket.Name+"/"+d.s3Path(path))
				if err != nil {
					return 0, err
				}

				parts = append(parts, part)
				partNumber++

				// Zero fill from currentLength up to offset, then some reader
				if err = fromZeroFillLarge(currentLength, offset); err != nil {
					return totalRead, err
				}

				if err = fromReader((offset - currentLength) % d.ChunkSize); err != nil {
					return totalRead, err
				}

				if totalRead+((offset-currentLength)%d.ChunkSize) < d.ChunkSize {
					return totalRead, nil
				}
			}

		}
	}

	for {
		if err = fromReader(0); err != nil {
			return totalRead, err
		}

		if int64(bytesRead) < d.ChunkSize {
			break
		}
	}

	return totalRead, nil
}

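// exampleResumeWrite is an illustrative sketch, not part of the original
// driver: it resumes an interrupted upload by passing the object's current
// size as the offset, the resume behavior described in the WriteStream
// documentation above. Note that Stat followed by WriteStream is not atomic.
func exampleResumeWrite(ctx context.Context, d *driver, path string, more io.Reader) (int64, error) {
	fi, err := d.Stat(ctx, path)
	if err != nil {
		return 0, err
	}
	// Continue writing where the stored object currently ends.
	return d.WriteStream(ctx, path, fi.Size(), more)
}
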
// Stat retrieves the FileInfo for the given path, including the current size
// in bytes and the time of last modification.
func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) {
	listResponse, err := d.Bucket.List(d.s3Path(path), "", "", 1)
	if err != nil {
		return nil, err
	}

	fi := storagedriver.FileInfoFields{
		Path: path,
	}

	if len(listResponse.Contents) == 1 {
		if listResponse.Contents[0].Key != d.s3Path(path) {
			fi.IsDir = true
		} else {
			fi.IsDir = false
			fi.Size = listResponse.Contents[0].Size

			timestamp, err := time.Parse(time.RFC3339Nano, listResponse.Contents[0].LastModified)
			if err != nil {
				return nil, err
			}
			fi.ModTime = timestamp
		}
	} else if len(listResponse.CommonPrefixes) == 1 {
		fi.IsDir = true
	} else {
		return nil, storagedriver.PathNotFoundError{Path: path}
	}

	return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil
}

// List returns a list of the objects that are direct descendants of the given path.
func (d *driver) List(ctx context.Context, opath string) ([]string, error) {
	path := opath
	if path != "/" && path[len(path)-1] != '/' {
		path = path + "/"
	}

	// This is to cover for the cases when the rootDirectory of the driver is either "" or "/".
	// In those cases, there is no root prefix to replace and we must actually add a "/" to all
	// results in order to keep them as valid paths as recognized by storagedriver.PathRegexp
	prefix := ""
	if d.s3Path("") == "" {
		prefix = "/"
	}

	listResponse, err := d.Bucket.List(d.s3Path(path), "/", "", listMax)
	if err != nil {
		return nil, parseError(opath, err)
	}

	files := []string{}
	directories := []string{}

	for {
		for _, key := range listResponse.Contents {
			files = append(files, strings.Replace(key.Key, d.s3Path(""), prefix, 1))
		}

		for _, commonPrefix := range listResponse.CommonPrefixes {
			directories = append(directories, strings.Replace(commonPrefix[0:len(commonPrefix)-1], d.s3Path(""), prefix, 1))
		}

		if listResponse.IsTruncated {
			listResponse, err = d.Bucket.List(d.s3Path(path), "/", listResponse.NextMarker, listMax)
			if err != nil {
				return nil, err
			}
		} else {
			break
		}
	}

	if opath != "/" {
		if len(files) == 0 && len(directories) == 0 {
			// Treat empty response as missing directory, since we don't actually
			// have directories in s3.
			return nil, storagedriver.PathNotFoundError{Path: opath}
		}
	}

	return append(files, directories...), nil
}

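// exampleListChildren is an illustrative sketch, not part of the original
// driver: it lists the direct children of a hypothetical "directory" prefix.
// Because directories are only a key-prefix abstraction (see the package
// documentation), files and subdirectories come back in a single slice.
func exampleListChildren(ctx context.Context, d *driver) ([]string, error) {
	return d.List(ctx, "/example")
}
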
// Move moves an object stored at sourcePath to destPath, removing the original
// object.
func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error {
	/* This is terrible, but aws doesn't have an actual move. */
	_, err := d.Bucket.PutCopy(d.s3Path(destPath), getPermissions(),
		s3.CopyOptions{Options: d.getOptions(), ContentType: d.getContentType()}, d.Bucket.Name+"/"+d.s3Path(sourcePath))
	if err != nil {
		return parseError(sourcePath, err)
	}

	return d.Delete(ctx, sourcePath)
}

// Delete recursively deletes all objects stored at "path" and its subpaths.
func (d *driver) Delete(ctx context.Context, path string) error {
	listResponse, err := d.Bucket.List(d.s3Path(path), "", "", listMax)
	if err != nil || len(listResponse.Contents) == 0 {
		return storagedriver.PathNotFoundError{Path: path}
	}

	s3Objects := make([]s3.Object, listMax)

	for len(listResponse.Contents) > 0 {
		for index, key := range listResponse.Contents {
			s3Objects[index].Key = key.Key
		}

		err := d.Bucket.DelMulti(s3.Delete{Quiet: false, Objects: s3Objects[0:len(listResponse.Contents)]})
		if err != nil {
			return err
		}

		listResponse, err = d.Bucket.List(d.s3Path(path), "", "", listMax)
		if err != nil {
			return err
		}
	}

	return nil
}

// URLFor returns a URL which may be used to retrieve the content stored at the given path.
// May return an ErrUnsupportedMethod in certain StorageDriver implementations.
func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) {
	methodString := "GET"
	method, ok := options["method"]
	if ok {
		methodString, ok = method.(string)
		if !ok || (methodString != "GET" && methodString != "HEAD") {
			return "", storagedriver.ErrUnsupportedMethod{}
		}
	}

	expiresTime := time.Now().Add(20 * time.Minute)
	expires, ok := options["expiry"]
	if ok {
		et, ok := expires.(time.Time)
		if ok {
			expiresTime = et
		}
	}

	return d.Bucket.SignedURLWithMethod(methodString, d.s3Path(path), expiresTime, nil, nil), nil
}

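// exampleSignedURL is an illustrative sketch, not part of the original
// driver: it requests a pre-signed GET URL that expires in one hour, using
// the "method" and "expiry" option keys that URLFor inspects above. The path
// is a hypothetical placeholder.
func exampleSignedURL(ctx context.Context, d *driver) (string, error) {
	return d.URLFor(ctx, "/example/blob", map[string]interface{}{
		"method": "GET",
		"expiry": time.Now().Add(1 * time.Hour),
	})
}
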
func (d *driver) s3Path(path string) string {
	return strings.TrimLeft(strings.TrimRight(d.RootDirectory, "/")+path, "/")
}

// S3BucketKey returns the s3 bucket key for the given storage driver path.
func (d *Driver) S3BucketKey(path string) string {
	return d.StorageDriver.(*driver).s3Path(path)
}

func parseError(path string, err error) error {
	if s3Err, ok := err.(*s3.Error); ok && s3Err.Code == "NoSuchKey" {
		return storagedriver.PathNotFoundError{Path: path}
	}

	return err
}

func hasCode(err error, code string) bool {
	s3err, ok := err.(*aws.Error)
	return ok && s3err.Code == code
}

func (d *driver) getOptions() s3.Options {
	return s3.Options{SSE: d.Encrypt}
}

func getPermissions() s3.ACL {
	return s3.Private
}

func (d *driver) getContentType() string {
	return "application/octet-stream"
}

// getbuf returns a buffer from the driver's pool with length d.ChunkSize.
func (d *driver) getbuf() []byte {
	return d.pool.Get().([]byte)
}

// putbuf zeroes p and returns it to the driver's pool.
func (d *driver) putbuf(p []byte) {
	copy(p, d.zeros)
	d.pool.Put(p)
}