github.com/lusis/distribution@v2.0.1+incompatible/registry/storage/driver/s3/s3.go

// Package s3 provides a storagedriver.StorageDriver implementation to
// store blobs in Amazon S3 cloud storage.
//
// This package leverages the AdRoll/goamz client library for interfacing with
// S3.
//
// Because S3 is a key-value store, the Stat call does not support last
// modification time for directories (directories are an abstraction for
// key-value stores).
//
// Keep in mind that S3 guarantees only eventual consistency, so do not assume
// that a successful write will mean immediate access to the data written
// (although in most regions a new object PUT has read-after-write
// consistency). The only true guarantee is that once you call Stat and
// receive a certain file size, that much of the file is already accessible.
package s3

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
	"reflect"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/AdRoll/goamz/aws"
	"github.com/AdRoll/goamz/s3"
	"github.com/Sirupsen/logrus"
	storagedriver "github.com/docker/distribution/registry/storage/driver"
	"github.com/docker/distribution/registry/storage/driver/base"
	"github.com/docker/distribution/registry/storage/driver/factory"
)

const driverName = "s3"

// minChunkSize defines the minimum multipart upload chunk size.
// The S3 API requires multipart upload chunks to be at least 5MB.
const minChunkSize = 5 << 20

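// defaultChunkSize is the chunk size used when no chunksize parameter is
// provided: twice the S3 minimum, i.e. 10MB.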
const defaultChunkSize = 2 * minChunkSize

// listMax is the largest number of objects you can request from S3 in a list call
const listMax = 1000

// DriverParameters encapsulates all of the driver parameters after all values have been set
type DriverParameters struct {
	AccessKey     string
	SecretKey     string
	Bucket        string
	Region        aws.Region
	Encrypt       bool
	Secure        bool
	V4Auth        bool
	ChunkSize     int64
	RootDirectory string
}

func init() {
	factory.Register(driverName, &s3DriverFactory{})
}
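
// A minimal usage sketch (not part of the original source): once this package
// is imported for its side effects, a driver can be constructed through the
// factory by name. All parameter values below are illustrative placeholders.
//
//	import (
//		"github.com/docker/distribution/registry/storage/driver/factory"
//		_ "github.com/docker/distribution/registry/storage/driver/s3"
//	)
//
//	driver, err := factory.Create("s3", map[string]interface{}{
//		"region": "us-east-1",
//		"bucket": "my-registry-bucket", // hypothetical bucket
//	})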

// s3DriverFactory implements the factory.StorageDriverFactory interface
type s3DriverFactory struct{}

func (factory *s3DriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) {
	return FromParameters(parameters)
}

type driver struct {
	S3            *s3.S3
	Bucket        *s3.Bucket
	ChunkSize     int64
	Encrypt       bool
	RootDirectory string

	pool  sync.Pool // pool []byte buffers used for WriteStream
	zeros []byte    // shared, zero-valued buffer used for WriteStream
}

type baseEmbed struct {
	base.Base
}

// Driver is a storagedriver.StorageDriver implementation backed by Amazon S3.
// Objects are stored at absolute keys in the provided bucket.
type Driver struct {
	baseEmbed
}

// FromParameters constructs a new Driver with a given parameters map
// Required parameters:
// - region
// - bucket
// Optional parameters:
// - accesskey
// - secretkey
// - encrypt
// - secure
// - v4auth
// - chunksize
// - rootdirectory
func FromParameters(parameters map[string]interface{}) (*Driver, error) {
	// Providing no values for these is valid in case the user is authenticating
	// with an IAM role on an EC2 instance (in which case the instance credentials
	// will be used when GetAuth is called)
	accessKey, ok := parameters["accesskey"]
	if !ok {
		accessKey = ""
	}
	secretKey, ok := parameters["secretkey"]
	if !ok {
		secretKey = ""
	}

	regionName, ok := parameters["region"]
	if !ok || fmt.Sprint(regionName) == "" {
		return nil, fmt.Errorf("No region parameter provided")
	}
	region := aws.GetRegion(fmt.Sprint(regionName))
	if region.Name == "" {
		return nil, fmt.Errorf("Invalid region provided: %v", region)
	}

	bucket, ok := parameters["bucket"]
	if !ok || fmt.Sprint(bucket) == "" {
		return nil, fmt.Errorf("No bucket parameter provided")
	}

	encryptBool := false
	encrypt, ok := parameters["encrypt"]
	if ok {
		encryptBool, ok = encrypt.(bool)
		if !ok {
			return nil, fmt.Errorf("The encrypt parameter should be a boolean")
		}
	}

	secureBool := true
	secure, ok := parameters["secure"]
	if ok {
		secureBool, ok = secure.(bool)
		if !ok {
			return nil, fmt.Errorf("The secure parameter should be a boolean")
		}
	}

	v4AuthBool := false
	v4Auth, ok := parameters["v4auth"]
	if ok {
		v4AuthBool, ok = v4Auth.(bool)
		if !ok {
			return nil, fmt.Errorf("The v4auth parameter should be a boolean")
		}
	}

	chunkSize := int64(defaultChunkSize)
	chunkSizeParam, ok := parameters["chunksize"]
	if ok {
		switch v := chunkSizeParam.(type) {
		case string:
			vv, err := strconv.ParseInt(v, 0, 64)
			if err != nil {
				return nil, fmt.Errorf("chunksize parameter must be an integer, %v invalid", chunkSizeParam)
			}
			chunkSize = vv
		case int64:
			chunkSize = v
		case int, uint, int32, uint32, uint64:
			chunkSize = reflect.ValueOf(v).Convert(reflect.TypeOf(chunkSize)).Int()
		default:
			return nil, fmt.Errorf("invalid value for chunksize: %#v", chunkSizeParam)
		}

		if chunkSize < minChunkSize {
			return nil, fmt.Errorf("The chunksize %#v parameter should be a number that is larger than or equal to %d", chunkSize, minChunkSize)
		}
	}

	rootDirectory, ok := parameters["rootdirectory"]
	if !ok {
		rootDirectory = ""
	}

	params := DriverParameters{
		fmt.Sprint(accessKey),
		fmt.Sprint(secretKey),
		fmt.Sprint(bucket),
		region,
		encryptBool,
		secureBool,
		v4AuthBool,
		chunkSize,
		fmt.Sprint(rootDirectory),
	}

	return New(params)
}
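
// A hedged sketch of the parameters map FromParameters accepts; every value
// below is a placeholder. accesskey and secretkey may be omitted when relying
// on EC2 instance credentials.
//
//	d, err := FromParameters(map[string]interface{}{
//		"accesskey":     "AKIA...",        // optional
//		"secretkey":     "...",            // optional
//		"region":        "us-west-2",      // required
//		"bucket":        "example-bucket", // required
//		"encrypt":       true,             // optional bool
//		"secure":        true,             // optional bool, default true
//		"v4auth":        false,            // optional bool
//		"chunksize":     "10485760",       // optional, >= 5MB
//		"rootdirectory": "/registry",      // optional key prefix
//	})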

// New constructs a new Driver with the given AWS credentials, region,
// encryption flag, and bucket name.
func New(params DriverParameters) (*Driver, error) {
	auth, err := aws.GetAuth(params.AccessKey, params.SecretKey, "", time.Time{})
	if err != nil {
		return nil, err
	}

	if !params.Secure {
		params.Region.S3Endpoint = strings.Replace(params.Region.S3Endpoint, "https", "http", 1)
	}

	s3obj := s3.New(auth, params.Region)
	bucket := s3obj.Bucket(params.Bucket)

	if params.V4Auth {
		s3obj.Signature = aws.V4Signature
	} else if params.Region.Name == "eu-central-1" {
		return nil, fmt.Errorf("The eu-central-1 region only works with v4 authentication")
	}

	// Validate that the given credentials have at least read permissions in the
	// given bucket scope.
	if _, err := bucket.List(strings.TrimRight(params.RootDirectory, "/"), "", "", 1); err != nil {
		return nil, err
	}

	// TODO Currently multipart uploads have no timestamps, so this would be unwise
	// if you initiated a new s3driver while another one is running on the same bucket.
	// multis, _, err := bucket.ListMulti("", "")
	// if err != nil {
	// 	return nil, err
	// }

	// for _, multi := range multis {
	// 	err := multi.Abort()
	// 	//TODO appropriate to do this error checking?
	// 	if err != nil {
	// 		return nil, err
	// 	}
	// }

	d := &driver{
		S3:            s3obj,
		Bucket:        bucket,
		ChunkSize:     params.ChunkSize,
		Encrypt:       params.Encrypt,
		RootDirectory: params.RootDirectory,
		zeros:         make([]byte, params.ChunkSize),
	}

	d.pool.New = func() interface{} {
		return make([]byte, d.ChunkSize)
	}

	return &Driver{
		baseEmbed: baseEmbed{
			Base: base.Base{
				StorageDriver: d,
			},
		},
	}, nil
}

// Implement the storagedriver.StorageDriver interface

func (d *driver) Name() string {
	return driverName
}

// GetContent retrieves the content stored at "path" as a []byte.
func (d *driver) GetContent(path string) ([]byte, error) {
	content, err := d.Bucket.Get(d.s3Path(path))
	if err != nil {
		return nil, parseError(path, err)
	}
	return content, nil
}

// PutContent stores the []byte content at a location designated by "path".
func (d *driver) PutContent(path string, contents []byte) error {
	return parseError(path, d.Bucket.Put(d.s3Path(path), contents, d.getContentType(), getPermissions(), d.getOptions()))
}

// ReadStream retrieves an io.ReadCloser for the content stored at "path" with a
// given byte offset.
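// If the offset is at or beyond the end of the content, S3 responds with an
// InvalidRange error, and an empty reader is returned rather than an error.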
func (d *driver) ReadStream(path string, offset int64) (io.ReadCloser, error) {
	headers := make(http.Header)
	headers.Add("Range", "bytes="+strconv.FormatInt(offset, 10)+"-")

	resp, err := d.Bucket.GetResponseWithHeaders(d.s3Path(path), headers)
	if err != nil {
		if s3Err, ok := err.(*s3.Error); ok && s3Err.Code == "InvalidRange" {
			return ioutil.NopCloser(bytes.NewReader(nil)), nil
		}

		return nil, parseError(path, err)
	}
	return resp.Body, nil
}

// WriteStream stores the contents of the provided io.Reader at a
// location designated by the given path. The driver will know it has
// received the full contents when the reader returns io.EOF. The number
// of successfully READ bytes will be returned, even if an error is
// returned. May be used to resume writing a stream by providing a nonzero
// offset. Offsets past the current size will zero-fill the intervening
// bytes and write from the position beyond the current end of the file.
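//
// A hedged usage sketch (identifiers are illustrative): an interrupted upload
// can be resumed from the size reported by Stat:
//
//	fi, err := d.Stat(path)
//	if err != nil {
//		// handle the missing file
//	}
//	nn, err := d.WriteStream(path, fi.Size(), reader)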
func (d *driver) WriteStream(path string, offset int64, reader io.Reader) (totalRead int64, err error) {
	partNumber := 1
	bytesRead := 0
	var putErrChan chan error
	parts := []s3.Part{}
	var part s3.Part
	done := make(chan struct{}) // stopgap to free up waiting goroutines

	multi, err := d.Bucket.InitMulti(d.s3Path(path), d.getContentType(), getPermissions(), d.getOptions())
	if err != nil {
		return 0, err
	}

	buf := d.getbuf()

	// We never want to leave a dangling multipart upload, our only consistent state is
	// when there is a whole object at path. This is in order to remain consistent with
	// the stat call.
	//
	// Note that if the machine dies before executing the defer, we will be left with a dangling
	// multipart upload, which will eventually be cleaned up, but we will lose all of the progress
	// made prior to the machine crashing.
	defer func() {
		if putErrChan != nil {
			if putErr := <-putErrChan; putErr != nil {
				err = putErr
			}
		}

		if len(parts) > 0 {
			if multi == nil {
				// Parts should be empty if the multi is not initialized
				panic("Unreachable")
			} else {
				if multi.Complete(parts) != nil {
					multi.Abort()
				}
			}
		}

		d.putbuf(buf) // needs to be here to pick up new buf value
		close(done)   // free up any waiting goroutines
	}()

	// fromSmallCurrent copies the first total bytes of the existing object at
	// path into buf.
	fromSmallCurrent := func(total int64) error {
		current, err := d.ReadStream(path, 0)
		if err != nil {
			return err
		}

		bytesRead = 0
		for int64(bytesRead) < total {
			// The loop should very rarely enter a second iteration
			nn, err := current.Read(buf[bytesRead:total])
			bytesRead += nn
			if err != nil {
				if err != io.EOF {
					return err
				}

				break
			}

		}
		return nil
	}

	// fromReader fills buf from the given offset up to d.ChunkSize with bytes
	// from reader, then uploads the chunk as a new part in a goroutine.
	fromReader := func(from int64) error {
		bytesRead = 0
		for from+int64(bytesRead) < d.ChunkSize {
			nn, err := reader.Read(buf[from+int64(bytesRead):])
			totalRead += int64(nn)
			bytesRead += nn

			if err != nil {
				if err != io.EOF {
					return err
				}

				break
			}
		}

		if putErrChan == nil {
			putErrChan = make(chan error)
		} else {
			if putErr := <-putErrChan; putErr != nil {
				putErrChan = nil
				return putErr
			}
		}

		go func(bytesRead int, from int64, buf []byte) {
			defer d.putbuf(buf) // this buffer gets dropped after this call

			// DRAGONS(stevvooe): There are a few things one might want to know
			// about this section. First, the putErrChan is expecting an error
			// and a nil or just a nil to come through the channel. This is
			// covered by the silly defer below. The other aspect is the s3
			// retry backoff to deal with RequestTimeout errors. Even though
			// the underlying s3 library should handle it, it doesn't seem to
			// be part of the shouldRetry function (see AdRoll/goamz/s3).
			defer func() {
				select {
				case putErrChan <- nil: // for some reason, we do this no matter what.
				case <-done:
					return // ensure we don't leak the goroutine
				}
			}()

			if bytesRead <= 0 {
				return
			}

			var err error
			var part s3.Part

		loop:
			for retries := 0; retries < 5; retries++ {
				part, err = multi.PutPart(int(partNumber), bytes.NewReader(buf[0:int64(bytesRead)+from]))
				if err == nil {
					break // success!
				}

				// NOTE(stevvooe): This retry code tries to only retry under
				// conditions where the s3 package does not. We may add s3
				// error codes to the below if we see others bubble up in the
				// application. Right now, the most troubling is
				// RequestTimeout, which seems to only be triggered when a tcp
				// connection to s3 slows to a crawl. If the RequestTimeout
				// ends up getting added to the s3 library and we don't see
				// other errors, this retry loop can be removed.
				switch err := err.(type) {
				case *s3.Error:
					switch err.Code {
					case "RequestTimeout":
						// allow retries on only this error.
					default:
						break loop
					}
				}

				backoff := 100 * time.Millisecond * time.Duration(retries+1)
				logrus.Errorf("error putting part, retrying after %v: %v", backoff.String(), err)
				time.Sleep(backoff)
			}

			if err != nil {
				logrus.Errorf("error putting part, aborting: %v", err)
				select {
				case putErrChan <- err:
				case <-done:
					return // don't leak the goroutine
				}
			}

			// parts and partNumber are safe, because this function is the
			// only one modifying them and we force it to be executed
			// serially.
			parts = append(parts, part)
			partNumber++
		}(bytesRead, from, buf)

		buf = d.getbuf() // use a new buffer for the next call
		return nil
	}

	if offset > 0 {
		resp, err := d.Bucket.Head(d.s3Path(path), nil)
		if err != nil {
			if s3Err, ok := err.(*s3.Error); !ok || s3Err.Code != "NoSuchKey" {
				return 0, err
			}
		}

		currentLength := int64(0)
		if err == nil {
			currentLength = resp.ContentLength
		}

		if currentLength >= offset {
			if offset < d.ChunkSize {
				// chunkSize > currentLength >= offset
				if err = fromSmallCurrent(offset); err != nil {
					return totalRead, err
				}

				if err = fromReader(offset); err != nil {
					return totalRead, err
				}

				if totalRead+offset < d.ChunkSize {
					return totalRead, nil
				}
			} else {
				// currentLength >= offset >= chunkSize
				_, part, err = multi.PutPartCopy(partNumber,
					s3.CopyOptions{CopySourceOptions: "bytes=0-" + strconv.FormatInt(offset-1, 10)},
					d.Bucket.Name+"/"+d.s3Path(path))
				if err != nil {
					return 0, err
				}

				parts = append(parts, part)
				partNumber++
			}
		} else {
			// fromZeroFillSmall fills the byte range [from, to) of buf with
			// zeros; it assumes to-from <= chunkSize.
			fromZeroFillSmall := func(from, to int64) error {
				bytesRead = 0
				for from+int64(bytesRead) < to {
					nn, err := bytes.NewReader(d.zeros).Read(buf[from+int64(bytesRead) : to])
					bytesRead += nn
					if err != nil {
						return err
					}
				}

				return nil
			}

			// fromZeroFillLarge writes zeros from offset from up to to,
			// uploading full chunk-sized parts as needed.
			fromZeroFillLarge := func(from, to int64) error {
				bytesRead64 := int64(0)
				for to-(from+bytesRead64) >= d.ChunkSize {
					part, err := multi.PutPart(int(partNumber), bytes.NewReader(d.zeros))
					if err != nil {
						return err
					}
					bytesRead64 += d.ChunkSize

					parts = append(parts, part)
					partNumber++
				}

				return fromZeroFillSmall(0, (to-from)%d.ChunkSize)
			}

			// currentLength < offset
			if currentLength < d.ChunkSize {
				if offset < d.ChunkSize {
					// chunkSize > offset > currentLength
					if err = fromSmallCurrent(currentLength); err != nil {
						return totalRead, err
					}

					if err = fromZeroFillSmall(currentLength, offset); err != nil {
						return totalRead, err
					}

					if err = fromReader(offset); err != nil {
						return totalRead, err
					}

					if totalRead+offset < d.ChunkSize {
						return totalRead, nil
					}
				} else {
					// offset >= chunkSize > currentLength
					if err = fromSmallCurrent(currentLength); err != nil {
						return totalRead, err
					}

					if err = fromZeroFillSmall(currentLength, d.ChunkSize); err != nil {
						return totalRead, err
					}

					part, err = multi.PutPart(int(partNumber), bytes.NewReader(buf))
					if err != nil {
						return totalRead, err
					}

					parts = append(parts, part)
					partNumber++

					// Zero-fill from chunkSize up to offset, then some reader
					if err = fromZeroFillLarge(d.ChunkSize, offset); err != nil {
						return totalRead, err
					}

					if err = fromReader(offset % d.ChunkSize); err != nil {
						return totalRead, err
					}

					if totalRead+(offset%d.ChunkSize) < d.ChunkSize {
						return totalRead, nil
					}
				}
			} else {
				// offset > currentLength >= chunkSize
				_, part, err = multi.PutPartCopy(partNumber,
					s3.CopyOptions{},
					d.Bucket.Name+"/"+d.s3Path(path))
				if err != nil {
					return 0, err
				}

				parts = append(parts, part)
				partNumber++

				// Zero-fill from currentLength up to offset, then some reader
				if err = fromZeroFillLarge(currentLength, offset); err != nil {
					return totalRead, err
				}

				if err = fromReader((offset - currentLength) % d.ChunkSize); err != nil {
					return totalRead, err
				}

				if totalRead+((offset-currentLength)%d.ChunkSize) < d.ChunkSize {
					return totalRead, nil
				}
			}

		}
	}

	for {
		if err = fromReader(0); err != nil {
			return totalRead, err
		}

		if int64(bytesRead) < d.ChunkSize {
			break
		}
	}

	return totalRead, nil
}

// Stat retrieves the FileInfo for the given path, including the current size
// in bytes and the last modification time.
func (d *driver) Stat(path string) (storagedriver.FileInfo, error) {
	listResponse, err := d.Bucket.List(d.s3Path(path), "", "", 1)
	if err != nil {
		return nil, err
	}

	fi := storagedriver.FileInfoFields{
		Path: path,
	}

	if len(listResponse.Contents) == 1 {
		if listResponse.Contents[0].Key != d.s3Path(path) {
			fi.IsDir = true
		} else {
			fi.IsDir = false
			fi.Size = listResponse.Contents[0].Size

			timestamp, err := time.Parse(time.RFC3339Nano, listResponse.Contents[0].LastModified)
			if err != nil {
				return nil, err
			}
			fi.ModTime = timestamp
		}
	} else if len(listResponse.CommonPrefixes) == 1 {
		fi.IsDir = true
	} else {
		return nil, storagedriver.PathNotFoundError{Path: path}
	}

	return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil
}

// List returns a list of the objects that are direct descendants of the given path.
func (d *driver) List(path string) ([]string, error) {
	if path != "/" && path[len(path)-1] != '/' {
		path = path + "/"
	}

	// This is to cover for the cases when the rootDirectory of the driver is either "" or "/".
	// In those cases, there is no root prefix to replace and we must actually add a "/" to all
	// results in order to keep them as valid paths as recognized by storagedriver.PathRegexp
	prefix := ""
	if d.s3Path("") == "" {
		prefix = "/"
	}

	listResponse, err := d.Bucket.List(d.s3Path(path), "/", "", listMax)
	if err != nil {
		return nil, err
	}

	files := []string{}
	directories := []string{}

	for {
		for _, key := range listResponse.Contents {
			files = append(files, strings.Replace(key.Key, d.s3Path(""), prefix, 1))
		}

		for _, commonPrefix := range listResponse.CommonPrefixes {
			directories = append(directories, strings.Replace(commonPrefix[0:len(commonPrefix)-1], d.s3Path(""), prefix, 1))
		}

		if listResponse.IsTruncated {
			listResponse, err = d.Bucket.List(d.s3Path(path), "/", listResponse.NextMarker, listMax)
			if err != nil {
				return nil, err
			}
		} else {
			break
		}
	}

	return append(files, directories...), nil
}

// Move moves an object stored at sourcePath to destPath, removing the original
// object.
func (d *driver) Move(sourcePath string, destPath string) error {
	/* This is terrible, but AWS doesn't have an actual move. */
	_, err := d.Bucket.PutCopy(d.s3Path(destPath), getPermissions(),
		s3.CopyOptions{Options: d.getOptions(), ContentType: d.getContentType()}, d.Bucket.Name+"/"+d.s3Path(sourcePath))
	if err != nil {
		return parseError(sourcePath, err)
	}

	return d.Delete(sourcePath)
}

// Delete recursively deletes all objects stored at "path" and its subpaths.
func (d *driver) Delete(path string) error {
	listResponse, err := d.Bucket.List(d.s3Path(path), "", "", listMax)
	if err != nil || len(listResponse.Contents) == 0 {
		return storagedriver.PathNotFoundError{Path: path}
	}

	s3Objects := make([]s3.Object, listMax)

	for len(listResponse.Contents) > 0 {
		for index, key := range listResponse.Contents {
			s3Objects[index].Key = key.Key
		}

		err := d.Bucket.DelMulti(s3.Delete{Quiet: false, Objects: s3Objects[0:len(listResponse.Contents)]})
		if err != nil {
			return err
		}

		listResponse, err = d.Bucket.List(d.s3Path(path), "", "", listMax)
		if err != nil {
			return err
		}
	}

	return nil
}

// URLFor returns a URL which may be used to retrieve the content stored at the given path.
// May return an ErrUnsupportedMethod in certain StorageDriver implementations.
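//
// A hedged sketch of the options map (both keys are optional):
//
//	url, err := d.URLFor("/path/to/content", map[string]interface{}{
//		"method": "GET",                            // "GET" or "HEAD" only
//		"expiry": time.Now().Add(10 * time.Minute), // must be a time.Time
//	})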
func (d *driver) URLFor(path string, options map[string]interface{}) (string, error) {
	methodString := "GET"
	method, ok := options["method"]
	if ok {
		methodString, ok = method.(string)
		if !ok || (methodString != "GET" && methodString != "HEAD") {
			return "", storagedriver.ErrUnsupportedMethod
		}
	}

	expiresTime := time.Now().Add(20 * time.Minute)
	expires, ok := options["expiry"]
	if ok {
		et, ok := expires.(time.Time)
		if ok {
			expiresTime = et
		}
	}

	return d.Bucket.SignedURLWithMethod(methodString, d.s3Path(path), expiresTime, nil, nil), nil
}

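// s3Path maps a storage driver path to an S3 key under RootDirectory: with a
// RootDirectory of "/registry", for example, the path "/a/b" yields the key
// "registry/a/b".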
func (d *driver) s3Path(path string) string {
	return strings.TrimLeft(strings.TrimRight(d.RootDirectory, "/")+path, "/")
}

// S3BucketKey returns the s3 bucket key for the given storage driver path.
func (d *Driver) S3BucketKey(path string) string {
	return d.StorageDriver.(*driver).s3Path(path)
}

func parseError(path string, err error) error {
	if s3Err, ok := err.(*s3.Error); ok && s3Err.Code == "NoSuchKey" {
		return storagedriver.PathNotFoundError{Path: path}
	}

	return err
}

func hasCode(err error, code string) bool {
	s3err, ok := err.(*aws.Error)
	return ok && s3err.Code == code
}

func (d *driver) getOptions() s3.Options {
	return s3.Options{SSE: d.Encrypt}
}

func getPermissions() s3.ACL {
	return s3.Private
}

func (d *driver) getContentType() string {
	return "application/octet-stream"
}

// getbuf returns a buffer from the driver's pool with length d.ChunkSize.
func (d *driver) getbuf() []byte {
	return d.pool.Get().([]byte)
}

// putbuf zeroes the buffer and returns it to the driver's pool.
func (d *driver) putbuf(p []byte) {
	copy(p, d.zeros)
	d.pool.Put(p)
}