github.com/aavshr/aws-sdk-go@v1.41.3/service/s3/s3manager/upload.go

     1  package s3manager
     2  
     3  import (
     4  	"bytes"
     5  	"fmt"
     6  	"io"
     7  	"sort"
     8  	"sync"
     9  
    10  	"github.com/aavshr/aws-sdk-go/aws"
    11  	"github.com/aavshr/aws-sdk-go/aws/awserr"
    12  	"github.com/aavshr/aws-sdk-go/aws/awsutil"
    13  	"github.com/aavshr/aws-sdk-go/aws/client"
    14  	"github.com/aavshr/aws-sdk-go/aws/credentials"
    15  	"github.com/aavshr/aws-sdk-go/aws/request"
    16  	"github.com/aavshr/aws-sdk-go/service/s3"
    17  	"github.com/aavshr/aws-sdk-go/service/s3/s3iface"
    18  )
    19  
    20  // MaxUploadParts is the maximum allowed number of parts in a multi-part upload
    21  // on Amazon S3.
    22  const MaxUploadParts = 10000
    23  
    24  // MinUploadPartSize is the minimum allowed part size when uploading a part to
    25  // Amazon S3.
    26  const MinUploadPartSize int64 = 1024 * 1024 * 5
    27  
    28  // DefaultUploadPartSize is the default part size to buffer chunks of a
    29  // payload into.
    30  const DefaultUploadPartSize = MinUploadPartSize
    31  
    32  // DefaultUploadConcurrency is the default number of goroutines to spin up when
    33  // using Upload().
    34  const DefaultUploadConcurrency = 5
    35  
    36  // A MultiUploadFailure wraps a failed S3 multipart upload. An error returned
    37  // will satisfy this interface when a multipart upload fails to upload all
    38  // chunks to S3. In the case of a failure, the UploadID is needed to operate on
    39  // the chunks, if any, which were uploaded.
    40  //
    41  // Example:
    42  //
    43  //     u := s3manager.NewUploader(opts)
    44  //     output, err := u.Upload(input)
    45  //     if err != nil {
    46  //         if multierr, ok := err.(s3manager.MultiUploadFailure); ok {
    47  //             // Process error and its associated uploadID
    48  //             fmt.Println("Error:", multierr.Code(), multierr.Message(), multierr.UploadID())
    49  //         } else {
    50  //             // Process error generically
    51  //             fmt.Println("Error:", err.Error())
    52  //         }
    53  //     }
    54  //
    55  type MultiUploadFailure interface {
    56  	awserr.Error
    57  
    58  	// Returns the upload id for the S3 multipart upload that failed.
    59  	UploadID() string
    60  }
    61  
    62  // So that the Error interface type can be included as an anonymous field
    63  // in the multiUploadError struct and not conflict with the error.Error() method.
    64  type awsError awserr.Error
    65  
    66  // A multiUploadError wraps the upload ID of a failed s3 multipart upload.
    67  // Composed of BaseError for code, message, and original error
    68  //
    69  // Should be used for an error that occurred during an S3 multipart upload when
    70  // an upload ID is available; if no uploadID is available, use a more relevant error.
    71  type multiUploadError struct {
    72  	awsError
    73  
    74  	// ID for multipart upload which failed.
    75  	uploadID string
    76  }
    77  
    78  // Error returns the string representation of the error.
    79  //
    80  // See awserr.SprintError for the output format.
    81  //
    82  // Satisfies the error interface.
    83  func (m multiUploadError) Error() string {
    84  	extra := fmt.Sprintf("upload id: %s", m.uploadID)
    85  	return awserr.SprintError(m.Code(), m.Message(), extra, m.OrigErr())
    86  }
    87  
    88  // String returns the string representation of the error.
    89  // Alias for Error to satisfy the stringer interface.
    90  func (m multiUploadError) String() string {
    91  	return m.Error()
    92  }
    93  
    94  // UploadID returns the id of the S3 upload which failed.
    95  func (m multiUploadError) UploadID() string {
    96  	return m.uploadID
    97  }
    98  
    99  // UploadOutput represents a response from the Upload() call.
   100  type UploadOutput struct {
   101  	// The URL where the object was uploaded to.
   102  	Location string
   103  
   104  	// The version of the object that was uploaded. Will only be populated if
   105  	// the S3 Bucket is versioned. If the bucket is not versioned this field
   106  	// will not be set.
   107  	VersionID *string
   108  
   109  	// The ID for a multipart upload to S3. In the case of an error the error
   110  	// can be cast to the MultiUploadFailure interface to extract the upload ID.
   111  	UploadID string
   112  
   113  	// Entity tag of the object.
   114  	ETag *string
   115  }
   116  
   117  // WithUploaderRequestOptions appends to the Uploader's API request options.
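//
// For example, a minimal sketch of passing a request option through this helper
// (sess and the user agent string below are placeholders, not part of this package):
//
//     uploader := s3manager.NewUploader(sess, s3manager.WithUploaderRequestOptions(
//         request.WithAppendUserAgent("my-app/1.0"),
//     ))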
   118  func WithUploaderRequestOptions(opts ...request.Option) func(*Uploader) {
   119  	return func(u *Uploader) {
   120  		u.RequestOptions = append(u.RequestOptions, opts...)
   121  	}
   122  }
   123  
   124  // The Uploader structure that calls Upload(). It is safe to call Upload()
   125  // on this structure for multiple objects and across concurrent goroutines.
   126  // It is not safe to mutate the Uploader's properties concurrently.
   127  type Uploader struct {
   128  	// The buffer size (in bytes) to use when buffering data into chunks and
   129  	// sending them as parts to S3. The minimum allowed part size is 5MB, and
   130  	// if this value is set to zero, the DefaultUploadPartSize value will be used.
   131  	PartSize int64
   132  
   133  	// The number of goroutines to spin up in parallel per call to Upload when
   134  	// sending parts. If this is set to zero, the DefaultUploadConcurrency value
   135  	// will be used.
   136  	//
   137  	// The concurrency pool is not shared between calls to Upload.
   138  	Concurrency int
   139  
   140  	// Setting this value to true will cause the SDK to avoid calling
   141  	// AbortMultipartUpload on a failure, leaving all successfully uploaded
   142  	// parts on S3 for manual recovery.
   143  	//
   144  	// Note that storing parts of an incomplete multipart upload counts towards
   145  	// space usage on S3 and will add additional costs if not cleaned up.
   146  	LeavePartsOnError bool
   147  
   148  	// MaxUploadParts is the max number of parts which will be uploaded to S3.
   149  	// Will be used to calculate the part size of the object to be uploaded.
   150  	// E.g. a 5GB file, with MaxUploadParts set to 100, will upload the file
   151  	// as 100 50MB parts, subject to the MaxUploadParts limit (10,000 parts).
   152  	//
   153  	// MaxUploadParts must not be used to limit the total number of bytes uploaded.
   154  	// Use a type like io.LimitReader (https://golang.org/pkg/io/#LimitedReader)
   155  	// instead. An io.LimitReader is helpful when uploading an unbounded reader
   156  	// to S3, and you know its maximum size. Otherwise the reader's io.EOF returned
   157  	// error must be used to signal end of stream.
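	//
	// For example (a sketch; r and maxSize are placeholder names):
	//
	//     input.Body = io.LimitReader(r, maxSize) // read at most maxSize bytes from r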
   158  	//
   159  	// Defaults to the package's MaxUploadParts constant.
   160  	MaxUploadParts int
   161  
   162  	// The client to use when uploading to S3.
   163  	S3 s3iface.S3API
   164  
   165  	// List of request options that will be passed down to individual API
   166  	// operation requests made by the uploader.
   167  	RequestOptions []request.Option
   168  
   169  	// Defines the buffer strategy used when uploading a part
   170  	BufferProvider ReadSeekerWriteToProvider
   171  
   172  	// partPool allows for the reuse of streaming payload part buffers between upload calls
   173  	partPool byteSlicePool
   174  }
   175  
   176  // NewUploader creates a new Uploader instance to upload objects to S3. Pass in
   177  // additional functional options to customize the uploader's behavior. Requires a
   178  // client.ConfigProvider in order to create an S3 service client. The session.Session
   179  // satisfies the client.ConfigProvider interface.
   180  //
   181  // Example:
   182  //     // The session the S3 Uploader will use
   183  //     sess := session.Must(session.NewSession())
   184  //
   185  //     // Create an uploader with the session and default options
   186  //     uploader := s3manager.NewUploader(sess)
   187  //
   188  //     // Create an uploader with the session and custom options
   189  //     uploader := s3manager.NewUploader(sess, func(u *s3manager.Uploader) {
   190  //          u.PartSize = 64 * 1024 * 1024 // 64MB per part
   191  //     })
   192  func NewUploader(c client.ConfigProvider, options ...func(*Uploader)) *Uploader {
   193  	return newUploader(s3.New(c), options...)
   194  }
   195  
   196  func newUploader(client s3iface.S3API, options ...func(*Uploader)) *Uploader {
   197  	u := &Uploader{
   198  		S3:                client,
   199  		PartSize:          DefaultUploadPartSize,
   200  		Concurrency:       DefaultUploadConcurrency,
   201  		LeavePartsOnError: false,
   202  		MaxUploadParts:    MaxUploadParts,
   203  		BufferProvider:    defaultUploadBufferProvider(),
   204  	}
   205  
   206  	for _, option := range options {
   207  		option(u)
   208  	}
   209  
   210  	u.partPool = newByteSlicePool(u.PartSize)
   211  
   212  	return u
   213  }
   214  
   215  // NewUploaderWithClient creates a new Uploader instance to upload objects to S3. Pass in
   216  // additional functional options to customize the uploader's behavior. Requires
   217  // an S3 service client to make S3 API calls.
   218  //
   219  // Example:
   220  //     // The session the S3 Uploader will use
   221  //     sess := session.Must(session.NewSession())
   222  //
   223  //     // S3 service client the Upload manager will use.
   224  //     s3Svc := s3.New(sess)
   225  //
   226  //     // Create an uploader with S3 client and default options
   227  //     uploader := s3manager.NewUploaderWithClient(s3Svc)
   228  //
   229  //     // Create an uploader with S3 client and custom options
   230  //     uploader := s3manager.NewUploaderWithClient(s3Svc, func(u *s3manager.Uploader) {
   231  //          u.PartSize = 64 * 1024 * 1024 // 64MB per part
   232  //     })
   233  func NewUploaderWithClient(svc s3iface.S3API, options ...func(*Uploader)) *Uploader {
   234  	return newUploader(svc, options...)
   235  }
   236  
   237  // Upload uploads an object to S3, intelligently buffering large files into
   238  // smaller chunks and sending them in parallel across multiple goroutines. You
   239  // can configure the buffer size and concurrency through the Uploader's parameters.
   240  //
   241  // Additional functional options can be provided to configure the individual
   242  // upload. These options are applied to a copy of the Uploader instance Upload is
   243  // called from; modifying the options will not impact the original Uploader instance.
   244  //
   245  // Use the WithUploaderRequestOptions helper function to pass in request
   246  // options that will be applied to all API operations made with this uploader.
   247  //
   248  // It is safe to call this method concurrently across goroutines.
   249  //
   250  // Example:
   251  //     // Upload input parameters
   252  //     upParams := &s3manager.UploadInput{
   253  //         Bucket: &bucketName,
   254  //         Key:    &keyName,
   255  //         Body:   file,
   256  //     }
   257  //
   258  //     // Perform an upload.
   259  //     result, err := uploader.Upload(upParams)
   260  //
   261  //     // Perform an upload with options different from those in the Uploader.
   262  //     result, err := uploader.Upload(upParams, func(u *s3manager.Uploader) {
   263  //          u.PartSize = 10 * 1024 * 1024 // 10MB part size
   264  //          u.LeavePartsOnError = true    // Don't delete the parts if the upload fails.
   265  //     })
   266  func (u Uploader) Upload(input *UploadInput, options ...func(*Uploader)) (*UploadOutput, error) {
   267  	return u.UploadWithContext(aws.BackgroundContext(), input, options...)
   268  }
   269  
   270  // UploadWithContext uploads an object to S3, intelligently buffering large
   271  // files into smaller chunks and sending them in parallel across multiple
   272  // goroutines. You can configure the buffer size and concurrency through the
   273  // Uploader's parameters.
   274  //
   275  // UploadWithContext is the same as Upload with the additional support for
   276  // Context input parameters. The Context must not be nil. A nil Context will
   277  // cause a panic. Use the context to add deadlines, timeouts, etc.
   278  // UploadWithContext may create sub-contexts for individual underlying requests.
   279  //
   280  // Additional functional options can be provided to configure the individual
   281  // upload. These options are applied to a copy of the Uploader instance Upload is
   282  // called from; modifying the options will not impact the original Uploader instance.
   283  //
   284  // Use the WithUploaderRequestOptions helper function to pass in request
   285  // options that will be applied to all API operations made with this uploader.
   286  //
   287  // It is safe to call this method concurrently across goroutines.
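//
// Example (a minimal sketch; the timeout, bucket, key, and file values are
// placeholders):
//
//     ctx, cancel := context.WithTimeout(aws.BackgroundContext(), 5*time.Minute)
//     defer cancel()
//
//     result, err := uploader.UploadWithContext(ctx, &s3manager.UploadInput{
//         Bucket: aws.String("my-bucket"),
//         Key:    aws.String("my-key"),
//         Body:   file,
//     })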
   288  func (u Uploader) UploadWithContext(ctx aws.Context, input *UploadInput, opts ...func(*Uploader)) (*UploadOutput, error) {
   289  	i := uploader{in: input, cfg: u, ctx: ctx}
   290  
   291  	for _, opt := range opts {
   292  		opt(&i.cfg)
   293  	}
   294  
   295  	i.cfg.RequestOptions = append(i.cfg.RequestOptions, request.WithAppendUserAgent("S3Manager"))
   296  
   297  	return i.upload()
   298  }
   299  
   300  // UploadWithIterator will upload a batch of objects to S3. This operation uses
   301  // the iterator pattern to determine which object to upload next. Since the
   302  // iterator is an interface, this allows for custom-defined functionality.
   303  //
   304  // Example:
   305  //	svc := s3manager.NewUploader(sess)
   306  //
   307  //	objects := []BatchUploadObject{
   308  //		{
   309  //			Object:	&s3manager.UploadInput {
   310  //				Key: aws.String("key"),
   311  //				Bucket: aws.String("bucket"),
   312  //			},
   313  //		},
   314  //	}
   315  //
   316  //	iter := &s3manager.UploadObjectsIterator{Objects: objects}
   317  //	if err := svc.UploadWithIterator(aws.BackgroundContext(), iter); err != nil {
   318  //		return err
   319  //	}
   320  func (u Uploader) UploadWithIterator(ctx aws.Context, iter BatchUploadIterator, opts ...func(*Uploader)) error {
   321  	var errs []Error
   322  	for iter.Next() {
   323  		object := iter.UploadObject()
   324  		if _, err := u.UploadWithContext(ctx, object.Object, opts...); err != nil {
   325  			s3Err := Error{
   326  				OrigErr: err,
   327  				Bucket:  object.Object.Bucket,
   328  				Key:     object.Object.Key,
   329  			}
   330  
   331  			errs = append(errs, s3Err)
   332  		}
   333  
   334  		if object.After == nil {
   335  			continue
   336  		}
   337  
   338  		if err := object.After(); err != nil {
   339  			s3Err := Error{
   340  				OrigErr: err,
   341  				Bucket:  object.Object.Bucket,
   342  				Key:     object.Object.Key,
   343  			}
   344  
   345  			errs = append(errs, s3Err)
   346  		}
   347  	}
   348  
   349  	if len(errs) > 0 {
   350  		return NewBatchError("BatchedUploadIncomplete", "some objects have failed to upload.", errs)
   351  	}
   352  	return nil
   353  }
   354  
   355  // internal structure to manage an upload to S3.
   356  type uploader struct {
   357  	ctx aws.Context
   358  	cfg Uploader
   359  
   360  	in *UploadInput
   361  
   362  	readerPos int64 // current reader position
   363  	totalSize int64 // set to -1 if the size is not known
   364  }
   365  
   366  // internal logic for deciding whether to upload a single part or use a
   367  // multipart upload.
   368  func (u *uploader) upload() (*UploadOutput, error) {
   369  	if err := u.init(); err != nil {
   370  		return nil, awserr.New("ReadRequestBody", "unable to initialize upload", err)
   371  	}
   372  	defer u.cfg.partPool.Close()
   373  
   374  	if u.cfg.PartSize < MinUploadPartSize {
   375  		msg := fmt.Sprintf("part size must be at least %d bytes", MinUploadPartSize)
   376  		return nil, awserr.New("ConfigError", msg, nil)
   377  	}
   378  
   379  	// Do one read to determine if we have more than one part
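	// (nextReader reports io.EOF when the remaining data fits within a single part)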
   380  	reader, _, cleanup, err := u.nextReader()
   381  	if err == io.EOF { // single part
   382  		return u.singlePart(reader, cleanup)
   383  	} else if err != nil {
   384  		cleanup()
   385  		return nil, awserr.New("ReadRequestBody", "read upload data failed", err)
   386  	}
   387  
   388  	mu := multiuploader{uploader: u}
   389  	return mu.upload(reader, cleanup)
   390  }
   391  
   392  // init will initialize all default options.
   393  func (u *uploader) init() error {
   394  	if err := validateSupportedARNType(aws.StringValue(u.in.Bucket)); err != nil {
   395  		return err
   396  	}
   397  
   398  	if u.cfg.Concurrency == 0 {
   399  		u.cfg.Concurrency = DefaultUploadConcurrency
   400  	}
   401  	if u.cfg.PartSize == 0 {
   402  		u.cfg.PartSize = DefaultUploadPartSize
   403  	}
   404  	if u.cfg.MaxUploadParts == 0 {
   405  		u.cfg.MaxUploadParts = MaxUploadParts
   406  	}
   407  
   408  	// Try to get the total size for some optimizations
   409  	if err := u.initSize(); err != nil {
   410  		return err
   411  	}
   412  
   413  	// If PartSize was changed or partPool was never set up, then we need to allocate
   414  	// a new pool so that we return []byte slices of the correct size.
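	// One buffer per upload worker, plus one for the part being read ahead by
	// the calling goroutine.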
   415  	poolCap := u.cfg.Concurrency + 1
   416  	if u.cfg.partPool == nil || u.cfg.partPool.SliceSize() != u.cfg.PartSize {
   417  		u.cfg.partPool = newByteSlicePool(u.cfg.PartSize)
   418  		u.cfg.partPool.ModifyCapacity(poolCap)
   419  	} else {
   420  		u.cfg.partPool = &returnCapacityPoolCloser{byteSlicePool: u.cfg.partPool}
   421  		u.cfg.partPool.ModifyCapacity(poolCap)
   422  	}
   423  
   424  	return nil
   425  }
   426  
   427  // initSize tries to detect the total stream size, setting u.totalSize. If
   428  // the size is not known, totalSize is set to -1.
   429  func (u *uploader) initSize() error {
   430  	u.totalSize = -1
   431  
   432  	switch r := u.in.Body.(type) {
   433  	case io.Seeker:
   434  		n, err := aws.SeekerLen(r)
   435  		if err != nil {
   436  			return err
   437  		}
   438  		u.totalSize = n
   439  
   440  		// Try to adjust partSize if it is too small and account for
   441  		// integer division truncation.
   442  		if u.totalSize/u.cfg.PartSize >= int64(u.cfg.MaxUploadParts) {
   443  			// Add one to the part size to account for remainders
   444  			// during the size calculation, e.g. an odd number of bytes.
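			// For example, a 100 GiB stream at the default 5 MiB part size would
			// need 20,480 parts; PartSize is raised to just over 10 MiB so the
			// upload fits within the 10,000-part limit.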
   445  			u.cfg.PartSize = (u.totalSize / int64(u.cfg.MaxUploadParts)) + 1
   446  		}
   447  	}
   448  
   449  	return nil
   450  }
   451  
   452  // nextReader returns a seekable reader representing the next packet of data.
   453  // This operation increases the shared u.readerPos counter, but note that it
   454  // does not need to be wrapped in a mutex because nextReader is only called
   455  // from the main thread.
   456  func (u *uploader) nextReader() (io.ReadSeeker, int, func(), error) {
   457  	switch r := u.in.Body.(type) {
   458  	case readerAtSeeker:
   459  		var err error
   460  
   461  		n := u.cfg.PartSize
   462  		if u.totalSize >= 0 {
   463  			bytesLeft := u.totalSize - u.readerPos
   464  
   465  			if bytesLeft <= u.cfg.PartSize {
   466  				err = io.EOF
   467  				n = bytesLeft
   468  			}
   469  		}
   470  
   471  		var (
   472  			reader  io.ReadSeeker
   473  			cleanup func()
   474  		)
   475  
   476  		reader = io.NewSectionReader(r, u.readerPos, n)
   477  		if u.cfg.BufferProvider != nil {
   478  			reader, cleanup = u.cfg.BufferProvider.GetWriteTo(reader)
   479  		} else {
   480  			cleanup = func() {}
   481  		}
   482  
   483  		u.readerPos += n
   484  
   485  		return reader, int(n), cleanup, err
   486  
   487  	default:
   488  		part, err := u.cfg.partPool.Get(u.ctx)
   489  		if err != nil {
   490  			return nil, 0, func() {}, err
   491  		}
   492  
   493  		n, err := readFillBuf(r, *part)
   494  		u.readerPos += int64(n)
   495  
   496  		cleanup := func() {
   497  			u.cfg.partPool.Put(part)
   498  		}
   499  
   500  		return bytes.NewReader((*part)[0:n]), n, cleanup, err
   501  	}
   502  }
   503  
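// readFillBuf reads from r into b until b is full or r returns an error
// (including io.EOF), and reports the number of bytes read.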
   504  func readFillBuf(r io.Reader, b []byte) (offset int, err error) {
   505  	for offset < len(b) && err == nil {
   506  		var n int
   507  		n, err = r.Read(b[offset:])
   508  		offset += n
   509  	}
   510  
   511  	return offset, err
   512  }
   513  
   514  // singlePart contains upload logic for uploading a single chunk via
   515  // a regular PutObject request. Multipart requests require at least two
   516  // parts, or at least 5MB of data.
   517  func (u *uploader) singlePart(r io.ReadSeeker, cleanup func()) (*UploadOutput, error) {
   518  	defer cleanup()
   519  
   520  	params := &s3.PutObjectInput{}
   521  	awsutil.Copy(params, u.in)
   522  	params.Body = r
   523  
   524  	// Need to use the request form because the URL generated in the request is
   525  	// used in the return value.
   526  	req, out := u.cfg.S3.PutObjectRequest(params)
   527  	req.SetContext(u.ctx)
   528  	req.ApplyOptions(u.cfg.RequestOptions...)
   529  	if err := req.Send(); err != nil {
   530  		return nil, err
   531  	}
   532  
   533  	url := req.HTTPRequest.URL.String()
   534  	return &UploadOutput{
   535  		Location:  url,
   536  		VersionID: out.VersionId,
   537  		ETag:      out.ETag,
   538  	}, nil
   539  }
   540  
   541  // internal structure to manage a specific multipart upload to S3.
   542  type multiuploader struct {
   543  	*uploader
   544  	wg       sync.WaitGroup
   545  	m        sync.Mutex
   546  	err      error
   547  	uploadID string
   548  	parts    completedParts
   549  }
   550  
   551  // keeps track of a single chunk of data being sent to S3.
   552  type chunk struct {
   553  	buf     io.ReadSeeker
   554  	num     int64
   555  	cleanup func()
   556  }
   557  
   558  // completedParts is a wrapper to make parts sortable by their part number,
   559  // since S3 requires this list to be sent in sorted order.
   560  type completedParts []*s3.CompletedPart
   561  
   562  func (a completedParts) Len() int           { return len(a) }
   563  func (a completedParts) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
   564  func (a completedParts) Less(i, j int) bool { return *a[i].PartNumber < *a[j].PartNumber }
   565  
   566  // upload will perform a multipart upload using the firstBuf buffer containing
   567  // the first chunk of data.
   568  func (u *multiuploader) upload(firstBuf io.ReadSeeker, cleanup func()) (*UploadOutput, error) {
   569  	params := &s3.CreateMultipartUploadInput{}
   570  	awsutil.Copy(params, u.in)
   571  
   572  	// Create the multipart upload.
   573  	resp, err := u.cfg.S3.CreateMultipartUploadWithContext(u.ctx, params, u.cfg.RequestOptions...)
   574  	if err != nil {
   575  		cleanup()
   576  		return nil, err
   577  	}
   578  	u.uploadID = *resp.UploadId
   579  
   580  	// Create the workers
   581  	ch := make(chan chunk, u.cfg.Concurrency)
   582  	for i := 0; i < u.cfg.Concurrency; i++ {
   583  		u.wg.Add(1)
   584  		go u.readChunk(ch)
   585  	}
   586  
   587  	// Send part 1 to the workers
   588  	var num int64 = 1
   589  	ch <- chunk{buf: firstBuf, num: num, cleanup: cleanup}
   590  
   591  	// Read and queue the rest of the parts
   592  	for u.geterr() == nil && err == nil {
   593  		var (
   594  			reader       io.ReadSeeker
   595  			nextChunkLen int
   596  			ok           bool
   597  		)
   598  
   599  		reader, nextChunkLen, cleanup, err = u.nextReader()
   600  		ok, err = u.shouldContinue(num, nextChunkLen, err)
   601  		if !ok {
   602  			cleanup()
   603  			if err != nil {
   604  				u.seterr(err)
   605  			}
   606  			break
   607  		}
   608  
   609  		num++
   610  
   611  		ch <- chunk{buf: reader, num: num, cleanup: cleanup}
   612  	}
   613  
   614  	// Close the channel, wait for workers, and complete upload
   615  	close(ch)
   616  	u.wg.Wait()
   617  	complete := u.complete()
   618  
   619  	if err := u.geterr(); err != nil {
   620  		return nil, &multiUploadError{
   621  			awsError: awserr.New(
   622  				"MultipartUpload",
   623  				"upload multipart failed",
   624  				err),
   625  			uploadID: u.uploadID,
   626  		}
   627  	}
   628  
   629  	// Create a presigned URL of the S3 Get Object in order to have parity with
   630  	// single part upload.
   631  	getReq, _ := u.cfg.S3.GetObjectRequest(&s3.GetObjectInput{
   632  		Bucket: u.in.Bucket,
   633  		Key:    u.in.Key,
   634  	})
   635  	getReq.Config.Credentials = credentials.AnonymousCredentials
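	// Anonymous credentials mean the request is not actually signed; presigning
	// here is only a convenient way to produce the object's URL.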
   636  	getReq.SetContext(u.ctx)
   637  	uploadLocation, _, _ := getReq.PresignRequest(1)
   638  
   639  	return &UploadOutput{
   640  		Location:  uploadLocation,
   641  		VersionID: complete.VersionId,
   642  		UploadID:  u.uploadID,
   643  		ETag:      complete.ETag,
   644  	}, nil
   645  }
   646  
   647  func (u *multiuploader) shouldContinue(part int64, nextChunkLen int, err error) (bool, error) {
   648  	if err != nil && err != io.EOF {
   649  		return false, awserr.New("ReadRequestBody", "read multipart upload data failed", err)
   650  	}
   651  
   652  	if nextChunkLen == 0 {
   653  		// No need to upload an empty part; if the file was empty to start
   654  		// with, an empty single part would have been created and the
   655  		// multipart upload never started.
   656  		return false, nil
   657  	}
   658  
   659  	part++
   660  	// This upload exceeded the maximum number of supported parts, so error now.
   661  	if part > int64(u.cfg.MaxUploadParts) || part > int64(MaxUploadParts) {
   662  		var msg string
   663  		if part > int64(u.cfg.MaxUploadParts) {
   664  			msg = fmt.Sprintf("exceeded total allowed configured MaxUploadParts (%d). Adjust PartSize to fit in this limit",
   665  				u.cfg.MaxUploadParts)
   666  		} else {
   667  			msg = fmt.Sprintf("exceeded total allowed S3 limit MaxUploadParts (%d). Adjust PartSize to fit in this limit",
   668  				MaxUploadParts)
   669  		}
   670  		return false, awserr.New("TotalPartsExceeded", msg, nil)
   671  	}
   672  
   673  	return true, err
   674  }
   675  
   676  // readChunk runs in worker goroutines to pull chunks off the ch channel
   677  // and send() them as UploadPart requests.
   678  func (u *multiuploader) readChunk(ch chan chunk) {
   679  	defer u.wg.Done()
   680  	for {
   681  		data, ok := <-ch
   682  
   683  		if !ok {
   684  			break
   685  		}
   686  
   687  		if u.geterr() == nil {
   688  			if err := u.send(data); err != nil {
   689  				u.seterr(err)
   690  			}
   691  		}
   692  
   693  		data.cleanup()
   694  	}
   695  }
   696  
   697  // send performs an UploadPart request and keeps track of the completed
   698  // part information.
   699  func (u *multiuploader) send(c chunk) error {
   700  	params := &s3.UploadPartInput{
   701  		Bucket:               u.in.Bucket,
   702  		Key:                  u.in.Key,
   703  		Body:                 c.buf,
   704  		UploadId:             &u.uploadID,
   705  		SSECustomerAlgorithm: u.in.SSECustomerAlgorithm,
   706  		SSECustomerKey:       u.in.SSECustomerKey,
   707  		PartNumber:           &c.num,
   708  	}
   709  
   710  	resp, err := u.cfg.S3.UploadPartWithContext(u.ctx, params, u.cfg.RequestOptions...)
   711  	if err != nil {
   712  		return err
   713  	}
   714  
   715  	n := c.num
   716  	completed := &s3.CompletedPart{ETag: resp.ETag, PartNumber: &n}
   717  
   718  	u.m.Lock()
   719  	u.parts = append(u.parts, completed)
   720  	u.m.Unlock()
   721  
   722  	return nil
   723  }
   724  
   725  // geterr is a thread-safe getter for the error object
   726  func (u *multiuploader) geterr() error {
   727  	u.m.Lock()
   728  	defer u.m.Unlock()
   729  
   730  	return u.err
   731  }
   732  
   733  // seterr is a thread-safe setter for the error object
   734  func (u *multiuploader) seterr(e error) {
   735  	u.m.Lock()
   736  	defer u.m.Unlock()
   737  
   738  	u.err = e
   739  }
   740  
   741  // fail will abort the multipart upload unless LeavePartsOnError is set to true.
   742  func (u *multiuploader) fail() {
   743  	if u.cfg.LeavePartsOnError {
   744  		return
   745  	}
   746  
   747  	params := &s3.AbortMultipartUploadInput{
   748  		Bucket:   u.in.Bucket,
   749  		Key:      u.in.Key,
   750  		UploadId: &u.uploadID,
   751  	}
   752  	_, err := u.cfg.S3.AbortMultipartUploadWithContext(u.ctx, params, u.cfg.RequestOptions...)
   753  	if err != nil {
   754  		logMessage(u.cfg.S3, aws.LogDebug, fmt.Sprintf("failed to abort multipart upload, %v", err))
   755  	}
   756  }
   757  
   758  // complete successfully completes a multipart upload and returns the response.
   759  func (u *multiuploader) complete() *s3.CompleteMultipartUploadOutput {
   760  	if u.geterr() != nil {
   761  		u.fail()
   762  		return nil
   763  	}
   764  
   765  	// Parts must be sorted in PartNumber order.
   766  	sort.Sort(u.parts)
   767  
   768  	params := &s3.CompleteMultipartUploadInput{
   769  		Bucket:          u.in.Bucket,
   770  		Key:             u.in.Key,
   771  		UploadId:        &u.uploadID,
   772  		MultipartUpload: &s3.CompletedMultipartUpload{Parts: u.parts},
   773  	}
   774  	resp, err := u.cfg.S3.CompleteMultipartUploadWithContext(u.ctx, params, u.cfg.RequestOptions...)
   775  	if err != nil {
   776  		u.seterr(err)
   777  		u.fail()
   778  	}
   779  
   780  	return resp
   781  }
   782  
   783  type readerAtSeeker interface {
   784  	io.ReaderAt
   785  	io.ReadSeeker
   786  }