github.com/minio/minio@v0.0.0-20240328213742-3f72439b8a27/cmd/object-multipart-handlers.go (about)

     1  // Copyright (c) 2015-2023 MinIO, Inc.
     2  //
     3  // This file is part of MinIO Object Storage stack
     4  //
     5  // This program is free software: you can redistribute it and/or modify
     6  // it under the terms of the GNU Affero General Public License as published by
     7  // the Free Software Foundation, either version 3 of the License, or
     8  // (at your option) any later version.
     9  //
    10  // This program is distributed in the hope that it will be useful
    11  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    12  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    13  // GNU Affero General Public License for more details.
    14  //
    15  // You should have received a copy of the GNU Affero General Public License
    16  // along with this program.  If not, see <http://www.gnu.org/licenses/>.
    17  
    18  package cmd
    19  
    20  import (
    21  	"bufio"
    22  	"context"
    23  	"io"
    24  	"net/http"
    25  	"net/url"
    26  	"sort"
    27  	"strconv"
    28  	"strings"
    29  	"time"
    30  
    31  	"github.com/google/uuid"
    32  	"github.com/minio/minio-go/v7"
    33  	"github.com/minio/minio-go/v7/pkg/encrypt"
    34  	"github.com/minio/minio-go/v7/pkg/tags"
    35  	"github.com/minio/minio/internal/amztime"
    36  	sse "github.com/minio/minio/internal/bucket/encryption"
    37  	objectlock "github.com/minio/minio/internal/bucket/object/lock"
    38  	"github.com/minio/minio/internal/bucket/replication"
    39  	"github.com/minio/minio/internal/config/cache"
    40  	"github.com/minio/minio/internal/config/dns"
    41  	"github.com/minio/minio/internal/config/storageclass"
    42  	"github.com/minio/minio/internal/crypto"
    43  	"github.com/minio/minio/internal/etag"
    44  	"github.com/minio/minio/internal/event"
    45  	"github.com/minio/minio/internal/fips"
    46  	"github.com/minio/minio/internal/handlers"
    47  	"github.com/minio/minio/internal/hash"
    48  	xhttp "github.com/minio/minio/internal/http"
    49  	"github.com/minio/minio/internal/logger"
    50  	"github.com/minio/mux"
    51  	"github.com/minio/pkg/v2/policy"
    52  	"github.com/minio/sio"
    53  )
    54  
    55  // Multipart objectAPIHandlers
    56  
// NewMultipartUploadHandler - New multipart upload.
//
// Validates authorization, applies the bucket's default SSE configuration,
// checks storage-class / object-lock / replication headers, then asks the
// object layer to initiate a multipart upload and replies with the upload ID
// as XML.
//
// Notice: The S3 client can send secret keys in headers for encryption related jobs,
// the handler should ensure to remove these keys before sending them to the object layer.
// Currently these keys are:
//   - X-Amz-Server-Side-Encryption-Customer-Key
//   - X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key
func (api objectAPIHandlers) NewMultipartUploadHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "NewMultipartUpload")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	objectAPI := api.ObjectAPI()
	if objectAPI == nil {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
		return
	}

	vars := mux.Vars(r)
	bucket := vars["bucket"]
	object, err := unescapePath(vars["object"])
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}

	if s3Error := checkRequestAuthType(ctx, r, policy.PutObjectAction, bucket, object); s3Error != ErrNone {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
		return
	}

	// Check if bucket encryption is enabled; this may inject default SSE
	// headers into the request when the bucket mandates encryption.
	sseConfig, _ := globalBucketSSEConfigSys.Get(bucket)
	sseConfig.Apply(r.Header, sse.ApplyOptions{
		AutoEncrypt: globalAutoEncryption,
	})

	// Validate storage class metadata if present
	if sc := r.Header.Get(xhttp.AmzStorageClass); sc != "" {
		if !storageclass.IsValid(sc) {
			writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidStorageClass), r.URL)
			return
		}
	}

	// Encryption-related metadata is collected separately so it can be merged
	// into the object metadata after sensitive entries are scrubbed below.
	encMetadata := map[string]string{}

	if crypto.Requested(r.Header) {
		// SSE-C copy-source headers are meaningless on a fresh upload.
		if crypto.SSECopy.IsRequested(r.Header) {
			writeErrorResponse(ctx, w, toAPIError(ctx, errInvalidEncryptionParameters), r.URL)
			return
		}

		// SSE-C and SSE-S3 are mutually exclusive.
		if crypto.SSEC.IsRequested(r.Header) && crypto.S3.IsRequested(r.Header) {
			writeErrorResponse(ctx, w, toAPIError(ctx, crypto.ErrIncompatibleEncryptionMethod), r.URL)
			return
		}

		// SSE-C and SSE-KMS are mutually exclusive.
		if crypto.SSEC.IsRequested(r.Header) && crypto.S3KMS.IsRequested(r.Header) {
			writeErrorResponse(ctx, w, toAPIError(ctx, crypto.ErrIncompatibleEncryptionMethod), r.URL)
			return
		}

		// Transparent compression cannot be combined with client-held keys.
		if crypto.SSEC.IsRequested(r.Header) && isCompressible(r.Header, object) {
			writeErrorResponse(ctx, w, toAPIError(ctx, crypto.ErrIncompatibleEncryptionWithCompression), r.URL)
			return
		}

		// A replication source may carry already-sealed SSE-C key material in
		// these internal headers; detect that case so we do not derive a fresh
		// encryption key and clobber the replicated one.
		_, sourceReplReq := r.Header[xhttp.MinIOSourceReplicationRequest]
		ssecRepHeaders := []string{
			"X-Minio-Replication-Server-Side-Encryption-Seal-Algorithm",
			"X-Minio-Replication-Server-Side-Encryption-Sealed-Key",
			"X-Minio-Replication-Server-Side-Encryption-Iv",
		}
		ssecRep := false
		for _, header := range ssecRepHeaders {
			if val := r.Header.Get(header); val != "" {
				ssecRep = true
				break
			}
		}
		// Only set up new encryption metadata when this is not a replication
		// request that already carries sealed key material.
		if !(ssecRep && sourceReplReq) {
			if err = setEncryptionMetadata(r, bucket, object, encMetadata); err != nil {
				writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
				return
			}
		}
		// Set this for multipart only operations, we need to differentiate during
		// decryption if the file was actually multipart or not.
		encMetadata[ReservedMetadataPrefix+"Encrypted-Multipart"] = ""
	}

	// Extract metadata that needs to be saved.
	metadata, err := extractMetadataFromReq(ctx, r)
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}

	// Validate (but do not persist parsed form of) any object tags supplied.
	if objTags := r.Header.Get(xhttp.AmzObjectTagging); objTags != "" {
		if _, err := tags.ParseObjectTags(objTags); err != nil {
			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
			return
		}

		metadata[xhttp.AmzObjectTagging] = objTags
	}
	// Incoming replica uploads record their replica status and timestamp.
	if r.Header.Get(xhttp.AmzBucketReplicationStatus) == replication.Replica.String() {
		metadata[ReservedMetadataPrefixLower+ReplicaStatus] = replication.Replica.String()
		metadata[ReservedMetadataPrefixLower+ReplicaTimestamp] = UTCNow().Format(time.RFC3339Nano)
	}
	retPerms := isPutActionAllowed(ctx, getRequestAuthType(r), bucket, object, r, policy.PutObjectRetentionAction)
	holdPerms := isPutActionAllowed(ctx, getRequestAuthType(r), bucket, object, r, policy.PutObjectLegalHoldAction)

	getObjectInfo := objectAPI.GetObjectInfo

	// Object-lock headers (retention mode/date, legal hold) are validated
	// against the caller's permissions before being stored as metadata.
	retentionMode, retentionDate, legalHold, s3Err := checkPutObjectLockAllowed(ctx, r, bucket, object, getObjectInfo, retPerms, holdPerms)
	if s3Err == ErrNone && retentionMode.Valid() {
		metadata[strings.ToLower(xhttp.AmzObjectLockMode)] = string(retentionMode)
		metadata[strings.ToLower(xhttp.AmzObjectLockRetainUntilDate)] = amztime.ISO8601Format(retentionDate.UTC())
	}
	if s3Err == ErrNone && legalHold.Status.Valid() {
		metadata[strings.ToLower(xhttp.AmzObjectLockLegalHold)] = string(legalHold.Status)
	}
	if s3Err != ErrNone {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
		return
	}
	// Mark the upload pending replication if bucket replication rules match.
	if dsc := mustReplicate(ctx, bucket, object, getMustReplicateOptions(metadata, "", "", replication.ObjectReplicationType, ObjectOptions{})); dsc.ReplicateAny() {
		metadata[ReservedMetadataPrefixLower+ReplicationTimestamp] = UTCNow().Format(time.RFC3339Nano)
		metadata[ReservedMetadataPrefixLower+ReplicationStatus] = dsc.PendingStatus()
	}

	// We need to preserve the encryption headers set in EncryptRequest,
	// so we do not want to override them, copy them instead.
	for k, v := range encMetadata {
		metadata[k] = v
	}

	// Ensure that metadata does not contain sensitive information
	crypto.RemoveSensitiveEntries(metadata)

	if isCompressible(r.Header, object) {
		// Storing the compression metadata.
		metadata[ReservedMetadataPrefix+"compression"] = compressionAlgorithmV2
	}

	opts, err := putOptsFromReq(ctx, r, bucket, object, metadata)
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}

	// Install a precondition callback when the client sent conditional
	// headers; it decrypts object info before evaluating If-Match et al.
	if opts.PreserveETag != "" ||
		r.Header.Get(xhttp.IfMatch) != "" ||
		r.Header.Get(xhttp.IfNoneMatch) != "" {
		opts.CheckPrecondFn = func(oi ObjectInfo) bool {
			if _, err := DecryptObjectInfo(&oi, r); err != nil {
				writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
				return true
			}
			return checkPreconditionsPUT(ctx, w, r, oi, opts)
		}
	}

	// Record the requested checksum algorithm; trailing checksums are
	// supplied per-part, so only non-trailing types are pinned here.
	checksumType := hash.NewChecksumType(r.Header.Get(xhttp.AmzChecksumAlgo))
	if checksumType.Is(hash.ChecksumInvalid) {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequestParameter), r.URL)
		return
	} else if checksumType.IsSet() && !checksumType.Is(hash.ChecksumTrailing) {
		opts.WantChecksum = &hash.Checksum{Type: checksumType}
	}

	newMultipartUpload := objectAPI.NewMultipartUpload

	res, err := newMultipartUpload(ctx, bucket, object, opts)
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}

	response := generateInitiateMultipartUploadResponse(bucket, object, res.UploadID)
	if res.ChecksumAlgo != "" {
		w.Header().Set(xhttp.AmzChecksumAlgo, res.ChecksumAlgo)
	}
	encodedSuccessResponse := encodeResponse(response)

	// Write success response.
	writeSuccessResponseXML(w, encodedSuccessResponse)
}
   246  
// CopyObjectPartHandler - uploads a part by copying data from an existing object as data source.
//
// Implements S3 UploadPartCopy: reads a (possibly ranged) slice of the source
// object, optionally re-compresses and re-encrypts it for the destination
// upload, and stores it as part `partNumber` of `uploadId`. In federated
// deployments where the destination bucket lives on another cluster, the data
// is streamed to the remote instance instead.
func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "CopyObjectPart")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	objectAPI := api.ObjectAPI()
	if objectAPI == nil {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
		return
	}

	if crypto.S3KMS.IsRequested(r.Header) { // SSE-KMS is not supported
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
		return
	}

	vars := mux.Vars(r)
	dstBucket := vars["bucket"]
	dstObject, err := unescapePath(vars["object"])
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}

	if s3Error := checkRequestAuthType(ctx, r, policy.PutObjectAction, dstBucket, dstObject); s3Error != ErrNone {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
		return
	}

	// Read escaped copy source path to check for parameters.
	cpSrcPath := r.Header.Get(xhttp.AmzCopySource)
	var vid string
	if u, err := url.Parse(cpSrcPath); err == nil {
		vid = strings.TrimSpace(u.Query().Get(xhttp.VersionID))
		// Note that url.Parse does the unescaping
		cpSrcPath = u.Path
	}

	srcBucket, srcObject := path2BucketObject(cpSrcPath)
	// If source object is empty or bucket is empty, reply back invalid copy source.
	if srcObject == "" || srcBucket == "" {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidCopySource), r.URL)
		return
	}

	// A non-null version ID must be a well-formed UUID.
	if vid != "" && vid != nullVersionID {
		_, err := uuid.Parse(vid)
		if err != nil {
			writeErrorResponse(ctx, w, toAPIError(ctx, VersionNotFound{
				Bucket:    srcBucket,
				Object:    srcObject,
				VersionID: vid,
			}), r.URL)
			return
		}
	}

	// The caller needs read access on the source in addition to write access
	// on the destination (checked above).
	if s3Error := checkRequestAuthType(ctx, r, policy.GetObjectAction, srcBucket, srcObject); s3Error != ErrNone {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
		return
	}

	uploadID := r.Form.Get(xhttp.UploadID)
	partIDString := r.Form.Get(xhttp.PartNumber)

	partID, err := strconv.Atoi(partIDString)
	if err != nil || partID <= 0 {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidPart), r.URL)
		return
	}

	// check partID with maximum part ID for multipart objects
	if isMaxPartID(partID) {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidMaxParts), r.URL)
		return
	}

	var srcOpts, dstOpts ObjectOptions
	srcOpts, err = copySrcOpts(ctx, r, srcBucket, srcObject)
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}
	srcOpts.VersionID = vid

	// convert copy src and dst encryption options for GET/PUT calls
	getOpts := ObjectOptions{VersionID: srcOpts.VersionID}
	if srcOpts.ServerSideEncryption != nil {
		getOpts.ServerSideEncryption = encrypt.SSE(srcOpts.ServerSideEncryption)
	}

	dstOpts, err = copyDstOpts(ctx, r, dstBucket, dstObject, nil)
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}

	getObjectNInfo := objectAPI.GetObjectNInfo

	// Get request range.
	var rs *HTTPRangeSpec
	var parseRangeErr error
	if rangeHeader := r.Header.Get(xhttp.AmzCopySourceRange); rangeHeader != "" {
		rs, parseRangeErr = parseCopyPartRangeSpec(rangeHeader)
	} else {
		// This check is to see if client specified a header but the value
		// is empty for 'x-amz-copy-source-range'
		_, ok := r.Header[xhttp.AmzCopySourceRange]
		if ok {
			parseRangeErr = errInvalidRange
		}
	}

	// Precondition callback evaluated by the object layer while it holds the
	// source object; returning true means "fail the copy" and the callback
	// has already written the error response.
	checkCopyPartPrecondFn := func(o ObjectInfo) bool {
		if _, err := DecryptObjectInfo(&o, r); err != nil {
			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
			return true
		}
		if checkCopyObjectPartPreconditions(ctx, w, r, o) {
			return true
		}
		if parseRangeErr != nil {
			writeCopyPartErr(ctx, w, parseRangeErr, r.URL)
			// Range header mismatch is pre-condition like failure
			// so return true to indicate Range precondition failed.
			return true
		}
		return false
	}
	getOpts.CheckPrecondFn = checkCopyPartPrecondFn
	gr, err := getObjectNInfo(ctx, srcBucket, srcObject, rs, r.Header, getOpts)
	if err != nil {
		// Precondition failures already wrote the response in the callback.
		if isErrPreconditionFailed(err) {
			return
		}
		if globalBucketVersioningSys.PrefixEnabled(srcBucket, srcObject) && gr != nil {
			// Versioning enabled quite possibly object is deleted might be delete-marker
			// if present set the headers, no idea why AWS S3 sets these headers.
			if gr.ObjInfo.VersionID != "" && gr.ObjInfo.DeleteMarker {
				w.Header()[xhttp.AmzVersionID] = []string{gr.ObjInfo.VersionID}
				w.Header()[xhttp.AmzDeleteMarker] = []string{strconv.FormatBool(gr.ObjInfo.DeleteMarker)}
			}
		}
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}
	defer gr.Close()
	srcInfo := gr.ObjInfo

	// Actual (decompressed/decrypted) size of the source, used for range and
	// quota math below.
	actualPartSize, err := srcInfo.GetActualSize()
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}

	if err := enforceBucketQuotaHard(ctx, dstBucket, actualPartSize); err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}

	// Special care for CopyObjectPart
	if partRangeErr := checkCopyPartRangeWithSize(rs, actualPartSize); partRangeErr != nil {
		writeCopyPartErr(ctx, w, partRangeErr, r.URL)
		return
	}

	// Get the object offset & length
	startOffset, length, err := rs.GetOffsetLength(actualPartSize)
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}

	// maximum copy size for multipart objects in a single operation
	if isMaxObjectSize(length) {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrEntityTooLarge), r.URL)
		return
	}

	// Federated deployment: the destination bucket resolves to a different
	// cluster, so stream the part to the remote instance and return.
	if isRemoteCopyRequired(ctx, srcBucket, dstBucket, objectAPI) {
		var dstRecords []dns.SrvRecord
		dstRecords, err = globalDNSConfig.Get(dstBucket)
		if err != nil {
			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
			return
		}

		// Send PutObject request to appropriate instance (in federated deployment)
		core, rerr := getRemoteInstanceClient(r, getHostFromSrv(dstRecords))
		if rerr != nil {
			writeErrorResponse(ctx, w, toAPIError(ctx, rerr), r.URL)
			return
		}

		popts := minio.PutObjectPartOptions{
			SSE: dstOpts.ServerSideEncryption,
		}

		partInfo, err := core.PutObjectPart(ctx, dstBucket, dstObject, uploadID, partID, gr, length, popts)
		if err != nil {
			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
			return
		}

		response := generateCopyObjectPartResponse(partInfo.ETag, partInfo.LastModified)
		encodedSuccessResponse := encodeResponse(response)

		// Write success response.
		writeSuccessResponseXML(w, encodedSuccessResponse)
		return
	}

	// From here on the selected range is the payload being written.
	actualPartSize = length
	var reader io.Reader = etag.NewReader(ctx, gr, nil, nil)

	mi, err := objectAPI.GetMultipartInfo(ctx, dstBucket, dstObject, uploadID, dstOpts)
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}

	_, isEncrypted := crypto.IsEncrypted(mi.UserDefined)

	// Read compression metadata preserved in the init multipart for the decision.
	_, isCompressed := mi.UserDefined[ReservedMetadataPrefix+"compression"]
	// Compress only if the compression is enabled during initial multipart.
	var idxCb func() []byte
	if isCompressed {
		wantEncryption := crypto.Requested(r.Header) || isEncrypted
		s2c, cb := newS2CompressReader(reader, actualPartSize, wantEncryption)
		idxCb = cb
		defer s2c.Close()
		reader = etag.Wrap(s2c, reader)
		// Compressed size is unpredictable; -1 means "unknown length".
		length = -1
	}

	srcInfo.Reader, err = hash.NewReader(ctx, reader, length, "", "", actualPartSize)
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}

	// Rebuild destination options now that the upload's stored metadata is
	// available (the earlier copyDstOpts call passed nil metadata).
	dstOpts, err = copyDstOpts(ctx, r, dstBucket, dstObject, mi.UserDefined)
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}
	dstOpts.IndexCB = idxCb

	rawReader := srcInfo.Reader
	pReader := NewPutObjReader(rawReader)

	var objectEncryptionKey crypto.ObjectKey
	if isEncrypted {
		// SSE-C uploads require the customer key on every part request.
		if !crypto.SSEC.IsRequested(r.Header) && crypto.SSEC.IsEncrypted(mi.UserDefined) {
			writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrSSEMultipartEncrypted), r.URL)
			return
		}
		// Conversely, an SSE-S3 upload must not receive SSE-C headers.
		if crypto.S3.IsEncrypted(mi.UserDefined) && crypto.SSEC.IsRequested(r.Header) {
			writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrSSEMultipartEncrypted), r.URL)
			return
		}
		var key []byte
		if crypto.SSEC.IsRequested(r.Header) {
			key, err = ParseSSECustomerRequest(r)
			if err != nil {
				writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
				return
			}
		}
		// Unseal the object encryption key stored at upload initiation.
		key, err = decryptObjectMeta(key, dstBucket, dstObject, mi.UserDefined)
		if err != nil {
			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
			return
		}
		copy(objectEncryptionKey[:], key)

		// Each part is encrypted with its own key derived from the object key
		// and the part number.
		partEncryptionKey := objectEncryptionKey.DerivePartKey(uint32(partID))
		encReader, err := sio.EncryptReader(reader, sio.Config{Key: partEncryptionKey[:], CipherSuites: fips.DARECiphers()})
		if err != nil {
			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
			return
		}
		reader = etag.Wrap(encReader, reader)

		// When the plaintext length is known, precompute the ciphertext size
		// so the hash reader can enforce it; otherwise leave it unknown.
		wantSize := int64(-1)
		if length >= 0 {
			info := ObjectInfo{Size: length}
			wantSize = info.EncryptedSize()
		}

		srcInfo.Reader, err = hash.NewReader(ctx, reader, wantSize, "", "", actualPartSize)
		if err != nil {
			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
			return
		}
		pReader, err = pReader.WithEncryption(srcInfo.Reader, &objectEncryptionKey)
		if err != nil {
			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
			return
		}
		// The compression index must be encrypted alongside the data.
		if dstOpts.IndexCB != nil {
			dstOpts.IndexCB = compressionIndexEncrypter(objectEncryptionKey, dstOpts.IndexCB)
		}
	}

	srcInfo.PutObjReader = pReader
	copyObjectPart := objectAPI.CopyObjectPart

	// Copy source object to destination, if source and destination
	// object is same then only metadata is updated.
	partInfo, err := copyObjectPart(ctx, srcBucket, srcObject, dstBucket, dstObject, uploadID, partID,
		startOffset, length, srcInfo, srcOpts, dstOpts)
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}

	// Report the plaintext ETag to the client, not the ciphertext one.
	if isEncrypted {
		sseS3 := crypto.S3.IsRequested(r.Header) || crypto.S3.IsEncrypted(mi.UserDefined)
		partInfo.ETag = tryDecryptETag(objectEncryptionKey[:], partInfo.ETag, sseS3)
	}

	response := generateCopyObjectPartResponse(partInfo.ETag, partInfo.LastModified)
	encodedSuccessResponse := encodeResponse(response)

	// Write success response.
	writeSuccessResponseXML(w, encodedSuccessResponse)
}
   577  
   578  // PutObjectPartHandler - uploads an incoming part for an ongoing multipart operation.
   579  func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http.Request) {
   580  	ctx := newContext(r, w, "PutObjectPart")
   581  
   582  	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
   583  
   584  	objectAPI := api.ObjectAPI()
   585  	if objectAPI == nil {
   586  		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
   587  		return
   588  	}
   589  
   590  	vars := mux.Vars(r)
   591  	bucket := vars["bucket"]
   592  	object, err := unescapePath(vars["object"])
   593  	if err != nil {
   594  		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
   595  		return
   596  	}
   597  
   598  	// X-Amz-Copy-Source shouldn't be set for this call.
   599  	if _, ok := r.Header[xhttp.AmzCopySource]; ok {
   600  		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidCopySource), r.URL)
   601  		return
   602  	}
   603  
   604  	clientETag, err := etag.FromContentMD5(r.Header)
   605  	if err != nil {
   606  		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidDigest), r.URL)
   607  		return
   608  	}
   609  
   610  	// if Content-Length is unknown/missing, throw away
   611  	size := r.ContentLength
   612  
   613  	rAuthType := getRequestAuthType(r)
   614  	// For auth type streaming signature, we need to gather a different content length.
   615  	switch rAuthType {
   616  	// Check signature types that must have content length
   617  	case authTypeStreamingSigned, authTypeStreamingSignedTrailer, authTypeStreamingUnsignedTrailer:
   618  		if sizeStr, ok := r.Header[xhttp.AmzDecodedContentLength]; ok {
   619  			if sizeStr[0] == "" {
   620  				writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingContentLength), r.URL)
   621  				return
   622  			}
   623  			size, err = strconv.ParseInt(sizeStr[0], 10, 64)
   624  			if err != nil {
   625  				writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
   626  				return
   627  			}
   628  		}
   629  	}
   630  
   631  	if size == -1 {
   632  		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingContentLength), r.URL)
   633  		return
   634  	}
   635  
   636  	uploadID := r.Form.Get(xhttp.UploadID)
   637  	partIDString := r.Form.Get(xhttp.PartNumber)
   638  
   639  	partID, err := strconv.Atoi(partIDString)
   640  	if err != nil || partID <= 0 {
   641  		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidPart), r.URL)
   642  		return
   643  	}
   644  
   645  	// maximum size for multipart objects in a single operation
   646  	if isMaxObjectSize(size) {
   647  		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrEntityTooLarge), r.URL)
   648  		return
   649  	}
   650  
   651  	// check partID with maximum part ID for multipart objects
   652  	if isMaxPartID(partID) {
   653  		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidMaxParts), r.URL)
   654  		return
   655  	}
   656  
   657  	var (
   658  		md5hex              = clientETag.String()
   659  		sha256hex           = ""
   660  		reader    io.Reader = r.Body
   661  		s3Error   APIErrorCode
   662  	)
   663  	if s3Error = isPutActionAllowed(ctx, rAuthType, bucket, object, r, policy.PutObjectAction); s3Error != ErrNone {
   664  		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
   665  		return
   666  	}
   667  
   668  	switch rAuthType {
   669  	case authTypeStreamingSigned, authTypeStreamingSignedTrailer:
   670  		// Initialize stream signature verifier.
   671  		reader, s3Error = newSignV4ChunkedReader(r, rAuthType == authTypeStreamingSignedTrailer)
   672  		if s3Error != ErrNone {
   673  			writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
   674  			return
   675  		}
   676  	case authTypeStreamingUnsignedTrailer:
   677  		// Initialize stream signature verifier.
   678  		reader, s3Error = newUnsignedV4ChunkedReader(r, true)
   679  		if s3Error != ErrNone {
   680  			writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
   681  			return
   682  		}
   683  	case authTypeSignedV2, authTypePresignedV2:
   684  		if s3Error = isReqAuthenticatedV2(r); s3Error != ErrNone {
   685  			writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
   686  			return
   687  		}
   688  	case authTypePresigned, authTypeSigned:
   689  		if s3Error = reqSignatureV4Verify(r, globalSite.Region, serviceS3); s3Error != ErrNone {
   690  			writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
   691  			return
   692  		}
   693  
   694  		if !skipContentSha256Cksum(r) {
   695  			sha256hex = getContentSha256Cksum(r, serviceS3)
   696  		}
   697  	}
   698  
   699  	if err := enforceBucketQuotaHard(ctx, bucket, size); err != nil {
   700  		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
   701  		return
   702  	}
   703  
   704  	actualSize := size
   705  
   706  	// get encryption options
   707  	var opts ObjectOptions
   708  	if crypto.SSEC.IsRequested(r.Header) {
   709  		opts, err = getOpts(ctx, r, bucket, object)
   710  		if err != nil {
   711  			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
   712  			return
   713  		}
   714  	}
   715  
   716  	mi, err := objectAPI.GetMultipartInfo(ctx, bucket, object, uploadID, opts)
   717  	if err != nil {
   718  		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
   719  		return
   720  	}
   721  
   722  	// Read compression metadata preserved in the init multipart for the decision.
   723  	_, isCompressed := mi.UserDefined[ReservedMetadataPrefix+"compression"]
   724  	var idxCb func() []byte
   725  	if isCompressed {
   726  		actualReader, err := hash.NewReader(ctx, reader, size, md5hex, sha256hex, actualSize)
   727  		if err != nil {
   728  			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
   729  			return
   730  		}
   731  		if err = actualReader.AddChecksum(r, false); err != nil {
   732  			writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidChecksum), r.URL)
   733  			return
   734  		}
   735  
   736  		// Set compression metrics.
   737  		wantEncryption := crypto.Requested(r.Header)
   738  		s2c, cb := newS2CompressReader(actualReader, actualSize, wantEncryption)
   739  		idxCb = cb
   740  		defer s2c.Close()
   741  		reader = etag.Wrap(s2c, actualReader)
   742  		size = -1   // Since compressed size is un-predictable.
   743  		md5hex = "" // Do not try to verify the content.
   744  		sha256hex = ""
   745  	}
   746  
   747  	var forceMD5 []byte
   748  	// Optimization: If SSE-KMS and SSE-C did not request Content-Md5. Use uuid as etag. Optionally enable this also
   749  	// for server that is started with `--no-compat`.
   750  	if !etag.ContentMD5Requested(r.Header) && (crypto.S3KMS.IsEncrypted(mi.UserDefined) || crypto.SSEC.IsRequested(r.Header) || !globalServerCtxt.StrictS3Compat) {
   751  		forceMD5 = mustGetUUIDBytes()
   752  	}
   753  
   754  	hashReader, err := hash.NewReaderWithOpts(ctx, reader, hash.Options{
   755  		Size:       size,
   756  		MD5Hex:     md5hex,
   757  		SHA256Hex:  sha256hex,
   758  		ActualSize: actualSize,
   759  		DisableMD5: false,
   760  		ForceMD5:   forceMD5,
   761  	})
   762  	if err != nil {
   763  		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
   764  		return
   765  	}
   766  
   767  	if err := hashReader.AddChecksum(r, size < 0); err != nil {
   768  		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidChecksum), r.URL)
   769  		return
   770  	}
   771  
   772  	pReader := NewPutObjReader(hashReader)
   773  
   774  	_, isEncrypted := crypto.IsEncrypted(mi.UserDefined)
   775  	_, replicationStatus := mi.UserDefined[xhttp.AmzBucketReplicationStatus]
   776  	var objectEncryptionKey crypto.ObjectKey
   777  	if isEncrypted {
   778  		if !crypto.SSEC.IsRequested(r.Header) && crypto.SSEC.IsEncrypted(mi.UserDefined) && !replicationStatus {
   779  			writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrSSEMultipartEncrypted), r.URL)
   780  			return
   781  		}
   782  
   783  		opts, err = putOptsFromReq(ctx, r, bucket, object, mi.UserDefined)
   784  		if err != nil {
   785  			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
   786  			return
   787  		}
   788  
   789  		var key []byte
   790  		if crypto.SSEC.IsRequested(r.Header) {
   791  			key, err = ParseSSECustomerRequest(r)
   792  			if err != nil {
   793  				writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
   794  				return
   795  			}
   796  		}
   797  
   798  		_, sourceReplReq := r.Header[xhttp.MinIOSourceReplicationRequest]
   799  		if !(sourceReplReq && crypto.SSEC.IsEncrypted(mi.UserDefined)) {
   800  			// Calculating object encryption key
   801  			key, err = decryptObjectMeta(key, bucket, object, mi.UserDefined)
   802  			if err != nil {
   803  				writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
   804  				return
   805  			}
   806  			copy(objectEncryptionKey[:], key)
   807  
   808  			partEncryptionKey := objectEncryptionKey.DerivePartKey(uint32(partID))
   809  			in := io.Reader(hashReader)
   810  			if size > encryptBufferThreshold {
   811  				// The encryption reads in blocks of 64KB.
   812  				// We add a buffer on bigger files to reduce the number of syscalls upstream.
   813  				in = bufio.NewReaderSize(hashReader, encryptBufferSize)
   814  			}
   815  			reader, err = sio.EncryptReader(in, sio.Config{Key: partEncryptionKey[:], CipherSuites: fips.DARECiphers()})
   816  			if err != nil {
   817  				writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
   818  				return
   819  			}
   820  			wantSize := int64(-1)
   821  			if size >= 0 {
   822  				info := ObjectInfo{Size: size}
   823  				wantSize = info.EncryptedSize()
   824  			}
   825  			// do not try to verify encrypted content
   826  			hashReader, err = hash.NewReader(ctx, etag.Wrap(reader, hashReader), wantSize, "", "", actualSize)
   827  			if err != nil {
   828  				writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
   829  				return
   830  			}
   831  			if err := hashReader.AddChecksum(r, true); err != nil {
   832  				writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidChecksum), r.URL)
   833  				return
   834  			}
   835  
   836  			pReader, err = pReader.WithEncryption(hashReader, &objectEncryptionKey)
   837  			if err != nil {
   838  				writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
   839  				return
   840  			}
   841  
   842  			if idxCb != nil {
   843  				idxCb = compressionIndexEncrypter(objectEncryptionKey, idxCb)
   844  			}
   845  			opts.EncryptFn = metadataEncrypter(objectEncryptionKey)
   846  		}
   847  	}
   848  	opts.IndexCB = idxCb
   849  
   850  	putObjectPart := objectAPI.PutObjectPart
   851  
   852  	partInfo, err := putObjectPart(ctx, bucket, object, uploadID, partID, pReader, opts)
   853  	if err != nil {
   854  		// Verify if the underlying error is signature mismatch.
   855  		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
   856  		return
   857  	}
   858  
   859  	etag := partInfo.ETag
   860  	if kind, encrypted := crypto.IsEncrypted(mi.UserDefined); encrypted {
   861  		switch kind {
   862  		case crypto.S3KMS:
   863  			w.Header().Set(xhttp.AmzServerSideEncryption, xhttp.AmzEncryptionKMS)
   864  			w.Header().Set(xhttp.AmzServerSideEncryptionKmsID, mi.KMSKeyID())
   865  			if kmsCtx, ok := mi.UserDefined[crypto.MetaContext]; ok {
   866  				w.Header().Set(xhttp.AmzServerSideEncryptionKmsContext, kmsCtx)
   867  			}
   868  			if len(etag) >= 32 && strings.Count(etag, "-") != 1 {
   869  				etag = etag[len(etag)-32:]
   870  			}
   871  		case crypto.S3:
   872  			w.Header().Set(xhttp.AmzServerSideEncryption, xhttp.AmzEncryptionAES)
   873  			etag, _ = DecryptETag(objectEncryptionKey, ObjectInfo{ETag: etag})
   874  		case crypto.SSEC:
   875  			w.Header().Set(xhttp.AmzServerSideEncryptionCustomerAlgorithm, r.Header.Get(xhttp.AmzServerSideEncryptionCustomerAlgorithm))
   876  			w.Header().Set(xhttp.AmzServerSideEncryptionCustomerKeyMD5, r.Header.Get(xhttp.AmzServerSideEncryptionCustomerKeyMD5))
   877  
   878  			if len(etag) >= 32 && strings.Count(etag, "-") != 1 {
   879  				etag = etag[len(etag)-32:]
   880  			}
   881  		}
   882  	}
   883  
   884  	// We must not use the http.Header().Set method here because some (broken)
   885  	// clients expect the ETag header key to be literally "ETag" - not "Etag" (case-sensitive).
   886  	// Therefore, we have to set the ETag directly as map entry.
   887  	w.Header()[xhttp.ETag] = []string{"\"" + etag + "\""}
   888  	hash.TransferChecksumHeader(w, r)
   889  
   890  	writeSuccessResponseHeadersOnly(w)
   891  }
   892  
// CompleteMultipartUploadHandler - Complete multipart upload.
//
// Validates the client-supplied part list, computes the composite multipart
// ETag, asks the object layer to stitch the parts together and then emits
// the S3-compatible XML response along with replication, caching, lifecycle
// and notification side effects.
func (api objectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "CompleteMultipartUpload")

	// Audit-log the request once the handler returns.
	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	vars := mux.Vars(r)
	bucket := vars["bucket"]
	object, err := unescapePath(vars["object"])
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}

	// Fail fast while the object layer is still initializing.
	objectAPI := api.ObjectAPI()
	if objectAPI == nil {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
		return
	}

	if s3Error := checkRequestAuthType(ctx, r, policy.PutObjectAction, bucket, object); s3Error != ErrNone {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
		return
	}

	// Get upload id.
	uploadID, _, _, _, s3Error := getObjectResources(r.Form)
	if s3Error != ErrNone {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
		return
	}

	// Content-Length is required and should be non-zero: the request body
	// must carry the XML list of parts to complete.
	if r.ContentLength <= 0 {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingPart), r.URL)
		return
	}

	// Decode the XML body listing the parts to be stitched together.
	complMultipartUpload := &CompleteMultipartUpload{}
	if err = xmlDecoder(r.Body, complMultipartUpload, r.ContentLength); err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}
	if len(complMultipartUpload.Parts) == 0 {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingPart), r.URL)
		return
	}

	// S3 requires the completed parts to be listed in ascending
	// part-number order.
	if !sort.SliceIsSorted(complMultipartUpload.Parts, func(i, j int) bool {
		return complMultipartUpload.Parts[i].PartNumber < complMultipartUpload.Parts[j].PartNumber
	}) {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidPartOrder), r.URL)
		return
	}

	// Reject retention or governance headers if set, CompleteMultipartUpload spec
	// does not use these headers, and should not be passed down to checkPutObjectLockAllowed
	if objectlock.IsObjectLockRequested(r.Header) || objectlock.IsObjectLockGovernanceBypassSet(r.Header) {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
		return
	}

	if _, _, _, s3Err := checkPutObjectLockAllowed(ctx, r, bucket, object, objectAPI.GetObjectInfo, ErrNone, ErrNone); s3Err != ErrNone {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
		return
	}

	completeMultiPartUpload := objectAPI.CompleteMultipartUpload

	versioned := globalBucketVersioningSys.PrefixEnabled(bucket, object)
	suspended := globalBucketVersioningSys.PrefixSuspended(bucket, object)
	// The sweeper later removes the transitioned (tiered) remote data of any
	// object version this completion overwrites.
	os := newObjSweeper(bucket, object).WithVersioning(versioned, suspended)
	if !globalTierConfigMgr.Empty() {
		// Get appropriate object info to identify the remote object to delete
		goiOpts := os.GetOpts()
		if goi, gerr := objectAPI.GetObjectInfo(ctx, bucket, object, goiOpts); gerr == nil {
			os.SetTransitionState(goi.TransitionedObject)
		}
	}

	opts, err := completeMultipartOpts(ctx, r, bucket, object)
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}
	opts.Versioned = versioned
	opts.VersionSuspended = suspended

	// First, we compute the ETag of the multipart object.
	// The ETag of a multi-part object is always:
	//   ETag := MD5(ETag_p1, ETag_p2, ...)+"-N"   (N being the number of parts)
	//
	// This is independent of encryption. An encrypted multipart
	// object also has an ETag that is the MD5 of its part ETags.
	// The fact the in case of encryption the ETag of a part is
	// not the MD5 of the part content does not change that.
	var completeETags []etag.ETag
	for _, part := range complMultipartUpload.Parts {
		ETag, err := etag.Parse(part.ETag)
		if err != nil {
			// Skip unparseable part ETags here; the object layer validates
			// the part list against what was actually uploaded.
			continue
		}
		completeETags = append(completeETags, ETag)
	}
	multipartETag := etag.Multipart(completeETags...)
	opts.UserDefined["etag"] = multipartETag.String()

	objInfo, err := completeMultiPartUpload(ctx, bucket, object, uploadID, complMultipartUpload.Parts, opts)
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}

	opts.EncryptFn, err = objInfo.metadataEncryptFn(r.Header)
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}
	// If requested, index the contents of a completed zip archive so its
	// entries can later be served individually.
	if r.Header.Get(xMinIOExtract) == "true" && HasSuffix(object, archiveExt) {
		opts := ObjectOptions{VersionID: objInfo.VersionID, MTime: objInfo.ModTime}
		if _, err := updateObjectMetadataWithZipInfo(ctx, objectAPI, bucket, object, opts); err != nil {
			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
			return
		}
	}

	setPutObjHeaders(w, objInfo, false)
	// Schedule replication of the completed object if the bucket's
	// replication configuration says it must be replicated.
	if dsc := mustReplicate(ctx, bucket, object, objInfo.getMustReplicateOptions(replication.ObjectReplicationType, opts)); dsc.ReplicateAny() {
		scheduleReplication(ctx, objInfo, objectAPI, dsc, replication.ObjectReplicationType)
	}
	if _, ok := r.Header[xhttp.MinIOSourceReplicationRequest]; ok {
		// This upload was itself a replica; account for it after the
		// handler finishes (defer runs at function return).
		actualSize, _ := objInfo.GetActualSize()
		defer globalReplicationStats.UpdateReplicaStat(bucket, actualSize)
	}

	// Get object location.
	location := getObjectLocation(r, globalDomainNames, bucket, object)
	// Generate complete multipart response.
	response := generateCompleteMultpartUploadResponse(bucket, object, location, objInfo)
	encodedSuccessResponse := encodeResponse(response)

	// Write success response.
	writeSuccessResponseXML(w, encodedSuccessResponse)

	// Notify object created event.
	evt := eventArgs{
		EventName:    event.ObjectCreatedCompleteMultipartUpload,
		BucketName:   bucket,
		Object:       objInfo,
		ReqParams:    extractReqParams(r),
		RespElements: extractRespElements(w),
		UserAgent:    r.UserAgent(),
		Host:         handlers.GetSourceIP(r),
	}
	sendEvent(evt)

	// Prefer the decompressed/decrypted size for the cache entry; fall back
	// to the stored size when it cannot be determined.
	asize, err := objInfo.GetActualSize()
	if err != nil {
		asize = objInfo.Size
	}

	// Update the object-metadata cache after the handler returns.
	defer globalCacheConfig.Set(&cache.ObjectInfo{
		Key:          objInfo.Name,
		Bucket:       objInfo.Bucket,
		ETag:         objInfo.ETag,
		ModTime:      objInfo.ModTime,
		Expires:      objInfo.ExpiresStr(),
		CacheControl: objInfo.CacheControl,
		Size:         asize,
		Metadata:     cleanReservedKeys(objInfo.UserDefined),
	})

	// Raise an internal event and audit entry when this object has
	// accumulated an excessive number of versions.
	if objInfo.NumVersions > int(scannerExcessObjectVersions.Load()) {
		evt.EventName = event.ObjectManyVersions
		sendEvent(evt)

		auditLogInternal(context.Background(), AuditLogOptions{
			Event:     "scanner:manyversions",
			APIName:   "CompleteMultipartUpload",
			Bucket:    objInfo.Bucket,
			Object:    objInfo.Name,
			VersionID: objInfo.VersionID,
			Status:    http.StatusText(http.StatusOK),
		})
	}

	// Remove the transitioned object whose object version is being overwritten.
	if !globalTierConfigMgr.Empty() {
		// Schedule object for immediate transition if eligible.
		enqueueTransitionImmediate(objInfo, lcEventSrc_s3CompleteMultipartUpload)
		os.Sweep()
	}
}
  1086  
  1087  // AbortMultipartUploadHandler - Abort multipart upload
  1088  func (api objectAPIHandlers) AbortMultipartUploadHandler(w http.ResponseWriter, r *http.Request) {
  1089  	ctx := newContext(r, w, "AbortMultipartUpload")
  1090  
  1091  	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
  1092  
  1093  	vars := mux.Vars(r)
  1094  	bucket := vars["bucket"]
  1095  	object, err := unescapePath(vars["object"])
  1096  	if err != nil {
  1097  		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
  1098  		return
  1099  	}
  1100  
  1101  	objectAPI := api.ObjectAPI()
  1102  	if objectAPI == nil {
  1103  		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
  1104  		return
  1105  	}
  1106  	abortMultipartUpload := objectAPI.AbortMultipartUpload
  1107  
  1108  	if s3Error := checkRequestAuthType(ctx, r, policy.AbortMultipartUploadAction, bucket, object); s3Error != ErrNone {
  1109  		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
  1110  		return
  1111  	}
  1112  
  1113  	uploadID, _, _, _, s3Error := getObjectResources(r.Form)
  1114  	if s3Error != ErrNone {
  1115  		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
  1116  		return
  1117  	}
  1118  	opts := ObjectOptions{}
  1119  	if err := abortMultipartUpload(ctx, bucket, object, uploadID, opts); err != nil {
  1120  		switch err.(type) {
  1121  		case InvalidUploadID:
  1122  			// Do not have return an error for non-existent upload-id
  1123  		default:
  1124  			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
  1125  			return
  1126  		}
  1127  	}
  1128  
  1129  	writeSuccessNoContent(w)
  1130  }
  1131  
  1132  // ListObjectPartsHandler - List object parts
  1133  func (api objectAPIHandlers) ListObjectPartsHandler(w http.ResponseWriter, r *http.Request) {
  1134  	ctx := newContext(r, w, "ListObjectParts")
  1135  
  1136  	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
  1137  
  1138  	vars := mux.Vars(r)
  1139  	bucket := vars["bucket"]
  1140  	object, err := unescapePath(vars["object"])
  1141  	if err != nil {
  1142  		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
  1143  		return
  1144  	}
  1145  
  1146  	objectAPI := api.ObjectAPI()
  1147  	if objectAPI == nil {
  1148  		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
  1149  		return
  1150  	}
  1151  
  1152  	if s3Error := checkRequestAuthType(ctx, r, policy.ListMultipartUploadPartsAction, bucket, object); s3Error != ErrNone {
  1153  		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
  1154  		return
  1155  	}
  1156  
  1157  	uploadID, partNumberMarker, maxParts, encodingType, s3Error := getObjectResources(r.Form)
  1158  	if s3Error != ErrNone {
  1159  		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
  1160  		return
  1161  	}
  1162  	if partNumberMarker < 0 {
  1163  		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidPartNumberMarker), r.URL)
  1164  		return
  1165  	}
  1166  	if maxParts < 0 {
  1167  		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidMaxParts), r.URL)
  1168  		return
  1169  	}
  1170  
  1171  	opts := ObjectOptions{}
  1172  	listPartsInfo, err := objectAPI.ListObjectParts(ctx, bucket, object, uploadID, partNumberMarker, maxParts, opts)
  1173  	if err != nil {
  1174  		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
  1175  		return
  1176  	}
  1177  
  1178  	// We have to adjust the size of encrypted parts since encrypted parts
  1179  	// are slightly larger due to encryption overhead.
  1180  	// Further, we have to adjust the ETags of parts when using SSE-S3.
  1181  	// Due to AWS S3, SSE-S3 encrypted parts return the plaintext ETag
  1182  	// being the content MD5 of that particular part. This is not the
  1183  	// case for SSE-C and SSE-KMS objects.
  1184  	if kind, ok := crypto.IsEncrypted(listPartsInfo.UserDefined); ok {
  1185  		var objectEncryptionKey []byte
  1186  		if kind == crypto.S3 {
  1187  			objectEncryptionKey, err = decryptObjectMeta(nil, bucket, object, listPartsInfo.UserDefined)
  1188  			if err != nil {
  1189  				writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
  1190  				return
  1191  			}
  1192  		}
  1193  		for i, p := range listPartsInfo.Parts {
  1194  			listPartsInfo.Parts[i].ETag = tryDecryptETag(objectEncryptionKey, p.ETag, kind == crypto.S3)
  1195  			listPartsInfo.Parts[i].Size = p.ActualSize
  1196  		}
  1197  	}
  1198  
  1199  	response := generateListPartsResponse(listPartsInfo, encodingType)
  1200  	encodedSuccessResponse := encodeResponse(response)
  1201  
  1202  	// Write success response.
  1203  	writeSuccessResponseXML(w, encodedSuccessResponse)
  1204  }