storj.io/minio@v0.0.0-20230509071714-0cbc90f649b1/cmd/object-handlers.go (about) 1 /* 2 * MinIO Cloud Storage, (C) 2015-2020 MinIO, Inc. 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17 package cmd 18 19 import ( 20 "bufio" 21 "context" 22 "encoding/hex" 23 "encoding/xml" 24 "fmt" 25 "io" 26 "net/http" 27 "net/http/httptest" 28 "net/url" 29 "os" 30 "sort" 31 "strconv" 32 "strings" 33 "sync" 34 "time" 35 36 "github.com/google/uuid" 37 "github.com/gorilla/mux" 38 "github.com/minio/minio-go/v7/pkg/encrypt" 39 "github.com/minio/minio-go/v7/pkg/tags" 40 "github.com/minio/sio" 41 42 "storj.io/minio/cmd/config/storageclass" 43 "storj.io/minio/cmd/crypto" 44 xhttp "storj.io/minio/cmd/http" 45 "storj.io/minio/cmd/logger" 46 "storj.io/minio/pkg/bucket/lifecycle" 47 objectlock "storj.io/minio/pkg/bucket/object/lock" 48 "storj.io/minio/pkg/bucket/policy" 49 "storj.io/minio/pkg/bucket/replication" 50 "storj.io/minio/pkg/etag" 51 "storj.io/minio/pkg/event" 52 "storj.io/minio/pkg/fips" 53 "storj.io/minio/pkg/handlers" 54 "storj.io/minio/pkg/hash" 55 iampolicy "storj.io/minio/pkg/iam/policy" 56 "storj.io/minio/pkg/ioutil" 57 xnet "storj.io/minio/pkg/net" 58 "storj.io/minio/pkg/s3select" 59 ) 60 61 // supportedHeadGetReqParams - supported request parameters for GET and HEAD presigned request. 
var supportedHeadGetReqParams = map[string]string{
	"response-expires":             xhttp.Expires,
	"response-content-type":        xhttp.ContentType,
	"response-cache-control":       xhttp.CacheControl,
	"response-content-encoding":    xhttp.ContentEncoding,
	"response-content-language":    xhttp.ContentLanguage,
	"response-content-disposition": xhttp.ContentDisposition,
}

const (
	// Compression algorithm identifiers recorded in object metadata.
	compressionAlgorithmV1 = "golang/snappy/LZ77"
	compressionAlgorithmV2 = "klauspost/compress/s2"

	// When an upload exceeds encryptBufferThreshold ...
	encryptBufferThreshold = 1 << 20
	// add an input buffer of this size.
	encryptBufferSize = 1 << 20
)

// setHeadGetRespHeaders - set any requested parameters as response headers.
// Only query parameters present in the supportedHeadGetReqParams whitelist
// (compared case-insensitively) are honored; all others are ignored.
func setHeadGetRespHeaders(w http.ResponseWriter, reqParams url.Values) {
	for k, v := range reqParams {
		if header, ok := supportedHeadGetReqParams[strings.ToLower(k)]; ok {
			// Assign via the header map directly so the canonical header
			// name from the xhttp constant is used verbatim.
			w.Header()[header] = v
		}
	}
}

// SelectObjectContentHandler - GET Object?select
// ----------
// This implementation of the GET operation retrieves object content based
// on an SQL expression. In the request, along with the sql expression, you must
// also specify a data serialization format (JSON, CSV) of the object.
func (api ObjectAPIHandlers) SelectObjectContentHandler(w http.ResponseWriter, r *http.Request) {
	ctx := NewContext(r, w, "SelectObject")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	// Fetch object stat info.
	objectAPI := api.ObjectAPI()
	if objectAPI == nil {
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
		return
	}

	if crypto.S3.IsRequested(r.Header) || crypto.S3KMS.IsRequested(r.Header) { // If SSE-S3 or SSE-KMS present -> AWS fails with undefined error
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrBadRequest), r.URL, guessIsBrowserReq(r))
		return
	}

	// Reject any SSE request when the backing object layer cannot decrypt.
	if _, ok := crypto.IsRequested(r.Header); ok && !objectAPI.IsEncryptionSupported() {
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrBadRequest), r.URL, guessIsBrowserReq(r))
		return
	}

	vars := mux.Vars(r)
	bucket := vars["bucket"]
	object, err := unescapePath(vars["object"])
	if err != nil {
		WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	// get gateway encryption options
	opts, err := getOpts(ctx, r, bucket, object)
	if err != nil {
		WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	// Prefer the cache layer's stat implementation when a cache is configured.
	getObjectInfo := objectAPI.GetObjectInfo
	if api.CacheAPI() != nil {
		getObjectInfo = api.CacheAPI().GetObjectInfo
	}

	// Check for auth type to return S3 compatible error.
	// type to return the correct error (NoSuchKey vs AccessDenied)
	if s3Error := checkRequestAuthType(ctx, r, policy.GetObjectAction, bucket, object); s3Error != ErrNone {
		if getRequestAuthType(r) == authTypeAnonymous {
			// As per "Permission" section in
			// https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectGET.html
			// If the object you request does not exist,
			// the error Amazon S3 returns depends on
			// whether you also have the s3:ListBucket
			// permission.
			// * If you have the s3:ListBucket permission
			//   on the bucket, Amazon S3 will return an
			//   HTTP status code 404 ("no such key")
			//   error.
			// * if you don't have the s3:ListBucket
			//   permission, Amazon S3 will return an HTTP
			//   status code 403 ("access denied") error.
			if globalPolicySys.IsAllowed(policy.Args{
				Action:          policy.ListBucketAction,
				BucketName:      bucket,
				ConditionValues: getConditionValues(r, "", "", nil),
				IsOwner:         false,
			}) {
				_, err = getObjectInfo(ctx, bucket, object, opts)
				if ToAPIError(ctx, err).Code == "NoSuchKey" {
					s3Error = ErrNoSuchKey
				}
			}
		}
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
		return
	}

	// Get request range. Range requests are not supported for SELECT.
	rangeHeader := r.Header.Get(xhttp.Range)
	if rangeHeader != "" {
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrUnsupportedRangeHeader), r.URL, guessIsBrowserReq(r))
		return
	}

	// SELECT requires a request body carrying the SQL expression.
	if r.ContentLength <= 0 {
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrEmptyRequestBody), r.URL, guessIsBrowserReq(r))
		return
	}

	getObjectNInfo := objectAPI.GetObjectNInfo
	if api.CacheAPI() != nil {
		getObjectNInfo = api.CacheAPI().GetObjectNInfo
	}

	// getObject adapts (offset, length) requests from the select engine into
	// ranged reads against the object layer.
	getObject := func(offset, length int64) (rc io.ReadCloser, err error) {
		// A negative offset denotes a suffix-length read (last N bytes).
		isSuffixLength := false
		if offset < 0 {
			isSuffixLength = true
		}

		// Convert the exclusive byte count into an end offset; the range
		// spec's End appears to be inclusive — hence length-1.
		if length > 0 {
			length--
		}

		rs := &HTTPRangeSpec{
			IsSuffixLength: isSuffixLength,
			Start:          offset,
			End:            offset + length,
		}

		return getObjectNInfo(ctx, bucket, object, rs, r.Header, readLock, opts)
	}

	objInfo, err := getObjectInfo(ctx, bucket, object, opts)
	if err != nil {
		if globalBucketVersioningSys.Enabled(bucket) {
			// Versioning enabled quite possibly object is deleted might be delete-marker
			// if present set the headers, no idea why AWS S3 sets these headers.
			if objInfo.VersionID != "" && objInfo.DeleteMarker {
				w.Header()[xhttp.AmzVersionID] = []string{objInfo.VersionID}
				w.Header()[xhttp.AmzDeleteMarker] = []string{strconv.FormatBool(objInfo.DeleteMarker)}
			}
		}
		WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	// filter object lock metadata if permission does not permit
	getRetPerms := checkRequestAuthType(ctx, r, policy.GetObjectRetentionAction, bucket, object)
	legalHoldPerms := checkRequestAuthType(ctx, r, policy.GetObjectLegalHoldAction, bucket, object)

	// filter object lock metadata if permission does not permit
	objInfo.UserDefined = objectlock.FilterObjectLockMetadata(objInfo.UserDefined, getRetPerms != ErrNone, legalHoldPerms != ErrNone)

	if objectAPI.IsEncryptionSupported() {
		if _, err = DecryptObjectInfo(&objInfo, r); err != nil {
			WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
			return
		}
	}

	s3Select, err := s3select.NewS3Select(r.Body)
	if err != nil {
		// SelectError carries its own S3 error code/status; report it verbatim
		// so the client sees the select engine's diagnostics.
		if serr, ok := err.(s3select.SelectError); ok {
			encodedErrorResponse := EncodeResponse(APIErrorResponse{
				Code:       serr.ErrorCode(),
				Message:    serr.ErrorMessage(),
				BucketName: bucket,
				Key:        object,
				Resource:   r.URL.Path,
				RequestID:  w.Header().Get(xhttp.AmzRequestID),
				HostID:     globalDeploymentID,
			})
			writeResponse(w, serr.HTTPStatusCode(), encodedErrorResponse, mimeXML)
		} else {
			WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		}
		return
	}
	defer s3Select.Close()

	if err = s3Select.Open(getObject); err != nil {
		// Same SelectError-aware reporting as above.
		if serr, ok := err.(s3select.SelectError); ok {
			encodedErrorResponse := EncodeResponse(APIErrorResponse{
				Code:       serr.ErrorCode(),
				Message:    serr.ErrorMessage(),
				BucketName: bucket,
				Key:        object,
				Resource:   r.URL.Path,
				RequestID:  w.Header().Get(xhttp.AmzRequestID),
				HostID:     globalDeploymentID,
			})
			writeResponse(w, serr.HTTPStatusCode(), encodedErrorResponse, mimeXML)
		} else {
			WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		}
		return
	}

	// Set encryption response headers
	if objectAPI.IsEncryptionSupported() {
		switch kind, _ := crypto.IsEncrypted(objInfo.UserDefined); kind {
		case crypto.S3:
			w.Header().Set(xhttp.AmzServerSideEncryption, xhttp.AmzEncryptionAES)
		case crypto.SSEC:
			// Validate the SSE-C Key set in the header.
			if _, err = crypto.SSEC.UnsealObjectKey(r.Header, objInfo.UserDefined, bucket, object); err != nil {
				WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
				return
			}
			w.Header().Set(xhttp.AmzServerSideEncryptionCustomerAlgorithm, r.Header.Get(xhttp.AmzServerSideEncryptionCustomerAlgorithm))
			w.Header().Set(xhttp.AmzServerSideEncryptionCustomerKeyMD5, r.Header.Get(xhttp.AmzServerSideEncryptionCustomerKeyMD5))
		}
	}

	// Evaluate streams the select results directly to the client; errors past
	// this point are reported inside the response stream, not via HTTP status.
	s3Select.Evaluate(w)

	// Notify object accessed via a GET request.
	sendEvent(eventArgs{
		EventName:    event.ObjectAccessedGet,
		BucketName:   bucket,
		Object:       objInfo,
		ReqParams:    extractReqParams(r),
		RespElements: extractRespElements(w),
		UserAgent:    r.UserAgent(),
		Host:         handlers.GetSourceIP(r),
	})
}

// GetObjectHandler - GET Object
// ----------
// This implementation of the GET operation retrieves object. To use GET,
// you must have READ access to the object.
func (api ObjectAPIHandlers) GetObjectHandler(w http.ResponseWriter, r *http.Request) {
	ctx := NewContext(r, w, "GetObject")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	objectAPI := api.ObjectAPI()
	if objectAPI == nil {
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
		return
	}
	if crypto.S3.IsRequested(r.Header) || crypto.S3KMS.IsRequested(r.Header) { // If SSE-S3 or SSE-KMS present -> AWS fails with undefined error
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrBadRequest), r.URL, guessIsBrowserReq(r))
		return
	}
	// Reject any SSE request when the backing object layer cannot decrypt.
	if _, ok := crypto.IsRequested(r.Header); !objectAPI.IsEncryptionSupported() && ok {
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrBadRequest), r.URL, guessIsBrowserReq(r))
		return
	}
	vars := mux.Vars(r)
	bucket := vars["bucket"]
	object, err := unescapePath(vars["object"])
	if err != nil {
		WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	// get gateway encryption options
	opts, err := getOpts(ctx, r, bucket, object)
	if err != nil {
		WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	// Check for auth type to return S3 compatible error.
	// type to return the correct error (NoSuchKey vs AccessDenied)
	if s3Error := checkRequestAuthType(ctx, r, policy.GetObjectAction, bucket, object); s3Error != ErrNone {
		if getRequestAuthType(r) == authTypeAnonymous {
			// As per "Permission" section in
			// https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectGET.html
			// If the object you request does not exist,
			// the error Amazon S3 returns depends on
			// whether you also have the s3:ListBucket
			// permission.
			// * If you have the s3:ListBucket permission
			//   on the bucket, Amazon S3 will return an
			//   HTTP status code 404 ("no such key")
			//   error.
			// * if you don't have the s3:ListBucket
			//   permission, Amazon S3 will return an HTTP
			//   status code 403 ("access denied") error.
			if globalPolicySys.IsAllowed(policy.Args{
				Action:          policy.ListBucketAction,
				BucketName:      bucket,
				ConditionValues: getConditionValues(r, "", "", nil),
				IsOwner:         false,
			}) {
				getObjectInfo := objectAPI.GetObjectInfo
				if api.CacheAPI() != nil {
					getObjectInfo = api.CacheAPI().GetObjectInfo
				}

				_, err = getObjectInfo(ctx, bucket, object, opts)
				if ToAPIError(ctx, err).Code == "NoSuchKey" {
					s3Error = ErrNoSuchKey
				}
			}
		}
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
		return
	}

	getObjectNInfo := objectAPI.GetObjectNInfo
	if api.CacheAPI() != nil {
		getObjectNInfo = api.CacheAPI().GetObjectNInfo
	}

	// Get request range.
	var rs *HTTPRangeSpec
	var rangeErr error
	rangeHeader := r.Header.Get(xhttp.Range)
	if rangeHeader != "" {
		rs, rangeErr = parseRequestRangeSpec(rangeHeader)
		// Handle only errInvalidRange. Ignore other
		// parse error and treat it as regular Get
		// request like Amazon S3.
		if rangeErr == errInvalidRange {
			WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidRange), r.URL, guessIsBrowserReq(r))
			return
		}
		if rangeErr != nil {
			logger.LogIf(ctx, rangeErr, logger.Application)
		}
	}

	// Both 'bytes' and 'partNumber' cannot be specified at the same time
	if rs != nil && opts.PartNumber > 0 {
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidRangePartNumber), r.URL, guessIsBrowserReq(r))
		return
	}

	// Validate pre-conditions if any. Returning true short-circuits the read;
	// the error response has already been written by then.
	opts.CheckPrecondFn = func(oi ObjectInfo) bool {
		if objectAPI.IsEncryptionSupported() {
			if _, err := DecryptObjectInfo(&oi, r); err != nil {
				WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
				return true
			}
		}

		return checkPreconditions(ctx, w, r, oi, opts)
	}

	gr, err := getObjectNInfo(ctx, bucket, object, rs, r.Header, readLock, opts)
	if err != nil {
		var (
			reader *GetObjectReader
			proxy  bool
		)
		if isProxyable(ctx, bucket) {
			// proxy to replication target if active-active replication is in place.
			reader, proxy = proxyGetToReplicationTarget(ctx, bucket, object, rs, r.Header, opts)
			if reader != nil && proxy {
				gr = reader
			}
		}
		// Only fail the request when the proxy fallback did not yield a reader.
		if reader == nil || !proxy {
			if isErrPreconditionFailed(err) {
				// CheckPrecondFn already wrote the response.
				return
			}
			if globalBucketVersioningSys.Enabled(bucket) && gr != nil {
				if !gr.ObjInfo.VersionPurgeStatus.Empty() {
					// Shows the replication status of a permanent delete of a version
					w.Header()[xhttp.MinIODeleteReplicationStatus] = []string{string(gr.ObjInfo.VersionPurgeStatus)}
				}
				if !gr.ObjInfo.ReplicationStatus.Empty() && gr.ObjInfo.DeleteMarker {
					w.Header()[xhttp.MinIODeleteMarkerReplicationStatus] = []string{string(gr.ObjInfo.ReplicationStatus)}
				}

				// Versioning enabled quite possibly object is deleted might be delete-marker
				// if present set the headers, no idea why AWS S3 sets these headers.
				if gr.ObjInfo.VersionID != "" && gr.ObjInfo.DeleteMarker {
					w.Header()[xhttp.AmzVersionID] = []string{gr.ObjInfo.VersionID}
					w.Header()[xhttp.AmzDeleteMarker] = []string{strconv.FormatBool(gr.ObjInfo.DeleteMarker)}
				}
			}
			WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
			return
		}
	}
	defer gr.Close()

	objInfo := gr.ObjInfo

	// Automatically remove the object/version is an expiry lifecycle rule can be applied
	if lc, err := globalLifecycleSys.Get(bucket); err == nil {
		action := evalActionFromLifecycle(ctx, *lc, objInfo, false)
		if action == lifecycle.DeleteAction || action == lifecycle.DeleteVersionAction {
			// Queue the expiry asynchronously and answer as if the object
			// were already gone.
			globalExpiryState.queueExpiryTask(objInfo, action == lifecycle.DeleteVersionAction)
			writeErrorResponseHeadersOnly(w, errorCodes.ToAPIErr(ErrNoSuchKey))
			return
		}
	}

	// filter object lock metadata if permission does not permit
	getRetPerms := checkRequestAuthType(ctx, r, policy.GetObjectRetentionAction, bucket, object)
	legalHoldPerms := checkRequestAuthType(ctx, r, policy.GetObjectLegalHoldAction, bucket, object)

	// filter object lock metadata if permission does not permit
	objInfo.UserDefined = objectlock.FilterObjectLockMetadata(objInfo.UserDefined, getRetPerms != ErrNone, legalHoldPerms != ErrNone)

	// Set encryption response headers
	if objectAPI.IsEncryptionSupported() {
		switch kind, _ := crypto.IsEncrypted(objInfo.UserDefined); kind {
		case crypto.S3:
			w.Header().Set(xhttp.AmzServerSideEncryption, xhttp.AmzEncryptionAES)
		case crypto.SSEC:
			w.Header().Set(xhttp.AmzServerSideEncryptionCustomerAlgorithm, r.Header.Get(xhttp.AmzServerSideEncryptionCustomerAlgorithm))
			w.Header().Set(xhttp.AmzServerSideEncryptionCustomerKeyMD5, r.Header.Get(xhttp.AmzServerSideEncryptionCustomerKeyMD5))
		}
	}

	if err = setObjectHeaders(w, objInfo, rs, opts); err != nil {
		WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	// Set Parts Count Header
	if opts.PartNumber > 0 && len(objInfo.Parts) > 0 {
		setPartsCountHeaders(w, objInfo)
	}

	setHeadGetRespHeaders(w, r.URL.Query())

	statusCodeWritten := false
	httpWriter := ioutil.WriteOnClose(w)
	// Ranged and part-number reads answer 206 Partial Content.
	if rs != nil || opts.PartNumber > 0 {
		statusCodeWritten = true
		w.WriteHeader(http.StatusPartialContent)
	}

	// Write object content to response body
	if _, err = io.Copy(httpWriter, gr); err != nil {
		if !httpWriter.HasWritten() && !statusCodeWritten {
			// write error response only if no data or headers has been written to client yet
			WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
			return
		}
		if !xnet.IsNetworkOrHostDown(err, true) { // do not need to log disconnected clients
			logger.LogIf(ctx, fmt.Errorf("Unable to write all the data to client %w", err))
		}
		return
	}

	if err = httpWriter.Close(); err != nil {
		if !httpWriter.HasWritten() && !statusCodeWritten { // write error response only if no data or headers has been written to client yet
			WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
			return
		}
		if !xnet.IsNetworkOrHostDown(err, true) { // do not need to log disconnected clients
			logger.LogIf(ctx, fmt.Errorf("Unable to write all the data to client %w", err))
		}
		return
	}

	// Notify object accessed via a GET request.
	sendEvent(eventArgs{
		EventName:    event.ObjectAccessedGet,
		BucketName:   bucket,
		Object:       objInfo,
		ReqParams:    extractReqParams(r),
		RespElements: extractRespElements(w),
		UserAgent:    r.UserAgent(),
		Host:         handlers.GetSourceIP(r),
	})
}

// HeadObjectHandler - HEAD Object
// -----------
// The HEAD operation retrieves metadata from an object without returning the object itself.
func (api ObjectAPIHandlers) HeadObjectHandler(w http.ResponseWriter, r *http.Request) {
	ctx := NewContext(r, w, "HeadObject")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	objectAPI := api.ObjectAPI()
	if objectAPI == nil {
		// HEAD responses carry no body, so report errors via headers only.
		writeErrorResponseHeadersOnly(w, errorCodes.ToAPIErr(ErrServerNotInitialized))
		return
	}
	if crypto.S3.IsRequested(r.Header) || crypto.S3KMS.IsRequested(r.Header) { // If SSE-S3 or SSE-KMS present -> AWS fails with undefined error
		writeErrorResponseHeadersOnly(w, errorCodes.ToAPIErr(ErrBadRequest))
		return
	}
	if _, ok := crypto.IsRequested(r.Header); !objectAPI.IsEncryptionSupported() && ok {
		// NOTE(review): this path uses WriteErrorResponse (with a body) unlike
		// the headers-only variants above — presumably intentional parity with
		// GetObjectHandler, but worth confirming for HEAD semantics.
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrBadRequest), r.URL, guessIsBrowserReq(r))
		return
	}
	vars := mux.Vars(r)
	bucket := vars["bucket"]
	object, err := unescapePath(vars["object"])
	if err != nil {
		WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	// Prefer the cache layer's stat implementation when a cache is configured.
	getObjectInfo := objectAPI.GetObjectInfo
	if api.CacheAPI() != nil {
		getObjectInfo = api.CacheAPI().GetObjectInfo
	}

	opts, err := getOpts(ctx, r, bucket, object)
	if err != nil {
		writeErrorResponseHeadersOnly(w, ToAPIError(ctx, err))
		return
	}

	if s3Error := checkRequestAuthType(ctx, r, policy.GetObjectAction, bucket, object); s3Error != ErrNone {
		if getRequestAuthType(r) == authTypeAnonymous {
			// As per "Permission" section in
			// https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectHEAD.html
			// If the object you request does not exist,
			// the error Amazon S3 returns depends on
			// whether you also have the s3:ListBucket
			// permission.
			// * If you have the s3:ListBucket permission
			//   on the bucket, Amazon S3 will return an
			//   HTTP status code 404 ("no such key")
			//   error.
			// * if you don't have the s3:ListBucket
			//   permission, Amazon S3 will return an HTTP
			//   status code 403 ("access denied") error.
			if globalPolicySys.IsAllowed(policy.Args{
				Action:          policy.ListBucketAction,
				BucketName:      bucket,
				ConditionValues: getConditionValues(r, "", "", nil),
				IsOwner:         false,
			}) {
				_, err = getObjectInfo(ctx, bucket, object, opts)
				if ToAPIError(ctx, err).Code == "NoSuchKey" {
					s3Error = ErrNoSuchKey
				}
			}
		}
		writeErrorResponseHeadersOnly(w, errorCodes.ToAPIErr(s3Error))
		return
	}

	objInfo, err := getObjectInfo(ctx, bucket, object, opts)
	if err != nil {
		var (
			proxy bool
			perr  error
			oi    ObjectInfo
		)
		// proxy HEAD to replication target if active-active replication configured on bucket
		if isProxyable(ctx, bucket) {
			oi, proxy, perr = proxyHeadToReplicationTarget(ctx, bucket, object, opts)
			if proxy && perr == nil {
				objInfo = oi
			}
		}
		// Only fail the request when the proxy fallback did not succeed.
		if !proxy || perr != nil {
			if globalBucketVersioningSys.Enabled(bucket) {
				if !objInfo.VersionPurgeStatus.Empty() {
					// Shows the replication status of a permanent delete of a version
					w.Header()[xhttp.MinIODeleteReplicationStatus] = []string{string(objInfo.VersionPurgeStatus)}
				}
				if !objInfo.ReplicationStatus.Empty() && objInfo.DeleteMarker {
					w.Header()[xhttp.MinIODeleteMarkerReplicationStatus] = []string{string(objInfo.ReplicationStatus)}
				}
				// Versioning enabled quite possibly object is deleted might be delete-marker
				// if present set the headers, no idea why AWS S3 sets these headers.
				if objInfo.VersionID != "" && objInfo.DeleteMarker {
					w.Header()[xhttp.AmzVersionID] = []string{objInfo.VersionID}
					w.Header()[xhttp.AmzDeleteMarker] = []string{strconv.FormatBool(objInfo.DeleteMarker)}
				}
			}
			writeErrorResponseHeadersOnly(w, ToAPIError(ctx, err))
			return
		}
	}

	// Automatically remove the object/version is an expiry lifecycle rule can be applied
	if lc, err := globalLifecycleSys.Get(bucket); err == nil {
		action := evalActionFromLifecycle(ctx, *lc, objInfo, false)
		if action == lifecycle.DeleteAction || action == lifecycle.DeleteVersionAction {
			// Queue the expiry asynchronously and answer as if the object
			// were already gone.
			globalExpiryState.queueExpiryTask(objInfo, action == lifecycle.DeleteVersionAction)
			writeErrorResponseHeadersOnly(w, errorCodes.ToAPIErr(ErrNoSuchKey))
			return
		}
	}

	// filter object lock metadata if permission does not permit
	getRetPerms := checkRequestAuthType(ctx, r, policy.GetObjectRetentionAction, bucket, object)
	legalHoldPerms := checkRequestAuthType(ctx, r, policy.GetObjectLegalHoldAction, bucket, object)

	// filter object lock metadata if permission does not permit
	objInfo.UserDefined = objectlock.FilterObjectLockMetadata(objInfo.UserDefined, getRetPerms != ErrNone, legalHoldPerms != ErrNone)

	if objectAPI.IsEncryptionSupported() {
		if _, err = DecryptObjectInfo(&objInfo, r); err != nil {
			writeErrorResponseHeadersOnly(w, ToAPIError(ctx, err))
			return
		}
	}

	// Validate pre-conditions if any.
	if checkPreconditions(ctx, w, r, objInfo, opts) {
		// checkPreconditions has already written the response.
		return
	}

	// Get request range.
	var rs *HTTPRangeSpec
	rangeHeader := r.Header.Get(xhttp.Range)
	if rangeHeader != "" {
		if rs, err = parseRequestRangeSpec(rangeHeader); err != nil {
			// Handle only errInvalidRange. Ignore other
			// parse error and treat it as regular Get
			// request like Amazon S3.
			if err == errInvalidRange {
				writeErrorResponseHeadersOnly(w, errorCodes.ToAPIErr(ErrInvalidRange))
				return
			}

			logger.LogIf(ctx, err)
		}
	}

	// Both 'bytes' and 'partNumber' cannot be specified at the same time
	if rs != nil && opts.PartNumber > 0 {
		writeErrorResponseHeadersOnly(w, errorCodes.ToAPIErr(ErrInvalidRangePartNumber))
		return
	}

	// Set encryption response headers
	if objectAPI.IsEncryptionSupported() {
		switch kind, _ := crypto.IsEncrypted(objInfo.UserDefined); kind {
		case crypto.S3:
			w.Header().Set(xhttp.AmzServerSideEncryption, xhttp.AmzEncryptionAES)
		case crypto.SSEC:
			// Validate the SSE-C Key set in the header.
			if _, err = crypto.SSEC.UnsealObjectKey(r.Header, objInfo.UserDefined, bucket, object); err != nil {
				writeErrorResponseHeadersOnly(w, ToAPIError(ctx, err))
				return
			}
			w.Header().Set(xhttp.AmzServerSideEncryptionCustomerAlgorithm, r.Header.Get(xhttp.AmzServerSideEncryptionCustomerAlgorithm))
			w.Header().Set(xhttp.AmzServerSideEncryptionCustomerKeyMD5, r.Header.Get(xhttp.AmzServerSideEncryptionCustomerKeyMD5))
		}
	}

	// Set standard object headers.
	if err = setObjectHeaders(w, objInfo, rs, opts); err != nil {
		writeErrorResponseHeadersOnly(w, ToAPIError(ctx, err))
		return
	}

	// Set Parts Count Header
	if opts.PartNumber > 0 && len(objInfo.Parts) > 0 {
		setPartsCountHeaders(w, objInfo)
	}

	// Set any additional requested response headers.
	setHeadGetRespHeaders(w, r.URL.Query())

	// Successful response. Ranged/part-number HEADs answer 206.
	if rs != nil || opts.PartNumber > 0 {
		w.WriteHeader(http.StatusPartialContent)
	} else {
		w.WriteHeader(http.StatusOK)
	}

	// Notify object accessed via a HEAD request.
	sendEvent(eventArgs{
		EventName:    event.ObjectAccessedHead,
		BucketName:   bucket,
		Object:       objInfo,
		ReqParams:    extractReqParams(r),
		RespElements: extractRespElements(w),
		UserAgent:    r.UserAgent(),
		Host:         handlers.GetSourceIP(r),
	})
}

// Extract metadata relevant for an CopyObject operation based on conditional
// header values specified in X-Amz-Metadata-Directive.
func getCpObjMetadataFromHeader(ctx context.Context, r *http.Request, userMeta map[string]string) (map[string]string, error) {
	// Make a copy of the supplied metadata to avoid
	// to change the original one.
	defaultMeta := make(map[string]string, len(userMeta))
	for k, v := range userMeta {
		defaultMeta[k] = v
	}

	// remove SSE Headers from source info
	crypto.RemoveSSEHeaders(defaultMeta)

	// Storage class is special, it can be replaced regardless of the
	// metadata directive, if set should be preserved and replaced
	// to the destination metadata.
	sc := r.Header.Get(xhttp.AmzStorageClass)
	if sc == "" {
		// Fall back to the query parameter when the header is absent.
		sc = r.URL.Query().Get(xhttp.AmzStorageClass)
	}

	// if x-amz-metadata-directive says REPLACE then
	// we extract metadata from the input headers.
	if isDirectiveReplace(r.Header.Get(xhttp.AmzMetadataDirective)) {
		emetadata, err := extractMetadata(ctx, r)
		if err != nil {
			return nil, err
		}
		if sc != "" {
			emetadata[xhttp.AmzStorageClass] = sc
		}
		return emetadata, nil
	}

	if sc != "" {
		defaultMeta[xhttp.AmzStorageClass] = sc
	}

	// if x-amz-metadata-directive says COPY then we
	// return the default metadata.
	if isDirectiveCopy(r.Header.Get(xhttp.AmzMetadataDirective)) {
		return defaultMeta, nil
	}

	// Copy is default behavior if not x-amz-metadata-directive is set.
	// (Same result as the COPY branch above — kept explicit for clarity.)
	return defaultMeta, nil
}

// getRemoteInstanceTransport contains a singleton roundtripper.
// Lazily-initialized transport shared by remote-instance calls.
// NOTE(review): initialization site is not in this chunk — presumably
// guarded by getRemoteInstanceTransportOnce; confirm against the rest
// of the file before relying on it.
var (
	getRemoteInstanceTransport     *http.Transport
	getRemoteInstanceTransportOnce sync.Once
)

// CopyObjectHandler - Copy Object
// ----------
// This implementation of the PUT operation adds an object to a bucket
// while reading the object from another source.
// Notice: The S3 client can send secret keys in headers for encryption related jobs,
// the handler should ensure to remove these keys before sending them to the object layer.
// Currently these keys are:
//   - X-Amz-Server-Side-Encryption-Customer-Key
//   - X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key
//
// High-level flow (as implemented below): validate auth/directives for both
// source and destination, stream the source via GetObjectNInfo, optionally
// re-compress and/or re-encrypt the stream, merge/filter user metadata, then
// hand the assembled PutObjReader to the object layer's CopyObject.
func (api ObjectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
	ctx := NewContext(r, w, "CopyObject")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	objectAPI := api.ObjectAPI()
	if objectAPI == nil {
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
		return
	}

	if crypto.S3KMS.IsRequested(r.Header) { // SSE-KMS is not supported
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL, guessIsBrowserReq(r))
		return
	}

	// Reject encryption requests the backend cannot honor. In gateway mode
	// only SSE-C needs backend support; otherwise any SSE request does.
	if _, ok := crypto.IsRequested(r.Header); ok {
		if GlobalIsGateway {
			if crypto.SSEC.IsRequested(r.Header) && !objectAPI.IsEncryptionSupported() {
				WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL, guessIsBrowserReq(r))
				return
			}
		} else {
			if !objectAPI.IsEncryptionSupported() {
				WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL, guessIsBrowserReq(r))
				return
			}
		}
	}

	vars := mux.Vars(r)
	dstBucket := vars["bucket"]
	dstObject, err := unescapePath(vars["object"])
	if err != nil {
		WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	if s3Error := checkRequestAuthType(ctx, r, policy.PutObjectAction, dstBucket, dstObject); s3Error != ErrNone {
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
		return
	}

	// Read escaped copy source path to check for parameters.
	cpSrcPath := r.Header.Get(xhttp.AmzCopySource)
	var vid string
	if u, err := url.Parse(cpSrcPath); err == nil {
		vid = strings.TrimSpace(u.Query().Get(xhttp.VersionID))
		// Note that url.Parse does the unescaping
		cpSrcPath = u.Path
	}

	srcBucket, srcObject := path2BucketObject(cpSrcPath)
	// If source object is empty or bucket is empty, reply back invalid copy source.
	if srcObject == "" || srcBucket == "" {
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidCopySource), r.URL, guessIsBrowserReq(r))
		return
	}

	// A non-null source version id must be a well-formed UUID.
	if vid != "" && vid != nullVersionID {
		_, err := uuid.Parse(vid)
		if err != nil {
			WriteErrorResponse(ctx, w, ToAPIError(ctx, VersionNotFound{
				Bucket:    srcBucket,
				Object:    srcObject,
				VersionID: vid,
			}), r.URL, guessIsBrowserReq(r))
			return
		}
	}

	if s3Error := checkRequestAuthType(ctx, r, policy.GetObjectAction, srcBucket, srcObject); s3Error != ErrNone {
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
		return
	}

	// Check if metadata directive is valid.
	if !isDirectiveValid(r.Header.Get(xhttp.AmzMetadataDirective)) {
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidMetadataDirective), r.URL, guessIsBrowserReq(r))
		return
	}

	// check if tag directive is valid
	if !isDirectiveValid(r.Header.Get(xhttp.AmzTagDirective)) {
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidTagDirective), r.URL, guessIsBrowserReq(r))
		return
	}

	// Validate storage class metadata if present
	dstSc := r.Header.Get(xhttp.AmzStorageClass)
	if dstSc != "" && !storageclass.IsValid(dstSc) {
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidStorageClass), r.URL, guessIsBrowserReq(r))
		return
	}

	// Check if bucket encryption is enabled
	_, err = globalBucketSSEConfigSys.Get(dstBucket)
	// This request header needs to be set prior to setting ObjectOptions
	if (globalAutoEncryption || err == nil) && !crypto.SSEC.IsRequested(r.Header) {
		r.Header.Set(xhttp.AmzServerSideEncryption, xhttp.AmzEncryptionAES)
	}

	var srcOpts, dstOpts ObjectOptions
	srcOpts, err = copySrcOpts(ctx, r, srcBucket, srcObject)
	if err != nil {
		logger.LogIf(ctx, err)
		WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}
	srcOpts.VersionID = vid

	// convert copy src encryption options for GET calls
	var getOpts = ObjectOptions{VersionID: srcOpts.VersionID, Versioned: srcOpts.Versioned}
	getSSE := encrypt.SSE(srcOpts.ServerSideEncryption)
	if getSSE != srcOpts.ServerSideEncryption {
		getOpts.ServerSideEncryption = getSSE
	}

	dstOpts, err = copyDstOpts(ctx, r, dstBucket, dstObject, nil)
	if err != nil {
		logger.LogIf(ctx, err)
		WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}
	cpSrcDstSame := isStringEqual(pathJoin(srcBucket, srcObject), pathJoin(dstBucket, dstObject))

	getObjectNInfo := objectAPI.GetObjectNInfo
	if api.CacheAPI() != nil {
		getObjectNInfo = api.CacheAPI().GetObjectNInfo
	}

	// Precondition check runs inside the object layer while it holds the
	// object; a true return aborts the copy (response already written).
	checkCopyPrecondFn := func(o ObjectInfo) bool {
		if objectAPI.IsEncryptionSupported() {
			if _, err := DecryptObjectInfo(&o, r); err != nil {
				WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
				return true
			}
		}
		return checkCopyObjectPreconditions(ctx, w, r, o)
	}
	getOpts.CheckPrecondFn = checkCopyPrecondFn

	// FIXME: a possible race exists between a parallel
	// GetObject v/s CopyObject with metadata updates, ideally
	// we should be holding write lock here but it is not
	// possible due to other constraints such as knowing
	// the type of source content etc.
	lock := noLock
	if !cpSrcDstSame {
		lock = readLock
	}

	var rs *HTTPRangeSpec
	gr, err := getObjectNInfo(ctx, srcBucket, srcObject, rs, r.Header, lock, getOpts)
	if err != nil {
		// Precondition failures already wrote their response in checkCopyPrecondFn.
		if isErrPreconditionFailed(err) {
			return
		}
		if globalBucketVersioningSys.Enabled(srcBucket) && gr != nil {
			// Versioning enabled quite possibly object is deleted might be delete-marker
			// if present set the headers, no idea why AWS S3 sets these headers.
			if gr.ObjInfo.VersionID != "" && gr.ObjInfo.DeleteMarker {
				w.Header()[xhttp.AmzVersionID] = []string{gr.ObjInfo.VersionID}
				w.Header()[xhttp.AmzDeleteMarker] = []string{strconv.FormatBool(gr.ObjInfo.DeleteMarker)}
			}
		}
		WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}
	defer gr.Close()
	srcInfo := gr.ObjInfo

	// maximum Upload size for object in a single CopyObject operation.
	if isMaxObjectSize(srcInfo.Size) {
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrEntityTooLarge), r.URL, guessIsBrowserReq(r))
		return
	}

	// We have to copy metadata only if source and destination are same.
	// this changes for encryption which can be observed below.
	if cpSrcDstSame {
		srcInfo.metadataOnly = true
	}

	// A storage-class change forces a full rewrite even for src == dst.
	var chStorageClass bool
	if dstSc != "" {
		chStorageClass = true
		srcInfo.metadataOnly = false
	}

	var reader io.Reader = gr

	// Set the actual size to the compressed/decrypted size if encrypted.
	actualSize, err := srcInfo.GetActualSize()
	if err != nil {
		WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}
	length := actualSize

	if !cpSrcDstSame {
		if err := enforceBucketQuota(ctx, dstBucket, actualSize); err != nil {
			WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
			return
		}
	}

	// Check if either the source is encrypted or the destination will be encrypted.
	_, objectEncryption := crypto.IsRequested(r.Header)
	objectEncryption = objectEncryption || crypto.IsSourceEncrypted(srcInfo.UserDefined)

	var compressMetadata map[string]string
	// No need to compress for remote etcd calls
	// Pass the decompressed stream to such calls.
	isDstCompressed := objectAPI.IsCompressionSupported() &&
		isCompressible(r.Header, dstObject) && !cpSrcDstSame && !objectEncryption
	if isDstCompressed {
		compressMetadata = make(map[string]string, 2)
		// Preserving the compression metadata.
		compressMetadata[ReservedMetadataPrefix+"compression"] = compressionAlgorithmV2
		compressMetadata[ReservedMetadataPrefix+"actual-size"] = strconv.FormatInt(actualSize, 10)

		s2c := newS2CompressReader(reader, actualSize)
		defer s2c.Close()
		reader = etag.Wrap(s2c, reader)
		// Compressed size cannot be known in advance.
		length = -1
	} else {
		// Destination will not be compressed: drop any stale compression
		// markers inherited from the source metadata.
		delete(srcInfo.UserDefined, ReservedMetadataPrefix+"compression")
		delete(srcInfo.UserDefined, ReservedMetadataPrefix+"actual-size")
		reader = gr
	}

	srcInfo.Reader, err = hash.NewReader(reader, length, "", "", actualSize)
	if err != nil {
		WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	pReader := NewPutObjReader(srcInfo.Reader)

	// Handle encryption
	var encMetadata = make(map[string]string)
	if objectAPI.IsEncryptionSupported() {
		// Encryption parameters not applicable for this object.
		if _, ok := crypto.IsEncrypted(srcInfo.UserDefined); !ok && crypto.SSECopy.IsRequested(r.Header) {
			WriteErrorResponse(ctx, w, ToAPIError(ctx, errInvalidEncryptionParameters), r.URL, guessIsBrowserReq(r))
			return
		}
		// Encryption parameters not present for this object.
		if crypto.SSEC.IsEncrypted(srcInfo.UserDefined) && !crypto.SSECopy.IsRequested(r.Header) {
			WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidSSECustomerAlgorithm), r.URL, guessIsBrowserReq(r))
			return
		}

		var oldKey, newKey []byte
		var objEncKey crypto.ObjectKey
		sseCopyS3 := crypto.S3.IsEncrypted(srcInfo.UserDefined)
		sseCopyC := crypto.SSEC.IsEncrypted(srcInfo.UserDefined) && crypto.SSECopy.IsRequested(r.Header)
		sseC := crypto.SSEC.IsRequested(r.Header)
		sseS3 := crypto.S3.IsRequested(r.Header)

		isSourceEncrypted := sseCopyC || sseCopyS3
		isTargetEncrypted := sseC || sseS3

		if sseC {
			newKey, err = ParseSSECustomerRequest(r)
			if err != nil {
				WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
				return
			}
		}

		// If src == dst and either
		// - the object is encrypted using SSE-C and two different SSE-C keys are present
		// - the object is encrypted using SSE-S3 and the SSE-S3 header is present
		// - the object storage class is not changing
		// then execute a key rotation.
		if cpSrcDstSame && (sseCopyC && sseC) && !chStorageClass {
			oldKey, err = ParseSSECopyCustomerRequest(r.Header, srcInfo.UserDefined)
			if err != nil {
				WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
				return
			}

			// Carry over reserved (internal) metadata entries so rotateKey
			// can rewrite the sealed key material in place.
			for k, v := range srcInfo.UserDefined {
				if strings.HasPrefix(strings.ToLower(k), ReservedMetadataPrefixLower) {
					encMetadata[k] = v
				}
			}

			// In case of SSE-S3 oldKey and newKey aren't used - the KMS manages the keys.
			if err = rotateKey(oldKey, newKey, srcBucket, srcObject, encMetadata); err != nil {
				WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
				return
			}

			// Since we are rotating the keys, make sure to update the metadata.
			srcInfo.metadataOnly = true
			srcInfo.keyRotation = true
		} else {
			if isSourceEncrypted || isTargetEncrypted {
				// We are not only copying just metadata instead
				// we are creating a new object at this point, even
				// if source and destination are same objects.
				if !srcInfo.keyRotation {
					srcInfo.metadataOnly = false
				}
			}

			// Calculate the size of the target object
			var targetSize int64

			switch {
			case isDstCompressed:
				targetSize = -1
			case !isSourceEncrypted && !isTargetEncrypted:
				targetSize, _ = srcInfo.GetActualSize()
			case isSourceEncrypted && isTargetEncrypted:
				objInfo := ObjectInfo{Size: actualSize}
				targetSize = objInfo.EncryptedSize()
			case !isSourceEncrypted && isTargetEncrypted:
				targetSize = srcInfo.EncryptedSize()
			case isSourceEncrypted && !isTargetEncrypted:
				targetSize, _ = srcInfo.DecryptedSize()
			}

			if isTargetEncrypted {
				var encReader io.Reader
				encReader, objEncKey, err = newEncryptReader(srcInfo.Reader, newKey, dstBucket, dstObject, encMetadata, sseS3)
				if err != nil {
					WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
					return
				}
				reader = etag.Wrap(encReader, srcInfo.Reader)
			}

			if isSourceEncrypted {
				// Remove all source encrypted related metadata to
				// avoid copying them in target object.
				crypto.RemoveInternalEntries(srcInfo.UserDefined)
			}

			// do not try to verify encrypted content
			srcInfo.Reader, err = hash.NewReader(reader, targetSize, "", "", actualSize)
			if err != nil {
				WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
				return
			}

			if isTargetEncrypted {
				pReader, err = pReader.WithEncryption(srcInfo.Reader, &objEncKey)
				if err != nil {
					WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
					return
				}
			}
		}
	}

	srcInfo.PutObjReader = pReader

	srcInfo.UserDefined, err = getCpObjMetadataFromHeader(ctx, r, srcInfo.UserDefined)
	if err != nil {
		WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	objTags := srcInfo.UserTags
	// If x-amz-tagging-directive header is REPLACE, get passed tags.
	if isDirectiveReplace(r.Header.Get(xhttp.AmzTagDirective)) {
		objTags = r.Header.Get(xhttp.AmzObjectTagging)
		if _, err := tags.ParseObjectTags(objTags); err != nil {
			WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
			return
		}
		if GlobalIsGateway {
			srcInfo.UserDefined[xhttp.AmzTagDirective] = replaceDirective
		}
	}

	if objTags != "" {
		srcInfo.UserDefined[xhttp.AmzObjectTagging] = objTags
	}
	srcInfo.UserDefined = filterReplicationStatusMetadata(srcInfo.UserDefined)

	srcInfo.UserDefined = objectlock.FilterObjectLockMetadata(srcInfo.UserDefined, true, true)
	retPerms := isPutActionAllowed(ctx, getRequestAuthType(r), dstBucket, dstObject, r, iampolicy.PutObjectRetentionAction)
	holdPerms := isPutActionAllowed(ctx, getRequestAuthType(r), dstBucket, dstObject, r, iampolicy.PutObjectLegalHoldAction)
	getObjectInfo := objectAPI.GetObjectInfo
	if api.CacheAPI() != nil {
		getObjectInfo = api.CacheAPI().GetObjectInfo
	}

	// apply default bucket configuration/governance headers for dest side.
	retentionMode, retentionDate, legalHold, s3Err := checkPutObjectLockAllowed(ctx, r, dstBucket, dstObject, getObjectInfo, retPerms, holdPerms)
	if s3Err == ErrNone && retentionMode.Valid() {
		srcInfo.UserDefined[strings.ToLower(xhttp.AmzObjectLockMode)] = string(retentionMode)
		srcInfo.UserDefined[strings.ToLower(xhttp.AmzObjectLockRetainUntilDate)] = retentionDate.UTC().Format(iso8601TimeFormat)
	}
	if s3Err == ErrNone && legalHold.Status.Valid() {
		srcInfo.UserDefined[strings.ToLower(xhttp.AmzObjectLockLegalHold)] = string(legalHold.Status)
	}
	if s3Err != ErrNone {
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL, guessIsBrowserReq(r))
		return
	}
	if rs := r.Header.Get(xhttp.AmzBucketReplicationStatus); rs != "" {
		srcInfo.UserDefined[xhttp.AmzBucketReplicationStatus] = rs
	}
	if ok, _ := mustReplicate(ctx, r, dstBucket, dstObject, srcInfo.UserDefined, srcInfo.ReplicationStatus.String()); ok {
		srcInfo.UserDefined[xhttp.AmzBucketReplicationStatus] = replication.Pending.String()
	}
	// Store the preserved compression metadata.
	for k, v := range compressMetadata {
		srcInfo.UserDefined[k] = v
	}

	// We need to preserve the encryption headers set in EncryptRequest,
	// so we do not want to override them, copy them instead.
	for k, v := range encMetadata {
		srcInfo.UserDefined[k] = v
	}

	// Ensure that metadata does not contain sensitive information
	crypto.RemoveSensitiveEntries(srcInfo.UserDefined)

	// If we see legacy source, metadataOnly we have to overwrite the content.
	if srcInfo.Legacy {
		srcInfo.metadataOnly = false
	}

	// Check if x-amz-metadata-directive or x-amz-tagging-directive was not set to REPLACE and source,
	// destination are same objects. Apply this restriction also when
	// metadataOnly is true indicating that we are not overwriting the object.
	// if encryption is enabled we do not need explicit "REPLACE" metadata to
	// be enabled as well - this is to allow for key-rotation.
	if !isDirectiveReplace(r.Header.Get(xhttp.AmzMetadataDirective)) && !isDirectiveReplace(r.Header.Get(xhttp.AmzTagDirective)) &&
		srcInfo.metadataOnly && srcOpts.VersionID == "" && !objectEncryption {
		// If x-amz-metadata-directive is not set to REPLACE then we need
		// to error out if source and destination are same.
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidCopyDest), r.URL, guessIsBrowserReq(r))
		return
	}

	copyObjectFn := objectAPI.CopyObject
	if api.CacheAPI() != nil {
		copyObjectFn = api.CacheAPI().CopyObject
	}

	// Copy source object to destination, if source and destination
	// object is same then only metadata is updated.
	objInfo, err := copyObjectFn(ctx, srcBucket, srcObject, dstBucket, dstObject, srcInfo, srcOpts, dstOpts)
	if err != nil {
		WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}
	objInfo.ETag = getDecryptedETag(r.Header, objInfo, false)
	response := generateCopyObjectResponse(objInfo.ETag, objInfo.ModTime)
	encodedSuccessResponse := EncodeResponse(response)
	if replicate, sync := mustReplicate(ctx, r, dstBucket, dstObject, objInfo.UserDefined, objInfo.ReplicationStatus.String()); replicate {
		scheduleReplication(ctx, objInfo.Clone(), objectAPI, sync, replication.ObjectReplicationType)
	}

	setPutObjHeaders(w, objInfo, false)
	// We must not use the http.Header().Set method here because some (broken)
	// clients expect the x-amz-copy-source-version-id header key to be literally
	// "x-amz-copy-source-version-id"- not in canonicalized form, preserve it.
	if srcOpts.VersionID != "" {
		w.Header()[strings.ToLower(xhttp.AmzCopySourceVersionID)] = []string{srcOpts.VersionID}
	}

	// Write success response.
	WriteSuccessResponseXML(w, encodedSuccessResponse)

	// Notify object created event.
	sendEvent(eventArgs{
		EventName:    event.ObjectCreatedCopy,
		BucketName:   dstBucket,
		Object:       objInfo,
		ReqParams:    extractReqParams(r),
		RespElements: extractRespElements(w),
		UserAgent:    r.UserAgent(),
		Host:         handlers.GetSourceIP(r),
	})
}

// PutObjectHandler - PUT Object
// ----------
// This implementation of the PUT operation adds an object to a bucket.
// Notice: The S3 client can send secret keys in headers for encryption related jobs,
// the handler should ensure to remove these keys before sending them to the object layer.
// Currently these keys are:
//   - X-Amz-Server-Side-Encryption-Customer-Key
//   - X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key
func (api ObjectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Request) {
	ctx := NewContext(r, w, "PutObject")
	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	objectAPI := api.ObjectAPI()
	if objectAPI == nil {
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
		return
	}

	if crypto.S3KMS.IsRequested(r.Header) { // SSE-KMS is not supported
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL, guessIsBrowserReq(r))
		return
	}

	// Same backend-capability gate as CopyObjectHandler above.
	if _, ok := crypto.IsRequested(r.Header); ok {
		if GlobalIsGateway {
			if crypto.SSEC.IsRequested(r.Header) && !objectAPI.IsEncryptionSupported() {
				WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL, guessIsBrowserReq(r))
				return
			}
		} else {
			if !objectAPI.IsEncryptionSupported() {
				WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL, guessIsBrowserReq(r))
				return
			}
		}
	}

	vars := mux.Vars(r)
	bucket := vars["bucket"]
	object, err := unescapePath(vars["object"])
	if err != nil {
		WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	// X-Amz-Copy-Source shouldn't be set for this call.
	if _, ok := r.Header[xhttp.AmzCopySource]; ok {
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidCopySource), r.URL, guessIsBrowserReq(r))
		return
	}

	// Validate storage class metadata if present
	if sc := r.Header.Get(xhttp.AmzStorageClass); sc != "" {
		if !storageclass.IsValid(sc) {
			WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidStorageClass), r.URL, guessIsBrowserReq(r))
			return
		}
	}

	clientETag, err := etag.FromContentMD5(r.Header)
	if err != nil {
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidDigest), r.URL, guessIsBrowserReq(r))
		return
	}

	/// if Content-Length is unknown/missing, deny the request
	size := r.ContentLength
	// S3 appears to error if a Content-Length header is missing, regardless of
	// determinable length.
	if _, err := strconv.ParseInt(r.Header.Get(xhttp.ContentLength), 10, 64); err != nil {
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingContentLength), r.URL, guessIsBrowserReq(r))
		return
	}
	rAuthType := getRequestAuthType(r)
	if rAuthType == authTypeStreamingSigned {
		// For chunked (streaming) uploads the payload size comes from
		// x-amz-decoded-content-length, not Content-Length.
		if sizeStr, ok := r.Header[xhttp.AmzDecodedContentLength]; ok {
			if sizeStr[0] == "" {
				WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingContentLength), r.URL, guessIsBrowserReq(r))
				return
			}
			size, err = strconv.ParseInt(sizeStr[0], 10, 64)
			if err != nil {
				WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
				return
			}
		}
	}
	if size == -1 {
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingContentLength), r.URL, guessIsBrowserReq(r))
		return
	}

	/// maximum Upload size for objects in a single operation
	if isMaxObjectSize(size) {
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrEntityTooLarge), r.URL, guessIsBrowserReq(r))
		return
	}

	metadata, err := extractMetadata(ctx, r)
	if err != nil {
		WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	if objTags := r.Header.Get(xhttp.AmzObjectTagging); objTags != "" {
		if !objectAPI.IsTaggingSupported() {
			WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL, guessIsBrowserReq(r))
			return
		}

		if _, err := tags.ParseObjectTags(objTags); err != nil {
			WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
			return
		}

		metadata[xhttp.AmzObjectTagging] = objTags
	}

	var (
		md5hex              = clientETag.String()
		sha256hex           = ""
		reader    io.Reader = r.Body
		s3Err     APIErrorCode
		putObject = objectAPI.PutObject
	)

	// Check if put is allowed
	if s3Err = isPutActionAllowed(ctx, rAuthType, bucket, object, r, iampolicy.PutObjectAction); s3Err != ErrNone {
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL, guessIsBrowserReq(r))
		return
	}

	// Wrap/verify the body according to the request's signature scheme.
	switch rAuthType {
	case authTypeStreamingSigned:
		// Initialize stream signature verifier.
		reader, s3Err = newSignV4ChunkedReader(r)
		if s3Err != ErrNone {
			WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL, guessIsBrowserReq(r))
			return
		}
	case authTypeSignedV2, authTypePresignedV2:
		s3Err = isReqAuthenticatedV2(r)
		if s3Err != ErrNone {
			WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL, guessIsBrowserReq(r))
			return
		}

	case authTypePresigned, authTypeSigned:
		if s3Err = reqSignatureV4Verify(r, globalServerRegion, serviceS3); s3Err != ErrNone {
			WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL, guessIsBrowserReq(r))
			return
		}
		if !skipContentSha256Cksum(r) {
			sha256hex = getContentSha256Cksum(r, serviceS3)
		}
	}

	if err := enforceBucketQuota(ctx, bucket, size); err != nil {
		WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	// Check if bucket encryption is enabled
	_, err = globalBucketSSEConfigSys.Get(bucket)
	// This request header needs to be set prior to setting ObjectOptions
	if (globalAutoEncryption || err == nil) && !crypto.SSEC.IsRequested(r.Header) {
		r.Header.Set(xhttp.AmzServerSideEncryption, xhttp.AmzEncryptionAES)
	}

	actualSize := size
	if objectAPI.IsCompressionSupported() && isCompressible(r.Header, object) && size > 0 {
		// Storing the compression metadata.
		metadata[ReservedMetadataPrefix+"compression"] = compressionAlgorithmV2
		metadata[ReservedMetadataPrefix+"actual-size"] = strconv.FormatInt(size, 10)

		// Verify client MD5/SHA256 against the UNcompressed stream.
		actualReader, err := hash.NewReader(reader, size, md5hex, sha256hex, actualSize)
		if err != nil {
			WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
			return
		}

		// Set compression metrics.
		s2c := newS2CompressReader(actualReader, actualSize)
		defer s2c.Close()
		reader = etag.Wrap(s2c, actualReader)
		size = -1   // Since compressed size is un-predictable.
		md5hex = "" // Do not try to verify the content.
		sha256hex = ""
	}

	hashReader, err := hash.NewReader(reader, size, md5hex, sha256hex, actualSize)
	if err != nil {
		WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	rawReader := hashReader
	pReader := NewPutObjReader(rawReader)

	// get gateway encryption options
	var opts ObjectOptions
	opts, err = putOpts(ctx, r, bucket, object, metadata)
	if err != nil {
		WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	if api.CacheAPI() != nil {
		putObject = api.CacheAPI().PutObject
	}

	retPerms := isPutActionAllowed(ctx, getRequestAuthType(r), bucket, object, r, iampolicy.PutObjectRetentionAction)
	holdPerms := isPutActionAllowed(ctx, getRequestAuthType(r), bucket, object, r, iampolicy.PutObjectLegalHoldAction)

	getObjectInfo := objectAPI.GetObjectInfo
	if api.CacheAPI() != nil {
		getObjectInfo = api.CacheAPI().GetObjectInfo
	}

	// Apply object-lock (retention / legal-hold) settings if permitted.
	retentionMode, retentionDate, legalHold, s3Err := checkPutObjectLockAllowed(ctx, r, bucket, object, getObjectInfo, retPerms, holdPerms)
	if s3Err == ErrNone && retentionMode.Valid() {
		metadata[strings.ToLower(xhttp.AmzObjectLockMode)] = string(retentionMode)
		metadata[strings.ToLower(xhttp.AmzObjectLockRetainUntilDate)] = retentionDate.UTC().Format(iso8601TimeFormat)
	}
	if s3Err == ErrNone && legalHold.Status.Valid() {
		metadata[strings.ToLower(xhttp.AmzObjectLockLegalHold)] = string(legalHold.Status)
	}
	if s3Err != ErrNone {
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL, guessIsBrowserReq(r))
		return
	}
	if ok, _ := mustReplicate(ctx, r, bucket, object, metadata, ""); ok {
		metadata[xhttp.AmzBucketReplicationStatus] = replication.Pending.String()
	}
	// Incoming replica writes require the dedicated replicate permission.
	if r.Header.Get(xhttp.AmzBucketReplicationStatus) == replication.Replica.String() {
		if s3Err = isPutActionAllowed(ctx, getRequestAuthType(r), bucket, object, r, iampolicy.ReplicateObjectAction); s3Err != ErrNone {
			WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL, guessIsBrowserReq(r))
			return
		}
	}
	var objectEncryptionKey crypto.ObjectKey
	if objectAPI.IsEncryptionSupported() {
		if _, ok := crypto.IsRequested(r.Header); ok && !HasSuffix(object, SlashSeparator) { // handle SSE requests
			if crypto.SSECopy.IsRequested(r.Header) {
				WriteErrorResponse(ctx, w, ToAPIError(ctx, errInvalidEncryptionParameters), r.URL, guessIsBrowserReq(r))
				return
			}

			reader, objectEncryptionKey, err = EncryptRequest(hashReader, r, bucket, object, metadata)
			if err != nil {
				WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
				return
			}

			wantSize := int64(-1)
			if size >= 0 {
				info := ObjectInfo{Size: size}
				wantSize = info.EncryptedSize()
			}

			// do not try to verify encrypted content
			hashReader, err = hash.NewReader(etag.Wrap(reader, hashReader), wantSize, "", "", actualSize)
			if err != nil {
				WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
				return
			}
			pReader, err = pReader.WithEncryption(hashReader, &objectEncryptionKey)
			if err != nil {
				WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
				return
			}
		}
	}

	// Ensure that metadata does not contain sensitive information
	crypto.RemoveSensitiveEntries(metadata)

	// Create the object..
	objInfo, err := putObject(ctx, bucket, object, pReader, opts)
	if err != nil {
		WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	// Post-process the ETag for the response depending on how the object
	// was stored (encrypted vs. compressed).
	switch kind, encrypted := crypto.IsEncrypted(objInfo.UserDefined); {
	case encrypted:
		switch kind {
		case crypto.S3:
			w.Header().Set(xhttp.AmzServerSideEncryption, xhttp.AmzEncryptionAES)
			objInfo.ETag, _ = DecryptETag(objectEncryptionKey, ObjectInfo{ETag: objInfo.ETag})
		case crypto.SSEC:
			w.Header().Set(xhttp.AmzServerSideEncryptionCustomerAlgorithm, r.Header.Get(xhttp.AmzServerSideEncryptionCustomerAlgorithm))
			w.Header().Set(xhttp.AmzServerSideEncryptionCustomerKeyMD5, r.Header.Get(xhttp.AmzServerSideEncryptionCustomerKeyMD5))

			// Keep only the trailing 32 hex chars (MD5 length) unless the
			// ETag is a multipart-style "hash-count" value.
			if len(objInfo.ETag) >= 32 && strings.Count(objInfo.ETag, "-") != 1 {
				objInfo.ETag = objInfo.ETag[len(objInfo.ETag)-32:]
			}
		}
	case objInfo.IsCompressed():
		if !strings.HasSuffix(objInfo.ETag, "-1") {
			objInfo.ETag = objInfo.ETag + "-1"
		}
	}
	if replicate, sync := mustReplicate(ctx, r, bucket, object, metadata, ""); replicate {
		scheduleReplication(ctx, objInfo.Clone(), objectAPI, sync, replication.ObjectReplicationType)
	}
	setPutObjHeaders(w, objInfo, false)

	writeSuccessResponseHeadersOnly(w)

	// Notify object created event.
	sendEvent(eventArgs{
		EventName:    event.ObjectCreatedPut,
		BucketName:   bucket,
		Object:       objInfo,
		ReqParams:    extractReqParams(r),
		RespElements: extractRespElements(w),
		UserAgent:    r.UserAgent(),
		Host:         handlers.GetSourceIP(r),
	})
}

// PutObjectExtractHandler - PUT Object extract is an extended API
// based off from AWS Snowball feature to auto extract compressed
// stream will be extracted in the same directory it is stored in
// and the folder structures will be built out accordingly.
func (api ObjectAPIHandlers) PutObjectExtractHandler(w http.ResponseWriter, r *http.Request) {
	ctx := NewContext(r, w, "PutObjectExtract")
	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	objectAPI := api.ObjectAPI()
	if objectAPI == nil {
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
		return
	}

	if crypto.S3KMS.IsRequested(r.Header) { // SSE-KMS is not supported
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL, guessIsBrowserReq(r))
		return
	}

	// Same backend-capability gate as the handlers above.
	if _, ok := crypto.IsRequested(r.Header); ok {
		if GlobalIsGateway {
			if crypto.SSEC.IsRequested(r.Header) && !objectAPI.IsEncryptionSupported() {
				WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL, guessIsBrowserReq(r))
				return
			}
		} else {
			if !objectAPI.IsEncryptionSupported() {
				WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL, guessIsBrowserReq(r))
				return
			}
		}
	}

	vars := mux.Vars(r)
	bucket := vars["bucket"]
	object, err := unescapePath(vars["object"])
	if err != nil {
		WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	// X-Amz-Copy-Source shouldn't be set for this call.
1662 if _, ok := r.Header[xhttp.AmzCopySource]; ok { 1663 WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidCopySource), r.URL, guessIsBrowserReq(r)) 1664 return 1665 } 1666 1667 // Validate storage class metadata if present 1668 sc := r.Header.Get(xhttp.AmzStorageClass) 1669 if sc != "" { 1670 if !storageclass.IsValid(sc) { 1671 WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidStorageClass), r.URL, guessIsBrowserReq(r)) 1672 return 1673 } 1674 } 1675 1676 clientETag, err := etag.FromContentMD5(r.Header) 1677 if err != nil { 1678 WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidDigest), r.URL, guessIsBrowserReq(r)) 1679 return 1680 } 1681 1682 /// if Content-Length is unknown/missing, deny the request 1683 size := r.ContentLength 1684 rAuthType := getRequestAuthType(r) 1685 if rAuthType == authTypeStreamingSigned { 1686 if sizeStr, ok := r.Header[xhttp.AmzDecodedContentLength]; ok { 1687 if sizeStr[0] == "" { 1688 WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingContentLength), r.URL, guessIsBrowserReq(r)) 1689 return 1690 } 1691 size, err = strconv.ParseInt(sizeStr[0], 10, 64) 1692 if err != nil { 1693 WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r)) 1694 return 1695 } 1696 } 1697 } 1698 1699 if size == -1 { 1700 WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingContentLength), r.URL, guessIsBrowserReq(r)) 1701 return 1702 } 1703 1704 /// maximum Upload size for objects in a single operation 1705 if isMaxObjectSize(size) { 1706 WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrEntityTooLarge), r.URL, guessIsBrowserReq(r)) 1707 return 1708 } 1709 1710 var ( 1711 md5hex = clientETag.String() 1712 sha256hex = "" 1713 reader io.Reader = r.Body 1714 s3Err APIErrorCode 1715 putObject = objectAPI.PutObject 1716 ) 1717 1718 // Check if put is allowed 1719 if s3Err = isPutActionAllowed(ctx, rAuthType, bucket, object, r, iampolicy.PutObjectAction); s3Err != ErrNone { 1720 WriteErrorResponse(ctx, w, 
errorCodes.ToAPIErr(s3Err), r.URL, guessIsBrowserReq(r)) 1721 return 1722 } 1723 1724 switch rAuthType { 1725 case authTypeStreamingSigned: 1726 // Initialize stream signature verifier. 1727 reader, s3Err = newSignV4ChunkedReader(r) 1728 if s3Err != ErrNone { 1729 WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL, guessIsBrowserReq(r)) 1730 return 1731 } 1732 case authTypeSignedV2, authTypePresignedV2: 1733 s3Err = isReqAuthenticatedV2(r) 1734 if s3Err != ErrNone { 1735 WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL, guessIsBrowserReq(r)) 1736 return 1737 } 1738 1739 case authTypePresigned, authTypeSigned: 1740 if s3Err = reqSignatureV4Verify(r, globalServerRegion, serviceS3); s3Err != ErrNone { 1741 WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL, guessIsBrowserReq(r)) 1742 return 1743 } 1744 if !skipContentSha256Cksum(r) { 1745 sha256hex = getContentSha256Cksum(r, serviceS3) 1746 } 1747 } 1748 1749 hreader, err := hash.NewReader(reader, size, md5hex, sha256hex, size) 1750 if err != nil { 1751 WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r)) 1752 return 1753 } 1754 1755 if err := enforceBucketQuota(ctx, bucket, size); err != nil { 1756 WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r)) 1757 return 1758 } 1759 1760 // Check if bucket encryption is enabled 1761 _, err = globalBucketSSEConfigSys.Get(bucket) 1762 // This request header needs to be set prior to setting ObjectOptions 1763 if (globalAutoEncryption || err == nil) && !crypto.SSEC.IsRequested(r.Header) { 1764 r.Header.Set(xhttp.AmzServerSideEncryption, xhttp.AmzEncryptionAES) 1765 } 1766 1767 retPerms := isPutActionAllowed(ctx, getRequestAuthType(r), bucket, object, r, iampolicy.PutObjectRetentionAction) 1768 holdPerms := isPutActionAllowed(ctx, getRequestAuthType(r), bucket, object, r, iampolicy.PutObjectLegalHoldAction) 1769 1770 if api.CacheAPI() != nil { 1771 putObject = api.CacheAPI().PutObject 1772 } 1773 
1774 getObjectInfo := objectAPI.GetObjectInfo 1775 if api.CacheAPI() != nil { 1776 getObjectInfo = api.CacheAPI().GetObjectInfo 1777 } 1778 1779 putObjectTar := func(reader io.Reader, info os.FileInfo, object string) { 1780 size := info.Size() 1781 metadata := map[string]string{ 1782 xhttp.AmzStorageClass: sc, 1783 } 1784 1785 actualSize := size 1786 if objectAPI.IsCompressionSupported() && isCompressible(r.Header, object) && size > 0 { 1787 // Storing the compression metadata. 1788 metadata[ReservedMetadataPrefix+"compression"] = compressionAlgorithmV2 1789 metadata[ReservedMetadataPrefix+"actual-size"] = strconv.FormatInt(size, 10) 1790 1791 actualReader, err := hash.NewReader(reader, size, "", "", actualSize) 1792 if err != nil { 1793 WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r)) 1794 return 1795 } 1796 1797 // Set compression metrics. 1798 s2c := newS2CompressReader(actualReader, actualSize) 1799 defer s2c.Close() 1800 reader = etag.Wrap(s2c, actualReader) 1801 size = -1 // Since compressed size is un-predictable. 
1802 } 1803 1804 hashReader, err := hash.NewReader(reader, size, "", "", actualSize) 1805 if err != nil { 1806 WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r)) 1807 return 1808 } 1809 1810 rawReader := hashReader 1811 pReader := NewPutObjReader(rawReader) 1812 1813 // get encryption options 1814 opts, err := putOpts(ctx, r, bucket, object, metadata) 1815 if err != nil { 1816 WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r)) 1817 return 1818 } 1819 opts.MTime = info.ModTime() 1820 1821 retentionMode, retentionDate, legalHold, s3Err := checkPutObjectLockAllowed(ctx, r, bucket, object, getObjectInfo, retPerms, holdPerms) 1822 if s3Err == ErrNone && retentionMode.Valid() { 1823 metadata[strings.ToLower(xhttp.AmzObjectLockMode)] = string(retentionMode) 1824 metadata[strings.ToLower(xhttp.AmzObjectLockRetainUntilDate)] = retentionDate.UTC().Format(iso8601TimeFormat) 1825 } 1826 1827 if s3Err == ErrNone && legalHold.Status.Valid() { 1828 metadata[strings.ToLower(xhttp.AmzObjectLockLegalHold)] = string(legalHold.Status) 1829 } 1830 1831 if s3Err != ErrNone { 1832 WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL, guessIsBrowserReq(r)) 1833 return 1834 } 1835 1836 if ok, _ := mustReplicate(ctx, r, bucket, object, metadata, ""); ok { 1837 metadata[xhttp.AmzBucketReplicationStatus] = replication.Pending.String() 1838 } 1839 1840 if r.Header.Get(xhttp.AmzBucketReplicationStatus) == replication.Replica.String() { 1841 if s3Err = isPutActionAllowed(ctx, getRequestAuthType(r), bucket, object, r, iampolicy.ReplicateObjectAction); s3Err != ErrNone { 1842 WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL, guessIsBrowserReq(r)) 1843 return 1844 } 1845 } 1846 1847 var objectEncryptionKey crypto.ObjectKey 1848 if objectAPI.IsEncryptionSupported() { 1849 if _, ok := crypto.IsRequested(r.Header); ok && !HasSuffix(object, SlashSeparator) { // handle SSE requests 1850 if crypto.SSECopy.IsRequested(r.Header) { 
1851 WriteErrorResponse(ctx, w, ToAPIError(ctx, errInvalidEncryptionParameters), r.URL, guessIsBrowserReq(r)) 1852 return 1853 } 1854 1855 reader, objectEncryptionKey, err = EncryptRequest(hashReader, r, bucket, object, metadata) 1856 if err != nil { 1857 WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r)) 1858 return 1859 } 1860 1861 wantSize := int64(-1) 1862 if size >= 0 { 1863 info := ObjectInfo{Size: size} 1864 wantSize = info.EncryptedSize() 1865 } 1866 1867 // do not try to verify encrypted content 1868 hashReader, err = hash.NewReader(etag.Wrap(reader, hashReader), wantSize, "", "", actualSize) 1869 if err != nil { 1870 WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r)) 1871 return 1872 } 1873 1874 pReader, err = pReader.WithEncryption(hashReader, &objectEncryptionKey) 1875 if err != nil { 1876 WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r)) 1877 return 1878 } 1879 } 1880 } 1881 1882 // Ensure that metadata does not contain sensitive information 1883 crypto.RemoveSensitiveEntries(metadata) 1884 1885 // Create the object.. 1886 objInfo, err := putObject(ctx, bucket, object, pReader, opts) 1887 if err != nil { 1888 WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r)) 1889 return 1890 } 1891 1892 if replicate, sync := mustReplicate(ctx, r, bucket, object, metadata, ""); replicate { 1893 scheduleReplication(ctx, objInfo.Clone(), objectAPI, sync, replication.ObjectReplicationType) 1894 } 1895 1896 } 1897 1898 untar(hreader, putObjectTar) 1899 1900 w.Header()[xhttp.ETag] = []string{`"` + hex.EncodeToString(hreader.MD5Current()) + `"`} 1901 writeSuccessResponseHeadersOnly(w) 1902 } 1903 1904 /// Multipart ObjectAPIHandlers 1905 1906 // NewMultipartUploadHandler - New multipart upload. 
// Notice: The S3 client can send secret keys in headers for encryption related jobs,
// the handler should ensure to remove these keys before sending them to the object layer.
// Currently these keys are:
// - X-Amz-Server-Side-Encryption-Customer-Key
// - X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key
func (api ObjectAPIHandlers) NewMultipartUploadHandler(w http.ResponseWriter, r *http.Request) {
	ctx := NewContext(r, w, "NewMultipartUpload")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	objectAPI := api.ObjectAPI()
	if objectAPI == nil {
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
		return
	}

	if crypto.S3KMS.IsRequested(r.Header) { // SSE-KMS is not supported
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL, guessIsBrowserReq(r))
		return
	}

	// Reject SSE requests the backend cannot honor. In gateway mode only
	// SSE-C needs backend support; otherwise any SSE request does.
	if _, ok := crypto.IsRequested(r.Header); ok {
		if GlobalIsGateway {
			if crypto.SSEC.IsRequested(r.Header) && !objectAPI.IsEncryptionSupported() {
				WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL, guessIsBrowserReq(r))
				return
			}
		} else {
			if !objectAPI.IsEncryptionSupported() {
				WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL, guessIsBrowserReq(r))
				return
			}
		}
	}

	vars := mux.Vars(r)
	bucket := vars["bucket"]
	object, err := unescapePath(vars["object"])
	if err != nil {
		WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	if s3Error := checkRequestAuthType(ctx, r, policy.PutObjectAction, bucket, object); s3Error != ErrNone {
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
		return
	}

	// Check if bucket encryption is enabled
	_, err = globalBucketSSEConfigSys.Get(bucket)
	// This request header needs to be set prior to setting ObjectOptions
	if (globalAutoEncryption || err == nil) && !crypto.SSEC.IsRequested(r.Header) {
		r.Header.Set(xhttp.AmzServerSideEncryption, xhttp.AmzEncryptionAES)
	}

	// Validate storage class metadata if present
	if sc := r.Header.Get(xhttp.AmzStorageClass); sc != "" {
		if !storageclass.IsValid(sc) {
			WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidStorageClass), r.URL, guessIsBrowserReq(r))
			return
		}
	}

	// encMetadata collects SSE-related metadata produced by setEncryptionMetadata;
	// it is merged into the object metadata further below.
	var encMetadata = map[string]string{}

	if objectAPI.IsEncryptionSupported() {
		if _, ok := crypto.IsRequested(r.Header); ok {
			if err = setEncryptionMetadata(r, bucket, object, encMetadata); err != nil {
				WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
				return
			}
			// Set this for multipart only operations, we need to differentiate during
			// decryption if the file was actually multipart or not.
			encMetadata[ReservedMetadataPrefix+"Encrypted-Multipart"] = ""
		}
	}

	// Extract metadata that needs to be saved.
	metadata, err := extractMetadata(ctx, r)
	if err != nil {
		WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	retPerms := isPutActionAllowed(ctx, getRequestAuthType(r), bucket, object, r, iampolicy.PutObjectRetentionAction)
	holdPerms := isPutActionAllowed(ctx, getRequestAuthType(r), bucket, object, r, iampolicy.PutObjectLegalHoldAction)

	getObjectInfo := objectAPI.GetObjectInfo
	if api.CacheAPI() != nil {
		getObjectInfo = api.CacheAPI().GetObjectInfo
	}

	// Object-lock retention / legal-hold headers are folded into metadata when
	// valid; any lock error fails the whole request.
	retentionMode, retentionDate, legalHold, s3Err := checkPutObjectLockAllowed(ctx, r, bucket, object, getObjectInfo, retPerms, holdPerms)
	if s3Err == ErrNone && retentionMode.Valid() {
		metadata[strings.ToLower(xhttp.AmzObjectLockMode)] = string(retentionMode)
		metadata[strings.ToLower(xhttp.AmzObjectLockRetainUntilDate)] = retentionDate.UTC().Format(iso8601TimeFormat)
	}
	if s3Err == ErrNone && legalHold.Status.Valid() {
		metadata[strings.ToLower(xhttp.AmzObjectLockLegalHold)] = string(legalHold.Status)
	}
	if s3Err != ErrNone {
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL, guessIsBrowserReq(r))
		return
	}
	if ok, _ := mustReplicate(ctx, r, bucket, object, metadata, ""); ok {
		metadata[xhttp.AmzBucketReplicationStatus] = replication.Pending.String()
	}
	// We need to preserve the encryption headers set in EncryptRequest,
	// so we do not want to override them, copy them instead.
	for k, v := range encMetadata {
		metadata[k] = v
	}

	// Ensure that metadata does not contain sensitive information
	crypto.RemoveSensitiveEntries(metadata)

	if objectAPI.IsCompressionSupported() && isCompressible(r.Header, object) {
		// Storing the compression metadata.
		metadata[ReservedMetadataPrefix+"compression"] = compressionAlgorithmV2
	}

	opts, err := putOpts(ctx, r, bucket, object, metadata)
	if err != nil {
		WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}
	newMultipartUpload := objectAPI.NewMultipartUpload

	uploadID, err := newMultipartUpload(ctx, bucket, object, opts)
	if err != nil {
		WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	response := generateInitiateMultipartUploadResponse(bucket, object, uploadID)
	encodedSuccessResponse := EncodeResponse(response)

	// Write success response.
	WriteSuccessResponseXML(w, encodedSuccessResponse)
}

// CopyObjectPartHandler - uploads a part by copying data from an existing object as data source.
func (api ObjectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *http.Request) {
	ctx := NewContext(r, w, "CopyObjectPart")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	objectAPI := api.ObjectAPI()
	if objectAPI == nil {
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
		return
	}

	if crypto.S3KMS.IsRequested(r.Header) { // SSE-KMS is not supported
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL, guessIsBrowserReq(r))
		return
	}

	if _, ok := crypto.IsRequested(r.Header); !objectAPI.IsEncryptionSupported() && ok {
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL, guessIsBrowserReq(r))
		return
	}

	vars := mux.Vars(r)
	dstBucket := vars["bucket"]
	dstObject, err := unescapePath(vars["object"])
	if err != nil {
		WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	// Destination-write permission check (condition continues on the next file line).
	if s3Error := checkRequestAuthType(ctx, r,
		policy.PutObjectAction, dstBucket, dstObject); s3Error != ErrNone {
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
		return
	}

	// Read escaped copy source path to check for parameters.
	cpSrcPath := r.Header.Get(xhttp.AmzCopySource)
	var vid string
	if u, err := url.Parse(cpSrcPath); err == nil {
		vid = strings.TrimSpace(u.Query().Get(xhttp.VersionID))
		// Note that url.Parse does the unescaping
		cpSrcPath = u.Path
	}

	srcBucket, srcObject := path2BucketObject(cpSrcPath)
	// If source object is empty or bucket is empty, reply back invalid copy source.
	if srcObject == "" || srcBucket == "" {
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidCopySource), r.URL, guessIsBrowserReq(r))
		return
	}

	// A non-null version id must parse as a UUID; otherwise report the version
	// as not found rather than as a malformed request.
	if vid != "" && vid != nullVersionID {
		_, err := uuid.Parse(vid)
		if err != nil {
			WriteErrorResponse(ctx, w, ToAPIError(ctx, VersionNotFound{
				Bucket:    srcBucket,
				Object:    srcObject,
				VersionID: vid,
			}), r.URL, guessIsBrowserReq(r))
			return
		}
	}

	// Caller also needs read access on the copy source.
	if s3Error := checkRequestAuthType(ctx, r, policy.GetObjectAction, srcBucket, srcObject); s3Error != ErrNone {
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
		return
	}

	uploadID := r.URL.Query().Get(xhttp.UploadID)
	partIDString := r.URL.Query().Get(xhttp.PartNumber)

	partID, err := strconv.Atoi(partIDString)
	if err != nil {
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidPart), r.URL, guessIsBrowserReq(r))
		return
	}

	// check partID with maximum part ID for multipart objects
	if isMaxPartID(partID) {
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidMaxParts), r.URL, guessIsBrowserReq(r))
		return
	}

	var srcOpts, dstOpts ObjectOptions
	srcOpts, err = copySrcOpts(ctx, r, srcBucket, srcObject)
	if err != nil {
		WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}
	srcOpts.VersionID = vid

	// convert copy src and dst encryption options for GET/PUT calls
	var getOpts = ObjectOptions{VersionID: srcOpts.VersionID}
	if srcOpts.ServerSideEncryption != nil {
		getOpts.ServerSideEncryption = encrypt.SSE(srcOpts.ServerSideEncryption)
	}

	dstOpts, err = copyDstOpts(ctx, r, dstBucket, dstObject, nil)
	if err != nil {
		WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	getObjectNInfo := objectAPI.GetObjectNInfo
	if api.CacheAPI() != nil {
		getObjectNInfo = api.CacheAPI().GetObjectNInfo
	}

	// Get request range. rs stays nil when no x-amz-copy-source-range header
	// was sent; the parse error is deferred into the precondition check below.
	var rs *HTTPRangeSpec
	var parseRangeErr error
	if rangeHeader := r.Header.Get(xhttp.AmzCopySourceRange); rangeHeader != "" {
		rs, parseRangeErr = parseCopyPartRangeSpec(rangeHeader)
	}

	// Invoked via getOpts.CheckPrecondFn; returning true signals a failed
	// precondition for which a response has already been written.
	checkCopyPartPrecondFn := func(o ObjectInfo) bool {
		if objectAPI.IsEncryptionSupported() {
			if _, err := DecryptObjectInfo(&o, r); err != nil {
				WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
				return true
			}
		}
		if checkCopyObjectPartPreconditions(ctx, w, r, o) {
			return true
		}
		if parseRangeErr != nil {
			logger.LogIf(ctx, parseRangeErr)
			writeCopyPartErr(ctx, w, parseRangeErr, r.URL, guessIsBrowserReq(r))
			// Range header mismatch is pre-condition like failure
			// so return true to indicate Range precondition failed.
			return true
		}
		return false
	}
	getOpts.CheckPrecondFn = checkCopyPartPrecondFn
	gr, err := getObjectNInfo(ctx, srcBucket, srcObject, rs, r.Header, readLock, getOpts)
	if err != nil {
		if isErrPreconditionFailed(err) {
			// Response already written by checkCopyPartPrecondFn.
			return
		}
		if globalBucketVersioningSys.Enabled(srcBucket) && gr != nil {
			// Versioning enabled quite possibly object is deleted might be delete-marker
			// if present set the headers, no idea why AWS S3 sets these headers.
			if gr.ObjInfo.VersionID != "" && gr.ObjInfo.DeleteMarker {
				w.Header()[xhttp.AmzVersionID] = []string{gr.ObjInfo.VersionID}
				w.Header()[xhttp.AmzDeleteMarker] = []string{strconv.FormatBool(gr.ObjInfo.DeleteMarker)}
			}
		}
		WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}
	defer gr.Close()
	srcInfo := gr.ObjInfo

	// For encrypted sources the on-disk size differs from the logical size.
	actualPartSize := srcInfo.Size
	if _, ok := crypto.IsEncrypted(srcInfo.UserDefined); ok {
		actualPartSize, err = srcInfo.GetActualSize()
		if err != nil {
			WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
			return
		}
	}

	if err := enforceBucketQuota(ctx, dstBucket, actualPartSize); err != nil {
		WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	// Special care for CopyObjectPart
	if partRangeErr := checkCopyPartRangeWithSize(rs, actualPartSize); partRangeErr != nil {
		writeCopyPartErr(ctx, w, partRangeErr, r.URL, guessIsBrowserReq(r))
		return
	}

	// Get the object offset & length
	// NOTE(review): rs may be nil here (no range header); presumably
	// GetOffsetLength handles a nil receiver — confirm.
	startOffset, length, err := rs.GetOffsetLength(actualPartSize)
	if err != nil {
		WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	/// maximum copy size for multipart objects in a single operation
	if isMaxAllowedPartSize(length) {
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrEntityTooLarge), r.URL, guessIsBrowserReq(r))
		return
	}

	actualPartSize = length
	var reader io.Reader = etag.NewReader(gr, nil)

	mi, err := objectAPI.GetMultipartInfo(ctx, dstBucket, dstObject, uploadID, dstOpts)
	if err != nil {
		WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	// Read compression metadata preserved in the init multipart for the decision.
	_, isCompressed := mi.UserDefined[ReservedMetadataPrefix+"compression"]
	// Compress only if the compression is enabled during initial multipart.
	if isCompressed {
		s2c := newS2CompressReader(reader, actualPartSize)
		defer s2c.Close()
		reader = etag.Wrap(s2c, reader)
		length = -1 // compressed size is unpredictable
	}

	srcInfo.Reader, err = hash.NewReader(reader, length, "", "", actualPartSize)
	if err != nil {
		WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	// Re-derive destination options now that the upload's stored metadata is known.
	dstOpts, err = copyDstOpts(ctx, r, dstBucket, dstObject, mi.UserDefined)
	if err != nil {
		WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	rawReader := srcInfo.Reader
	pReader := NewPutObjReader(rawReader)

	_, isEncrypted := crypto.IsEncrypted(mi.UserDefined)
	var objectEncryptionKey crypto.ObjectKey
	if objectAPI.IsEncryptionSupported() && isEncrypted {
		// The SSE mode of the part must match the mode chosen at initiation.
		if !crypto.SSEC.IsRequested(r.Header) && crypto.SSEC.IsEncrypted(mi.UserDefined) {
			WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrSSEMultipartEncrypted), r.URL, guessIsBrowserReq(r))
			return
		}
		if crypto.S3.IsEncrypted(mi.UserDefined) && crypto.SSEC.IsRequested(r.Header) {
			WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrSSEMultipartEncrypted), r.URL, guessIsBrowserReq(r))
			return
		}
		var key []byte
		if crypto.SSEC.IsRequested(r.Header) {
			key, err = ParseSSECustomerRequest(r)
			if err != nil {
				WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
				return
			}
		}
		key, err = decryptObjectInfo(key, dstBucket, dstObject, mi.UserDefined)
		if err != nil {
			WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
			return
		}
		copy(objectEncryptionKey[:], key)

		// Each part is encrypted with a key derived from the object key and
		// the part number.
		partEncryptionKey := objectEncryptionKey.DerivePartKey(uint32(partID))
		encReader, err := sio.EncryptReader(reader, sio.Config{Key: partEncryptionKey[:], CipherSuites: fips.CipherSuitesDARE()})
		if err != nil {
			WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
			return
		}
		reader = etag.Wrap(encReader, reader)

		wantSize := int64(-1)
		if length >= 0 {
			info := ObjectInfo{Size: length}
			wantSize = info.EncryptedSize()
		}

		srcInfo.Reader, err = hash.NewReader(reader, wantSize, "", "", actualPartSize)
		if err != nil {
			WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
			return
		}
		pReader, err = pReader.WithEncryption(srcInfo.Reader, &objectEncryptionKey)
		if err != nil {
			WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
			return
		}
	}

	srcInfo.PutObjReader = pReader
	// Copy source object to destination, if source and destination
	// object is same then only metadata is updated.
	partInfo, err := objectAPI.CopyObjectPart(ctx, srcBucket, srcObject, dstBucket, dstObject, uploadID, partID,
		startOffset, length, srcInfo, srcOpts, dstOpts)
	if err != nil {
		WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	// Report the plaintext ETag to the client when the part was stored encrypted.
	if isEncrypted {
		partInfo.ETag = tryDecryptETag(objectEncryptionKey[:], partInfo.ETag, crypto.SSEC.IsRequested(r.Header))
	}

	response := generateCopyObjectPartResponse(partInfo.ETag, partInfo.LastModified)
	encodedSuccessResponse := EncodeResponse(response)

	// Write success response.
	WriteSuccessResponseXML(w, encodedSuccessResponse)
}

// PutObjectPartHandler - uploads an incoming part for an ongoing multipart operation.
func (api ObjectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http.Request) {
	ctx := NewContext(r, w, "PutObjectPart")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	objectAPI := api.ObjectAPI()
	if objectAPI == nil {
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
		return
	}

	if crypto.S3KMS.IsRequested(r.Header) { // SSE-KMS is not supported
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL, guessIsBrowserReq(r))
		return
	}

	// Reject SSE requests the backend cannot honor. In gateway mode only
	// SSE-C needs backend support; otherwise any SSE request does.
	if _, ok := crypto.IsRequested(r.Header); ok {
		if GlobalIsGateway {
			if crypto.SSEC.IsRequested(r.Header) && !objectAPI.IsEncryptionSupported() {
				WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL, guessIsBrowserReq(r))
				return
			}
		} else {
			if !objectAPI.IsEncryptionSupported() {
				WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL, guessIsBrowserReq(r))
				return
			}
		}
	}

	vars := mux.Vars(r)
	bucket := vars["bucket"]
	object, err := unescapePath(vars["object"])
	if err != nil {
		WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	// X-Amz-Copy-Source shouldn't be set for this call.
	if _, ok := r.Header[xhttp.AmzCopySource]; ok {
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidCopySource), r.URL, guessIsBrowserReq(r))
		return
	}

	clientETag, err := etag.FromContentMD5(r.Header)
	if err != nil {
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidDigest), r.URL, guessIsBrowserReq(r))
		return
	}

	/// if Content-Length is unknown/missing, throw away
	size := r.ContentLength

	rAuthType := getRequestAuthType(r)
	// For auth type streaming signature, we need to gather a different content length.
	if rAuthType == authTypeStreamingSigned {
		if sizeStr, ok := r.Header[xhttp.AmzDecodedContentLength]; ok {
			if sizeStr[0] == "" {
				WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingContentLength), r.URL, guessIsBrowserReq(r))
				return
			}
			size, err = strconv.ParseInt(sizeStr[0], 10, 64)
			if err != nil {
				WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
				return
			}
		}
	}
	if size == -1 {
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingContentLength), r.URL, guessIsBrowserReq(r))
		return
	}

	/// maximum Upload size for multipart objects in a single operation
	if isMaxAllowedPartSize(size) {
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrEntityTooLarge), r.URL, guessIsBrowserReq(r))
		return
	}

	uploadID := r.URL.Query().Get(xhttp.UploadID)
	partIDString := r.URL.Query().Get(xhttp.PartNumber)

	partID, err := strconv.Atoi(partIDString)
	if err != nil {
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidPart), r.URL, guessIsBrowserReq(r))
		return
	}

	// check partID with maximum part ID for multipart objects
	if isMaxPartID(partID) {
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidMaxParts), r.URL, guessIsBrowserReq(r))
		return
	}

	var (
		md5hex              = clientETag.String()
		sha256hex           = ""
		reader    io.Reader = r.Body
		s3Error   APIErrorCode
	)
	if s3Error = isPutActionAllowed(ctx, rAuthType, bucket, object, r, iampolicy.PutObjectAction); s3Error != ErrNone {
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
		return
	}

	// Wrap/verify the request body according to the request's auth type.
	switch rAuthType {
	case authTypeStreamingSigned:
		// Initialize stream signature verifier.
		reader, s3Error = newSignV4ChunkedReader(r)
		if s3Error != ErrNone {
			WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
			return
		}
	case authTypeSignedV2, authTypePresignedV2:
		if s3Error = isReqAuthenticatedV2(r); s3Error != ErrNone {
			WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
			return
		}
	case authTypePresigned, authTypeSigned:
		if s3Error = reqSignatureV4Verify(r, globalServerRegion, serviceS3); s3Error != ErrNone {
			WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
			return
		}

		if !skipContentSha256Cksum(r) {
			sha256hex = getContentSha256Cksum(r, serviceS3)
		}
	}

	if err := enforceBucketQuota(ctx, bucket, size); err != nil {
		WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	// actualSize keeps the pre-compression size; size may become -1 below.
	actualSize := size

	// get encryption options
	var opts ObjectOptions
	if crypto.SSEC.IsRequested(r.Header) {
		opts, err = getOpts(ctx, r, bucket, object)
		if err != nil {
			WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
			return
		}
	}

	mi, err := objectAPI.GetMultipartInfo(ctx, bucket, object, uploadID, opts)
	if err != nil {
		WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	// Read compression metadata preserved in the init multipart for the decision.
	_, isCompressed := mi.UserDefined[ReservedMetadataPrefix+"compression"]

	if objectAPI.IsCompressionSupported() && isCompressed {
		// Verify client digests against the uncompressed stream, then compress.
		actualReader, err := hash.NewReader(reader, size, md5hex, sha256hex, actualSize)
		if err != nil {
			WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
			return
		}

		// Set compression metrics.
		s2c := newS2CompressReader(actualReader, actualSize)
		defer s2c.Close()
		reader = etag.Wrap(s2c, actualReader)
		size = -1   // Since compressed size is un-predictable.
		md5hex = "" // Do not try to verify the content.
		sha256hex = ""
	}

	hashReader, err := hash.NewReader(reader, size, md5hex, sha256hex, actualSize)
	if err != nil {
		WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}
	rawReader := hashReader
	pReader := NewPutObjReader(rawReader)

	_, isEncrypted := crypto.IsEncrypted(mi.UserDefined)
	var objectEncryptionKey crypto.ObjectKey
	if objectAPI.IsEncryptionSupported() && isEncrypted {
		// An upload initiated with SSE-C must supply SSE-C keys on every part.
		if !crypto.SSEC.IsRequested(r.Header) && crypto.SSEC.IsEncrypted(mi.UserDefined) {
			WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrSSEMultipartEncrypted), r.URL, guessIsBrowserReq(r))
			return
		}

		opts, err = putOpts(ctx, r, bucket, object, mi.UserDefined)
		if err != nil {
			WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
			return
		}

		var key []byte
		if crypto.SSEC.IsRequested(r.Header) {
			key, err = ParseSSECustomerRequest(r)
			if err != nil {
				WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
				return
			}
		}

		// Calculating object encryption key
		key, err = decryptObjectInfo(key, bucket, object, mi.UserDefined)
		if err != nil {
			WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
			return
		}
		copy(objectEncryptionKey[:], key)

		partEncryptionKey := objectEncryptionKey.DerivePartKey(uint32(partID))
		in := io.Reader(hashReader)
		if size > encryptBufferThreshold {
			// The encryption reads in blocks of 64KB.
			// We add a buffer on bigger files to reduce the number of syscalls upstream.
			in = bufio.NewReaderSize(hashReader, encryptBufferSize)
		}
		reader, err = sio.EncryptReader(in, sio.Config{Key: partEncryptionKey[:], CipherSuites: fips.CipherSuitesDARE()})
		if err != nil {
			WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
			return
		}
		wantSize := int64(-1)
		if size >= 0 {
			info := ObjectInfo{Size: size}
			wantSize = info.EncryptedSize()
		}
		// do not try to verify encrypted content
		hashReader, err = hash.NewReader(etag.Wrap(reader, hashReader), wantSize, "", "", actualSize)
		if err != nil {
			WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
			return
		}
		pReader, err = pReader.WithEncryption(hashReader, &objectEncryptionKey)
		if err != nil {
			WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
			return
		}
	}

	putObjectPart := objectAPI.PutObjectPart

	partInfo, err := putObjectPart(ctx, bucket, object, uploadID, partID, pReader, opts)
	if err != nil {
		// Verify if the underlying error is signature mismatch.
		WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	// NOTE(review): this local shadows the imported etag package for the rest
	// of the function; the package is not referenced below, so it is benign.
	etag := partInfo.ETag
	switch kind, encrypted := crypto.IsEncrypted(mi.UserDefined); {
	case encrypted:
		switch kind {
		case crypto.S3:
			w.Header().Set(xhttp.AmzServerSideEncryption, xhttp.AmzEncryptionAES)
			etag = tryDecryptETag(objectEncryptionKey[:], etag, false)
		case crypto.SSEC:
			w.Header().Set(xhttp.AmzServerSideEncryptionCustomerAlgorithm, r.Header.Get(xhttp.AmzServerSideEncryptionCustomerAlgorithm))
			w.Header().Set(xhttp.AmzServerSideEncryptionCustomerKeyMD5, r.Header.Get(xhttp.AmzServerSideEncryptionCustomerKeyMD5))

			// For SSE-C, surface only the trailing 32 hex chars unless the
			// ETag is a multipart-style "hash-N" value.
			if len(etag) >= 32 && strings.Count(etag, "-") != 1 {
				etag = etag[len(etag)-32:]
			}
		}
	}

	// We must not use the http.Header().Set method here because some (broken)
	// clients expect the ETag header key to be literally "ETag" - not "Etag" (case-sensitive).
	// Therefore, we have to set the ETag directly as map entry.
2612 w.Header()[xhttp.ETag] = []string{"\"" + etag + "\""} 2613 2614 writeSuccessResponseHeadersOnly(w) 2615 } 2616 2617 // AbortMultipartUploadHandler - Abort multipart upload 2618 func (api ObjectAPIHandlers) AbortMultipartUploadHandler(w http.ResponseWriter, r *http.Request) { 2619 ctx := NewContext(r, w, "AbortMultipartUpload") 2620 2621 defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r)) 2622 2623 vars := mux.Vars(r) 2624 bucket := vars["bucket"] 2625 object, err := unescapePath(vars["object"]) 2626 if err != nil { 2627 WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r)) 2628 return 2629 } 2630 2631 objectAPI := api.ObjectAPI() 2632 if objectAPI == nil { 2633 WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r)) 2634 return 2635 } 2636 abortMultipartUpload := objectAPI.AbortMultipartUpload 2637 2638 if s3Error := checkRequestAuthType(ctx, r, policy.AbortMultipartUploadAction, bucket, object); s3Error != ErrNone { 2639 WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r)) 2640 return 2641 } 2642 2643 uploadID, _, _, _, s3Error := getObjectResources(r.URL.Query()) 2644 if s3Error != ErrNone { 2645 WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r)) 2646 return 2647 } 2648 opts := ObjectOptions{} 2649 if err := abortMultipartUpload(ctx, bucket, object, uploadID, opts); err != nil { 2650 WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r)) 2651 return 2652 } 2653 2654 writeSuccessNoContent(w) 2655 } 2656 2657 // ListObjectPartsHandler - List object parts 2658 func (api ObjectAPIHandlers) ListObjectPartsHandler(w http.ResponseWriter, r *http.Request) { 2659 ctx := NewContext(r, w, "ListObjectParts") 2660 2661 defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r)) 2662 2663 vars := mux.Vars(r) 2664 bucket := vars["bucket"] 2665 object, err := unescapePath(vars["object"]) 2666 if err != nil { 
// ListObjectPartsHandler - List object parts
//
// Handles the S3 ListParts API: returns the parts uploaded so far for a
// pending multipart upload. For encrypted uploads, stored ETags are
// decrypted and (for SSE-S3) part sizes are converted back from the
// on-disk encrypted size to the plaintext size before responding.
func (api ObjectAPIHandlers) ListObjectPartsHandler(w http.ResponseWriter, r *http.Request) {
	ctx := NewContext(r, w, "ListObjectParts")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	vars := mux.Vars(r)
	bucket := vars["bucket"]
	object, err := unescapePath(vars["object"])
	if err != nil {
		WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	objectAPI := api.ObjectAPI()
	if objectAPI == nil {
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
		return
	}

	if s3Error := checkRequestAuthType(ctx, r, policy.ListMultipartUploadPartsAction, bucket, object); s3Error != ErrNone {
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
		return
	}

	// Parse uploadId, part-number-marker, max-parts and encoding-type
	// from the query string; validate the numeric parameters.
	uploadID, partNumberMarker, maxParts, encodingType, s3Error := getObjectResources(r.URL.Query())
	if s3Error != ErrNone {
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
		return
	}
	if partNumberMarker < 0 {
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidPartNumberMarker), r.URL, guessIsBrowserReq(r))
		return
	}
	if maxParts < 0 {
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidMaxParts), r.URL, guessIsBrowserReq(r))
		return
	}

	opts := ObjectOptions{}
	listPartsInfo, err := objectAPI.ListObjectParts(ctx, bucket, object, uploadID, partNumberMarker, maxParts, opts)
	if err != nil {
		WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	var ssec bool
	if _, ok := crypto.IsEncrypted(listPartsInfo.UserDefined); ok && objectAPI.IsEncryptionSupported() {
		// key stays nil for SSE-S3; decryptObjectInfo derives the key
		// from the KMS-sealed metadata in that case.
		var key []byte
		if crypto.SSEC.IsEncrypted(listPartsInfo.UserDefined) {
			ssec = true
		}
		var objectEncryptionKey []byte
		if crypto.S3.IsEncrypted(listPartsInfo.UserDefined) {
			// Calculating object encryption key
			objectEncryptionKey, err = decryptObjectInfo(key, bucket, object, listPartsInfo.UserDefined)
			if err != nil {
				WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
				return
			}
		}
		for i := range listPartsInfo.Parts {
			curp := listPartsInfo.Parts[i]
			// Stored ETags are encrypted; present the decrypted form
			// (for SSE-C, tryDecryptETag is driven by the ssec flag).
			curp.ETag = tryDecryptETag(objectEncryptionKey, curp.ETag, ssec)
			if !ssec {
				// SSE-S3: report the plaintext size, not the DARE
				// ciphertext size stored on disk.
				var partSize uint64
				partSize, err = sio.DecryptedSize(uint64(curp.Size))
				if err != nil {
					WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
					return
				}
				curp.Size = int64(partSize)
			}
			listPartsInfo.Parts[i] = curp
		}
	}

	response := generateListPartsResponse(listPartsInfo, encodingType)
	encodedSuccessResponse := EncodeResponse(response)

	// Write success response.
	WriteSuccessResponseXML(w, encodedSuccessResponse)
}
// whiteSpaceWriter wraps an http.ResponseWriter (plus its Flusher) and
// remembers whether any response bytes have been written. Once a write
// has happened the status line is already on the wire, so subsequent
// WriteHeader calls are silently dropped instead of triggering the
// net/http "superfluous WriteHeader" warning.
type whiteSpaceWriter struct {
	http.ResponseWriter
	http.Flusher
	written bool
}

// Write forwards to the wrapped writer and marks the response as started.
func (w *whiteSpaceWriter) Write(b []byte) (int, error) {
	n, err := w.ResponseWriter.Write(b)
	w.written = true
	return n, err
}

// WriteHeader sends the status code only if nothing has been written yet.
func (w *whiteSpaceWriter) WriteHeader(statusCode int) {
	if w.written {
		return
	}
	w.ResponseWriter.WriteHeader(statusCode)
}
// sendWhiteSpace starts a goroutine that keeps the client connection alive
// while a slow completeMultiPartUpload() runs: every 10 seconds it emits the
// XML header (once) followed by single space characters, which client SDK
// XML parsers ignore. The first receive from the returned channel stops the
// goroutine and reports whether the header was already written. A channel
// send (rather than close) is used so no stray whitespace can race with the
// final XML response. Downside: we may send 200 OK and then an error XML
// body, but per the S3 spec clients must check for error XML even after a
// 200 OK.
func sendWhiteSpace(w http.ResponseWriter) <-chan bool {
	done := make(chan bool)
	go func() {
		ticker := time.NewTicker(10 * time.Second)
		defer ticker.Stop()
		wroteHeader := false
		for {
			select {
			case done <- wroteHeader:
				// Receiver picked up the state; stop keep-alives.
				return
			case <-ticker.C:
				if !wroteHeader {
					w.Write([]byte(xml.Header))
					wroteHeader = true
				}
				// Harmless padding ignored by XML parsers.
				w.Write([]byte(" "))
				w.(http.Flusher).Flush()
			}
		}
	}()
	return done
}
// CompleteMultipartUploadHandler - Complete multipart upload.
//
// Validates the client-supplied part list against the stored upload,
// stitches the parts into the final object, and streams whitespace
// keep-alives while the (potentially slow) backend completion runs.
func (api ObjectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWriter, r *http.Request) {
	ctx := NewContext(r, w, "CompleteMultipartUpload")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	vars := mux.Vars(r)
	bucket := vars["bucket"]
	object, err := unescapePath(vars["object"])
	if err != nil {
		WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	objectAPI := api.ObjectAPI()
	if objectAPI == nil {
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
		return
	}

	if s3Error := checkRequestAuthType(ctx, r, policy.PutObjectAction, bucket, object); s3Error != ErrNone {
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
		return
	}

	// Content-Length is required and should be non-zero
	if r.ContentLength <= 0 {
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingContentLength), r.URL, guessIsBrowserReq(r))
		return
	}

	// Get upload id.
	uploadID, _, _, _, s3Error := getObjectResources(r.URL.Query())
	if s3Error != ErrNone {
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
		return
	}

	// Decode the CompleteMultipartUpload XML body; it must list at least
	// one part, in ascending part-number order.
	complMultipartUpload := &CompleteMultipartUpload{}
	if err = xmlDecoder(r.Body, complMultipartUpload, r.ContentLength); err != nil {
		WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}
	if len(complMultipartUpload.Parts) == 0 {
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMalformedXML), r.URL, guessIsBrowserReq(r))
		return
	}
	if !sort.IsSorted(CompletedParts(complMultipartUpload.Parts)) {
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidPartOrder), r.URL, guessIsBrowserReq(r))
		return
	}

	// Reject retention or governance headers if set, CompleteMultipartUpload spec
	// does not use these headers, and should not be passed down to checkPutObjectLockAllowed
	if objectlock.IsObjectLockRequested(r.Header) || objectlock.IsObjectLockGovernanceBypassSet(r.Header) {
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL, guessIsBrowserReq(r))
		return
	}

	if _, _, _, s3Err := checkPutObjectLockAllowed(ctx, r, bucket, object, objectAPI.GetObjectInfo, ErrNone, ErrNone); s3Err != ErrNone {
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL, guessIsBrowserReq(r))
		return
	}

	// For encrypted uploads, recover the object encryption key (SSE-S3
	// only; key stays nil and decryptObjectInfo unseals from metadata) so
	// client-supplied ETags can be checked against decrypted stored ETags.
	var objectEncryptionKey []byte
	var isEncrypted, ssec bool
	if objectAPI.IsEncryptionSupported() {
		mi, err := objectAPI.GetMultipartInfo(ctx, bucket, object, uploadID, ObjectOptions{})
		if err != nil {
			WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
			return
		}
		if _, ok := crypto.IsEncrypted(mi.UserDefined); ok {
			var key []byte
			isEncrypted = true
			ssec = crypto.SSEC.IsEncrypted(mi.UserDefined)
			if crypto.S3.IsEncrypted(mi.UserDefined) {
				// Calculating object encryption key
				objectEncryptionKey, err = decryptObjectInfo(key, bucket, object, mi.UserDefined)
				if err != nil {
					WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
					return
				}
			}
		}
	}

	// Index the stored parts by part number for ETag validation below.
	// 10000 is the S3 maximum part count, so one listing covers all parts.
	partsMap := make(map[string]PartInfo)
	if isEncrypted {
		maxParts := 10000
		listPartsInfo, err := objectAPI.ListObjectParts(ctx, bucket, object, uploadID, 0, maxParts, ObjectOptions{})
		if err != nil {
			WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
			return
		}
		for _, part := range listPartsInfo.Parts {
			partsMap[strconv.Itoa(part.PartNumber)] = part
		}
	}

	// Complete parts.
	completeParts := make([]CompletePart, 0, len(complMultipartUpload.Parts))
	for _, part := range complMultipartUpload.Parts {
		part.ETag = canonicalizeETag(part.ETag)
		if isEncrypted {
			// ETag is stored in the backend in encrypted form. Validate client sent ETag with
			// decrypted ETag.
			if bkPartInfo, ok := partsMap[strconv.Itoa(part.PartNumber)]; ok {
				bkETag := tryDecryptETag(objectEncryptionKey, bkPartInfo.ETag, ssec)
				if bkETag != part.ETag {
					WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidPart), r.URL, guessIsBrowserReq(r))
					return
				}
				// Hand the backend the ETag form it stored.
				part.ETag = bkPartInfo.ETag
			}
		}
		completeParts = append(completeParts, part)
	}

	completeMultiPartUpload := objectAPI.CompleteMultipartUpload

	// This code is specifically to handle the requirements for slow
	// complete multipart upload operations on FS mode.
	writeErrorResponseWithoutXMLHeader := func(ctx context.Context, w http.ResponseWriter, err APIError, reqURL *url.URL) {
		switch err.Code {
		case "SlowDown", "XMinioServerNotInitialized", "XMinioReadQuorum", "XMinioWriteQuorum":
			// Set Retry-After header to indicate user-agents to retry request after 120secs.
			// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After
			w.Header().Set(xhttp.RetryAfter, "120")
		}

		// Generate error response.
		errorResponse := getAPIErrorResponse(ctx, err, reqURL.Path,
			w.Header().Get(xhttp.AmzRequestID), globalDeploymentID)
		encodedErrorResponse, _ := xml.Marshal(errorResponse)
		setCommonHeaders(w)
		w.Header().Set(xhttp.ContentType, string(mimeXML))
		w.Write(encodedErrorResponse)
		w.(http.Flusher).Flush()
	}

	setEventStreamHeaders(w)

	// Wrap the writer so a late WriteHeader after keep-alive bytes is a
	// no-op, then start the whitespace keep-alive while completing.
	w = &whiteSpaceWriter{ResponseWriter: w, Flusher: w.(http.Flusher)}
	completeDoneCh := sendWhiteSpace(w)
	objInfo, err := completeMultiPartUpload(ctx, bucket, object, uploadID, completeParts, ObjectOptions{})
	// Stop writing white spaces to the client. Note that close(doneCh) style is not used as it
	// can cause white space to be written after we send XML response in a race condition.
	headerWritten := <-completeDoneCh
	if err != nil {
		if headerWritten {
			// 200 + XML header already sent; emit error XML without a
			// second header.
			writeErrorResponseWithoutXMLHeader(ctx, w, ToAPIError(ctx, err), r.URL)
		} else {
			WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		}
		return
	}

	// Get object location.
	location := getObjectLocation(r, globalDomainNames, bucket, object)
	// Generate complete multipart response.
	response := generateCompleteMultpartUploadResponse(bucket, object, location, objInfo.ETag)
	var encodedSuccessResponse []byte
	if !headerWritten {
		encodedSuccessResponse = EncodeResponse(response)
	} else {
		// The XML header went out with the keep-alives; marshal the body
		// without prepending another header.
		encodedSuccessResponse, err = xml.Marshal(response)
		if err != nil {
			writeErrorResponseWithoutXMLHeader(ctx, w, ToAPIError(ctx, err), r.URL)
			return
		}
	}

	setPutObjHeaders(w, objInfo, false)
	if replicate, sync := mustReplicate(ctx, r, bucket, object, objInfo.UserDefined, objInfo.ReplicationStatus.String()); replicate {
		scheduleReplication(ctx, objInfo.Clone(), objectAPI, sync, replication.ObjectReplicationType)
	}

	// Write success response.
	WriteSuccessResponseXML(w, encodedSuccessResponse)

	// Notify object created event.
	sendEvent(eventArgs{
		EventName:    event.ObjectCreatedCompleteMultipartUpload,
		BucketName:   bucket,
		Object:       objInfo,
		ReqParams:    extractReqParams(r),
		RespElements: extractRespElements(w),
		UserAgent:    r.UserAgent(),
		Host:         handlers.GetSourceIP(r),
	})
}
// DeleteObjectHandler - delete an object
//
// Implements S3 DeleteObject semantics: the operation is idempotent, so a
// missing object still yields 204 No Content. Interacts with object-lock
// retention enforcement, delete-marker replication, and lifecycle
// transition cleanup.
func (api ObjectAPIHandlers) DeleteObjectHandler(w http.ResponseWriter, r *http.Request) {
	ctx := NewContext(r, w, "DeleteObject")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	vars := mux.Vars(r)
	bucket := vars["bucket"]
	object, err := unescapePath(vars["object"])
	if err != nil {
		WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	objectAPI := api.ObjectAPI()
	if objectAPI == nil {
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
		return
	}

	if s3Error := checkRequestAuthType(ctx, r, policy.DeleteObjectAction, bucket, object); s3Error != ErrNone {
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
		return
	}

	getObjectInfo := objectAPI.GetObjectInfo
	if api.CacheAPI() != nil {
		getObjectInfo = api.CacheAPI().GetObjectInfo
	}

	opts, err := delOpts(ctx, r, bucket, object)
	if err != nil {
		WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}
	var (
		hasLockEnabled, hasLifecycleConfig bool
		goi                                ObjectInfo
		gerr                               error
	)
	replicateDeletes := hasReplicationRules(ctx, bucket, []ObjectToDelete{{ObjectName: object, VersionID: opts.VersionID}})
	if rcfg, _ := globalBucketObjectLockSys.Get(bucket); rcfg.LockEnabled {
		hasLockEnabled = true
	}
	if _, err := globalBucketMetadataSys.GetLifecycleConfig(bucket); err == nil {
		hasLifecycleConfig = true
	}
	// Only stat the object when replication, locking or lifecycle need
	// its metadata; goi/gerr stay zero-valued otherwise.
	if replicateDeletes || hasLockEnabled || hasLifecycleConfig {
		goi, gerr = getObjectInfo(ctx, bucket, object, ObjectOptions{
			VersionID: opts.VersionID,
		})
	}

	replicateDel, replicateSync := checkReplicateDelete(ctx, bucket, ObjectToDelete{ObjectName: object, VersionID: opts.VersionID}, goi, gerr)
	if replicateDel {
		// Versioned deletes replicate as a version purge; unversioned
		// ones replicate the delete marker.
		if opts.VersionID != "" {
			opts.VersionPurgeStatus = Pending
		} else {
			opts.DeleteMarkerReplicationStatus = string(replication.Pending)
		}
	}

	vID := opts.VersionID
	if r.Header.Get(xhttp.AmzBucketReplicationStatus) == replication.Replica.String() {
		// check if replica has permission to be deleted.
		if apiErrCode := checkRequestAuthType(ctx, r, policy.ReplicateDeleteAction, bucket, object); apiErrCode != ErrNone {
			WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(apiErrCode), r.URL, guessIsBrowserReq(r))
			return
		}
		opts.DeleteMarkerReplicationStatus = replication.Replica.String()
		if opts.VersionPurgeStatus.Empty() {
			// opts.VersionID holds delete marker version ID to replicate and not yet present on disk
			vID = ""
		}
	}

	// Enforce retention/governance rules for versioned deletes on
	// lock-enabled buckets. ErrNoSuchKey is tolerated here and turned
	// into an idempotent 204 below.
	apiErr := ErrNone
	if rcfg, _ := globalBucketObjectLockSys.Get(bucket); rcfg.LockEnabled {
		if vID != "" {
			apiErr = enforceRetentionBypassForDelete(ctx, r, bucket, ObjectToDelete{
				ObjectName: object,
				VersionID:  vID,
			}, goi, gerr)
			if apiErr != ErrNone && apiErr != ErrNoSuchKey {
				WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(apiErr), r.URL, guessIsBrowserReq(r))
				return
			}
		}
	}

	if apiErr == ErrNoSuchKey {
		writeSuccessNoContent(w)
		return
	}

	deleteObject := objectAPI.DeleteObject
	if api.CacheAPI() != nil {
		deleteObject = api.CacheAPI().DeleteObject
	}

	// http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html
	objInfo, err := deleteObject(ctx, bucket, object, opts)
	if err != nil {
		switch err.(type) {
		case BucketNotFound:
			// When bucket doesn't exist specially handle it.
			WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
			return
		}
		// NOTE: other delete errors deliberately fall through — DELETE is
		// idempotent and a missing object still returns 204 below.
	}

	if objInfo.Name == "" {
		writeSuccessNoContent(w)
		return
	}

	setPutObjHeaders(w, objInfo, true)
	writeSuccessNoContent(w)

	eventName := event.ObjectRemovedDelete
	if objInfo.DeleteMarker {
		eventName = event.ObjectRemovedDeleteMarkerCreated
	}

	// Notify object deleted event.
	sendEvent(eventArgs{
		EventName:    eventName,
		BucketName:   bucket,
		Object:       objInfo,
		ReqParams:    extractReqParams(r),
		RespElements: extractRespElements(w),
		UserAgent:    r.UserAgent(),
		Host:         handlers.GetSourceIP(r),
	})

	if replicateDel {
		// Propagate either the delete-marker version or the purged
		// object version, never both.
		dmVersionID := ""
		versionID := ""
		if objInfo.DeleteMarker {
			dmVersionID = objInfo.VersionID
		} else {
			versionID = objInfo.VersionID
		}
		dobj := DeletedObjectVersionInfo{
			DeletedObject: DeletedObject{
				ObjectName:                    object,
				VersionID:                     versionID,
				DeleteMarkerVersionID:         dmVersionID,
				DeleteMarkerReplicationStatus: string(objInfo.ReplicationStatus),
				DeleteMarkerMTime:             DeleteMarkerMTime{objInfo.ModTime},
				DeleteMarker:                  objInfo.DeleteMarker,
				VersionPurgeStatus:            objInfo.VersionPurgeStatus,
			},
			Bucket: bucket,
		}
		scheduleReplicationDelete(ctx, dobj, objectAPI, replicateSync)
	}

	if goi.TransitionStatus == lifecycle.TransitionComplete { // clean up transitioned tier
		deleteTransitionedObject(ctx, objectAPI, bucket, object, lifecycle.ObjectOpts{
			Name:             object,
			UserTags:         goi.UserTags,
			VersionID:        goi.VersionID,
			DeleteMarker:     goi.DeleteMarker,
			TransitionStatus: goi.TransitionStatus,
			IsLatest:         goi.IsLatest,
		}, false, true)
	}
}
// PutObjectLegalHoldHandler - set legal hold configuration to object,
//
// Parses the legal-hold XML body, stamps the ON/OFF status into the
// object's user-defined metadata, persists it via PutObjectMetadata, and
// schedules metadata replication when bucket replication rules apply.
// Requires a lock-enabled bucket and a Content-MD5 header.
func (api ObjectAPIHandlers) PutObjectLegalHoldHandler(w http.ResponseWriter, r *http.Request) {
	ctx := NewContext(r, w, "PutObjectLegalHold")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	vars := mux.Vars(r)
	bucket := vars["bucket"]
	object, err := unescapePath(vars["object"])
	if err != nil {
		WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	objectAPI := api.ObjectAPI()
	if objectAPI == nil {
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
		return
	}

	// Check permissions to perform this legal hold operation
	if s3Err := checkRequestAuthType(ctx, r, policy.PutObjectLegalHoldAction, bucket, object); s3Err != ErrNone {
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL, guessIsBrowserReq(r))
		return
	}

	if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
		WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}
	// S3 requires Content-MD5 on lock-related PUTs.
	if !hasContentMD5(r.Header) {
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingContentMD5), r.URL, guessIsBrowserReq(r))
		return
	}

	if rcfg, _ := globalBucketObjectLockSys.Get(bucket); !rcfg.LockEnabled {
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidBucketObjectLockConfiguration), r.URL, guessIsBrowserReq(r))
		return
	}

	legalHold, err := objectlock.ParseObjectLegalHold(r.Body)
	if err != nil {
		WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	getObjectInfo := objectAPI.GetObjectInfo
	if api.CacheAPI() != nil {
		getObjectInfo = api.CacheAPI().GetObjectInfo
	}

	opts, err := getOpts(ctx, r, bucket, object)
	if err != nil {
		WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	objInfo, err := getObjectInfo(ctx, bucket, object, opts)
	if err != nil {
		WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}
	// Legal hold cannot be applied to a delete marker.
	if objInfo.DeleteMarker {
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL, guessIsBrowserReq(r))
		return
	}
	objInfo.UserDefined[strings.ToLower(xhttp.AmzObjectLockLegalHold)] = strings.ToUpper(string(legalHold.Status))
	replicate, sync := mustReplicate(ctx, r, bucket, object, objInfo.UserDefined, "")
	if replicate {
		objInfo.UserDefined[xhttp.AmzBucketReplicationStatus] = replication.Pending.String()
	}
	// if version-id is not specified retention is supposed to be set on the latest object.
	if opts.VersionID == "" {
		opts.VersionID = objInfo.VersionID
	}
	// Copy the (now updated) metadata into fresh options for the
	// metadata-only write.
	popts := ObjectOptions{
		MTime:       opts.MTime,
		VersionID:   opts.VersionID,
		UserDefined: make(map[string]string, len(objInfo.UserDefined)),
	}
	for k, v := range objInfo.UserDefined {
		popts.UserDefined[k] = v
	}
	if _, err = objectAPI.PutObjectMetadata(ctx, bucket, object, popts); err != nil {
		WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}
	if replicate {
		scheduleReplication(ctx, objInfo.Clone(), objectAPI, sync, replication.MetadataReplicationType)
	}
	writeSuccessResponseHeadersOnly(w)

	// Notify object event.
	sendEvent(eventArgs{
		EventName:    event.ObjectCreatedPutLegalHold,
		BucketName:   bucket,
		Object:       objInfo,
		ReqParams:    extractReqParams(r),
		RespElements: extractRespElements(w),
		UserAgent:    r.UserAgent(),
		Host:         handlers.GetSourceIP(r),
	})

}
3229 if replicate { 3230 objInfo.UserDefined[xhttp.AmzBucketReplicationStatus] = replication.Pending.String() 3231 } 3232 // if version-id is not specified retention is supposed to be set on the latest object. 3233 if opts.VersionID == "" { 3234 opts.VersionID = objInfo.VersionID 3235 } 3236 popts := ObjectOptions{ 3237 MTime: opts.MTime, 3238 VersionID: opts.VersionID, 3239 UserDefined: make(map[string]string, len(objInfo.UserDefined)), 3240 } 3241 for k, v := range objInfo.UserDefined { 3242 popts.UserDefined[k] = v 3243 } 3244 if _, err = objectAPI.PutObjectMetadata(ctx, bucket, object, popts); err != nil { 3245 WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r)) 3246 return 3247 } 3248 if replicate { 3249 scheduleReplication(ctx, objInfo.Clone(), objectAPI, sync, replication.MetadataReplicationType) 3250 } 3251 writeSuccessResponseHeadersOnly(w) 3252 3253 // Notify object event. 3254 sendEvent(eventArgs{ 3255 EventName: event.ObjectCreatedPutLegalHold, 3256 BucketName: bucket, 3257 Object: objInfo, 3258 ReqParams: extractReqParams(r), 3259 RespElements: extractRespElements(w), 3260 UserAgent: r.UserAgent(), 3261 Host: handlers.GetSourceIP(r), 3262 }) 3263 3264 } 3265 3266 // GetObjectLegalHoldHandler - get legal hold configuration to object, 3267 func (api ObjectAPIHandlers) GetObjectLegalHoldHandler(w http.ResponseWriter, r *http.Request) { 3268 ctx := NewContext(r, w, "GetObjectLegalHold") 3269 3270 defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r)) 3271 3272 vars := mux.Vars(r) 3273 bucket := vars["bucket"] 3274 object, err := unescapePath(vars["object"]) 3275 if err != nil { 3276 WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r)) 3277 return 3278 } 3279 3280 objectAPI := api.ObjectAPI() 3281 if objectAPI == nil { 3282 WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r)) 3283 return 3284 } 3285 if s3Error := checkRequestAuthType(ctx, r, 
policy.GetObjectLegalHoldAction, bucket, object); s3Error != ErrNone { 3286 WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r)) 3287 return 3288 } 3289 3290 getObjectInfo := objectAPI.GetObjectInfo 3291 if api.CacheAPI() != nil { 3292 getObjectInfo = api.CacheAPI().GetObjectInfo 3293 } 3294 3295 if rcfg, _ := globalBucketObjectLockSys.Get(bucket); !rcfg.LockEnabled { 3296 WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidBucketObjectLockConfiguration), r.URL, guessIsBrowserReq(r)) 3297 return 3298 } 3299 3300 opts, err := getOpts(ctx, r, bucket, object) 3301 if err != nil { 3302 WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r)) 3303 return 3304 } 3305 3306 objInfo, err := getObjectInfo(ctx, bucket, object, opts) 3307 if err != nil { 3308 WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r)) 3309 return 3310 } 3311 3312 legalHold := objectlock.GetObjectLegalHoldMeta(objInfo.UserDefined) 3313 if legalHold.IsEmpty() { 3314 WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNoSuchObjectLockConfiguration), r.URL, guessIsBrowserReq(r)) 3315 return 3316 } 3317 3318 WriteSuccessResponseXML(w, EncodeResponse(legalHold)) 3319 // Notify object legal hold accessed via a GET request. 
// PutObjectRetentionHandler - set object hold configuration to object,
//
// Sets or clears the retention mode and retain-until date on an object's
// user-defined metadata after enforcing governance-bypass rules, then
// persists the metadata and schedules replication when applicable.
// Requires a lock-enabled bucket and a Content-MD5 header.
func (api ObjectAPIHandlers) PutObjectRetentionHandler(w http.ResponseWriter, r *http.Request) {
	ctx := NewContext(r, w, "PutObjectRetention")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	vars := mux.Vars(r)
	bucket := vars["bucket"]
	object, err := unescapePath(vars["object"])
	if err != nil {
		WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	objectAPI := api.ObjectAPI()
	if objectAPI == nil {
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
		return
	}

	// Credentials are needed below for the governance-bypass check, so
	// validate the signature directly rather than via checkRequestAuthType.
	cred, owner, claims, s3Err := validateSignature(getRequestAuthType(r), r)
	if s3Err != ErrNone {
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL, guessIsBrowserReq(r))
		return
	}

	if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
		WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	// S3 requires Content-MD5 on lock-related PUTs.
	if !hasContentMD5(r.Header) {
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingContentMD5), r.URL, guessIsBrowserReq(r))
		return
	}

	if rcfg, _ := globalBucketObjectLockSys.Get(bucket); !rcfg.LockEnabled {
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidBucketObjectLockConfiguration), r.URL, guessIsBrowserReq(r))
		return
	}

	objRetention, err := objectlock.ParseObjectRetention(r.Body)
	if err != nil {
		// Surface the parse failure detail in the MalformedXML response.
		apiErr := errorCodes.ToAPIErr(ErrMalformedXML)
		apiErr.Description = err.Error()
		WriteErrorResponse(ctx, w, apiErr, r.URL, guessIsBrowserReq(r))
		return
	}

	opts, err := getOpts(ctx, r, bucket, object)
	if err != nil {
		WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	getObjectInfo := objectAPI.GetObjectInfo
	if api.CacheAPI() != nil {
		getObjectInfo = api.CacheAPI().GetObjectInfo
	}

	// Enforces existing retention (governance bypass rules) and returns
	// the current object info on success.
	objInfo, s3Err := enforceRetentionBypassForPut(ctx, r, bucket, object, getObjectInfo, objRetention, cred, owner, claims)
	if s3Err != ErrNone {
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL, guessIsBrowserReq(r))
		return
	}
	// Retention cannot be applied to a delete marker.
	if objInfo.DeleteMarker {
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL, guessIsBrowserReq(r))
		return
	}
	if objRetention.Mode.Valid() {
		objInfo.UserDefined[strings.ToLower(xhttp.AmzObjectLockMode)] = string(objRetention.Mode)
		objInfo.UserDefined[strings.ToLower(xhttp.AmzObjectLockRetainUntilDate)] = objRetention.RetainUntilDate.UTC().Format(time.RFC3339)
	} else {
		// An empty/invalid mode clears any existing retention metadata.
		objInfo.UserDefined[strings.ToLower(xhttp.AmzObjectLockMode)] = ""
		objInfo.UserDefined[strings.ToLower(xhttp.AmzObjectLockRetainUntilDate)] = ""
	}
	replicate, sync := mustReplicate(ctx, r, bucket, object, objInfo.UserDefined, "")
	if replicate {
		objInfo.UserDefined[xhttp.AmzBucketReplicationStatus] = replication.Pending.String()
	}
	// if version-id is not specified retention is supposed to be set on the latest object.
	if opts.VersionID == "" {
		opts.VersionID = objInfo.VersionID
	}
	// Copy the updated metadata into fresh options for the metadata-only write.
	popts := ObjectOptions{
		MTime:       opts.MTime,
		VersionID:   opts.VersionID,
		UserDefined: make(map[string]string, len(objInfo.UserDefined)),
	}
	for k, v := range objInfo.UserDefined {
		popts.UserDefined[k] = v
	}
	if _, err = objectAPI.PutObjectMetadata(ctx, bucket, object, popts); err != nil {
		WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}
	if replicate {
		scheduleReplication(ctx, objInfo.Clone(), objectAPI, sync, replication.MetadataReplicationType)
	}

	writeSuccessNoContent(w)
	// Notify object event.
	sendEvent(eventArgs{
		EventName:    event.ObjectCreatedPutRetention,
		BucketName:   bucket,
		Object:       objInfo,
		ReqParams:    extractReqParams(r),
		RespElements: extractRespElements(w),
		UserAgent:    r.UserAgent(),
		Host:         handlers.GetSourceIP(r),
	})
}
	// (tail of PutObjectRetentionHandler — the function opens above this view)
	// Default to the object's own version when the request did not pin one.
	if opts.VersionID == "" {
		opts.VersionID = objInfo.VersionID
	}
	// Build metadata-only put options carrying a copy of the object's
	// user-defined metadata (which now includes the retention headers).
	popts := ObjectOptions{
		MTime:       opts.MTime,
		VersionID:   opts.VersionID,
		UserDefined: make(map[string]string, len(objInfo.UserDefined)),
	}
	for k, v := range objInfo.UserDefined {
		popts.UserDefined[k] = v
	}
	if _, err = objectAPI.PutObjectMetadata(ctx, bucket, object, popts); err != nil {
		WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}
	// Queue metadata replication to the remote target when the bucket's
	// replication configuration requires it.
	if replicate {
		scheduleReplication(ctx, objInfo.Clone(), objectAPI, sync, replication.MetadataReplicationType)
	}

	writeSuccessNoContent(w)
	// Notify object event.
	sendEvent(eventArgs{
		EventName:    event.ObjectCreatedPutRetention,
		BucketName:   bucket,
		Object:       objInfo,
		ReqParams:    extractReqParams(r),
		RespElements: extractRespElements(w),
		UserAgent:    r.UserAgent(),
		Host:         handlers.GetSourceIP(r),
	})
}

// GetObjectRetentionHandler - get object retention configuration of object,
//
// Responds with the object's retention (mode + retain-until date) encoded as
// XML. Fails with ErrInvalidBucketObjectLockConfiguration when the bucket has
// no object-lock configuration, and with ErrNoSuchObjectLockConfiguration
// when the object carries no valid retention metadata.
func (api ObjectAPIHandlers) GetObjectRetentionHandler(w http.ResponseWriter, r *http.Request) {
	ctx := NewContext(r, w, "GetObjectRetention")
	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	vars := mux.Vars(r)
	bucket := vars["bucket"]
	object, err := unescapePath(vars["object"])
	if err != nil {
		WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	objectAPI := api.ObjectAPI()
	if objectAPI == nil {
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
		return
	}
	if s3Error := checkRequestAuthType(ctx, r, policy.GetObjectRetentionAction, bucket, object); s3Error != ErrNone {
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
		return
	}

	// Prefer the cache layer's stat when caching is enabled.
	getObjectInfo := objectAPI.GetObjectInfo
	if api.CacheAPI() != nil {
		getObjectInfo = api.CacheAPI().GetObjectInfo
	}

	// Retention only makes sense on buckets created with object lock.
	if rcfg, _ := globalBucketObjectLockSys.Get(bucket); !rcfg.LockEnabled {
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidBucketObjectLockConfiguration), r.URL, guessIsBrowserReq(r))
		return
	}

	opts, err := getOpts(ctx, r, bucket, object)
	if err != nil {
		WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	objInfo, err := getObjectInfo(ctx, bucket, object, opts)
	if err != nil {
		WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	// Retention is stored in the object's user-defined metadata.
	retention := objectlock.GetObjectRetentionMeta(objInfo.UserDefined)
	if !retention.Mode.Valid() {
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNoSuchObjectLockConfiguration), r.URL, guessIsBrowserReq(r))
		return
	}

	WriteSuccessResponseXML(w, EncodeResponse(retention))
	// Notify object retention accessed via a GET request.
	sendEvent(eventArgs{
		EventName:    event.ObjectAccessedGetRetention,
		BucketName:   bucket,
		Object:       objInfo,
		ReqParams:    extractReqParams(r),
		RespElements: extractRespElements(w),
		UserAgent:    r.UserAgent(),
		Host:         handlers.GetSourceIP(r),
	})
}

// GetObjectTaggingHandler - GET object tagging
//
// Returns the object's tag set as XML; when the request pinned a version the
// resolved version ID is echoed back in the x-amz-version-id header.
func (api ObjectAPIHandlers) GetObjectTaggingHandler(w http.ResponseWriter, r *http.Request) {
	ctx := NewContext(r, w, "GetObjectTagging")
	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	vars := mux.Vars(r)
	bucket := vars["bucket"]
	object, err := unescapePath(vars["object"])
	if err != nil {
		WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	objAPI := api.ObjectAPI()
	if objAPI == nil {
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
		return
	}

	// Backends may not support tagging at all (e.g. certain gateways).
	if !objAPI.IsTaggingSupported() {
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL, guessIsBrowserReq(r))
		return
	}

	// Allow getObjectTagging if policy action is set.
	if s3Error := checkRequestAuthType(ctx, r, policy.GetObjectTaggingAction, bucket, object); s3Error != ErrNone {
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
		return
	}

	opts, err := getOpts(ctx, r, bucket, object)
	if err != nil {
		WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	// Get object tags
	tags, err := objAPI.GetObjectTags(ctx, bucket, object, opts)
	if err != nil {
		WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	if opts.VersionID != "" {
		w.Header()[xhttp.AmzVersionID] = []string{opts.VersionID}
	}

	WriteSuccessResponseXML(w, EncodeResponse(tags))
}

// PutObjectTaggingHandler - PUT object tagging
//
// Parses the request body as a tag-set XML document, stores it on the object,
// schedules metadata replication when required, and emits an
// s3:ObjectCreated:PutTagging notification on success.
func (api ObjectAPIHandlers) PutObjectTaggingHandler(w http.ResponseWriter, r *http.Request) {
	ctx := NewContext(r, w, "PutObjectTagging")
	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	vars := mux.Vars(r)
	bucket := vars["bucket"]
	object, err := unescapePath(vars["object"])
	if err != nil {
		WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	objAPI := api.ObjectAPI()
	if objAPI == nil {
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
		return
	}
	if !objAPI.IsTaggingSupported() {
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL, guessIsBrowserReq(r))
		return
	}

	// Allow putObjectTagging if policy action is set
	if s3Error := checkRequestAuthType(ctx, r, policy.PutObjectTaggingAction, bucket, object); s3Error != ErrNone {
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
		return
	}

	// Bound the body read by the declared Content-Length. (The local `tags`
	// shadows the imported tags package from here on.)
	tags, err := tags.ParseObjectXML(io.LimitReader(r.Body, r.ContentLength))
	if err != nil {
		WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	opts, err := getOpts(ctx, r, bucket, object)
	if err != nil {
		WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	// Mark replication pending up-front so the stored metadata reflects it.
	replicate, sync := mustReplicate(ctx, r, bucket, object, map[string]string{xhttp.AmzObjectTagging: tags.String()}, "")
	if replicate {
		opts.UserDefined = make(map[string]string)
		opts.UserDefined[xhttp.AmzBucketReplicationStatus] = replication.Pending.String()
	}

	tagsStr := tags.String()

	// Put object tags
	objInfo, err := objAPI.PutObjectTags(ctx, bucket, object, tagsStr, opts)
	if err != nil {
		WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	if replicate {
		scheduleReplication(ctx, objInfo.Clone(), objAPI, sync, replication.MetadataReplicationType)
	}

	if objInfo.VersionID != "" {
		w.Header()[xhttp.AmzVersionID] = []string{objInfo.VersionID}
	}

	writeSuccessResponseHeadersOnly(w)

	sendEvent(eventArgs{
		EventName:    event.ObjectCreatedPutTagging,
		BucketName:   bucket,
		Object:       objInfo,
		ReqParams:    extractReqParams(r),
		RespElements: extractRespElements(w),
		UserAgent:    r.UserAgent(),
		Host:         handlers.GetSourceIP(r),
	})

}

// DeleteObjectTaggingHandler - DELETE object tagging
//
// Removes the object's tag set. The current tags are fetched first so the
// replication decision can be based on them; on success the handler replies
// 204 No Content and emits an ObjectCreatedDeleteTagging event (the "Created"
// prefix follows the upstream event naming, not the HTTP verb).
func (api ObjectAPIHandlers) DeleteObjectTaggingHandler(w http.ResponseWriter, r *http.Request) {
	ctx := NewContext(r, w, "DeleteObjectTagging")
	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	objAPI := api.ObjectAPI()
	if objAPI == nil {
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
		return
	}
	if !objAPI.IsTaggingSupported() {
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL, guessIsBrowserReq(r))
		return
	}

	vars := mux.Vars(r)
	bucket := vars["bucket"]
	object, err := unescapePath(vars["object"])
	if err != nil {
		WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	// Allow deleteObjectTagging if policy action is set
	if s3Error := checkRequestAuthType(ctx, r, policy.DeleteObjectTaggingAction, bucket, object); s3Error != ErrNone {
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
		return
	}

	opts, err := getOpts(ctx, r, bucket, object)
	if err != nil {
		WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	// Stat the object to obtain its existing tags for the replication check.
	oi, err := objAPI.GetObjectInfo(ctx, bucket, object, opts)
	if err != nil {
		WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}
	replicate, sync := mustReplicate(ctx, r, bucket, object, map[string]string{xhttp.AmzObjectTagging: oi.UserTags}, "")
	if replicate {
		opts.UserDefined = make(map[string]string)
		opts.UserDefined[xhttp.AmzBucketReplicationStatus] = replication.Pending.String()
	}

	oi, err = objAPI.DeleteObjectTags(ctx, bucket, object, opts)
	if err != nil {
		WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	if replicate {
		scheduleReplication(ctx, oi.Clone(), objAPI, sync, replication.MetadataReplicationType)
	}

	if oi.VersionID != "" {
		w.Header()[xhttp.AmzVersionID] = []string{oi.VersionID}
	}
	writeSuccessNoContent(w)

	sendEvent(eventArgs{
		EventName:    event.ObjectCreatedDeleteTagging,
		BucketName:   bucket,
		Object:       oi,
		ReqParams:    extractReqParams(r),
		RespElements: extractRespElements(w),
		UserAgent:    r.UserAgent(),
		Host:         handlers.GetSourceIP(r),
	})
}

// RestoreObjectHandler - POST restore object handler.
// ----------
// Initiates restoration of an object previously transitioned to a remote
// tier. For plain restores the object's metadata is updated with
// x-amz-restore bookkeeping and the actual data restore (or S3 Select
// evaluation) proceeds in a background goroutine. Repeated restore requests
// on an already-restored object only refresh the restore expiry and answer
// 202 Accepted.
func (api ObjectAPIHandlers) PostRestoreObjectHandler(w http.ResponseWriter, r *http.Request) {
	ctx := NewContext(r, w, "PostRestoreObject")
	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
	vars := mux.Vars(r)
	bucket := vars["bucket"]
	object, err := unescapePath(vars["object"])
	if err != nil {
		WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	// Fetch object stat info.
	objectAPI := api.ObjectAPI()
	if objectAPI == nil {
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
		return
	}

	getObjectInfo := objectAPI.GetObjectInfo
	if api.CacheAPI() != nil {
		getObjectInfo = api.CacheAPI().GetObjectInfo
	}

	// Check for auth type to return S3 compatible error.
	if s3Error := checkRequestAuthType(ctx, r, policy.RestoreObjectAction, bucket, object); s3Error != ErrNone {
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
		return
	}

	// A restore request always carries an XML body.
	if r.ContentLength <= 0 {
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrEmptyRequestBody), r.URL, guessIsBrowserReq(r))
		return
	}

	objInfo, err := getObjectInfo(ctx, bucket, object, ObjectOptions{})
	if err != nil {
		WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	// Only objects fully transitioned to a remote tier can be restored.
	if objInfo.TransitionStatus != lifecycle.TransitionComplete {
		WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidObjectState), r.URL, guessIsBrowserReq(r))
		return
	}

	rreq, err := parseRestoreRequest(io.LimitReader(r.Body, r.ContentLength))
	if err != nil {
		apiErr := errorCodes.ToAPIErr(ErrMalformedXML)
		apiErr.Description = err.Error()
		WriteErrorResponse(ctx, w, apiErr, r.URL, guessIsBrowserReq(r))
		return
	}
	// validate the request
	if err := rreq.validate(ctx, objectAPI); err != nil {
		apiErr := errorCodes.ToAPIErr(ErrMalformedXML)
		apiErr.Description = err.Error()
		WriteErrorResponse(ctx, w, apiErr, r.URL, guessIsBrowserReq(r))
		return
	}
	statusCode := http.StatusOK
	alreadyRestored := false
	// NOTE(review): `err` is necessarily nil here (every error path above
	// returned), so this guard is redundant; the enclosed checks always run.
	if err == nil {
		if objInfo.RestoreOngoing && rreq.Type != SelectRestoreRequest {
			WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrObjectRestoreAlreadyInProgress), r.URL, guessIsBrowserReq(r))
			return
		}
		// A finished, unexpired restore means we only refresh the expiry.
		if !objInfo.RestoreOngoing && !objInfo.RestoreExpires.IsZero() {
			statusCode = http.StatusAccepted
			alreadyRestored = true
		}
	}
	// set or upgrade restore expiry
	restoreExpiry := lifecycle.ExpectedExpiryTime(time.Now(), rreq.Days)
	metadata := cloneMSS(objInfo.UserDefined)

	// update self with restore metadata
	if rreq.Type != SelectRestoreRequest {
		objInfo.metadataOnly = true // Perform only metadata updates.
		ongoingReq := true
		if alreadyRestored {
			ongoingReq = false
		}
		metadata[xhttp.AmzRestoreExpiryDays] = strconv.Itoa(rreq.Days)
		metadata[xhttp.AmzRestoreRequestDate] = time.Now().UTC().Format(http.TimeFormat)
		// Already-restored objects record the refreshed expiry-date; a fresh
		// restore only records ongoing-request until the restore completes.
		if alreadyRestored {
			metadata[xhttp.AmzRestore] = fmt.Sprintf("ongoing-request=%t, expiry-date=%s", ongoingReq, restoreExpiry.Format(http.TimeFormat))
		} else {
			metadata[xhttp.AmzRestore] = fmt.Sprintf("ongoing-request=%t", ongoingReq)
		}
		objInfo.UserDefined = metadata
		// Self-copy persists the metadata update on the same version.
		if _, err := objectAPI.CopyObject(GlobalContext, bucket, object, bucket, object, objInfo, ObjectOptions{
			VersionID: objInfo.VersionID,
		}, ObjectOptions{
			VersionID: objInfo.VersionID,
		}); err != nil {
			logger.LogIf(ctx, fmt.Errorf("Unable to update replication metadata for %s: %s", objInfo.VersionID, err))
			WriteErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidObjectState), r.URL, guessIsBrowserReq(r))
			return
		}
		// for previously restored object, just update the restore expiry
		if alreadyRestored {
			return
		}
	}

	restoreObject := mustGetUUID()
	if rreq.OutputLocation.S3.BucketName != "" {
		w.Header()[xhttp.AmzRestoreOutputPath] = []string{pathJoin(rreq.OutputLocation.S3.BucketName, rreq.OutputLocation.S3.Prefix, restoreObject)}
	}
	w.WriteHeader(statusCode)
	// Notify object restore started via a POST request.
	sendEvent(eventArgs{
		EventName:  event.ObjectRestorePostInitiated,
		BucketName: bucket,
		Object:     objInfo,
		ReqParams:  extractReqParams(r),
		UserAgent:  r.UserAgent(),
		Host:       handlers.GetSourceIP(r),
	})
	// now process the restore in background
	// NOTE(review): this goroutine keeps using `w` and `r` after the handler
	// has returned (the select-error path even writes a response) — confirm
	// this is safe with the server's response-writer lifetime.
	go func() {
		rctx := GlobalContext
		if !rreq.SelectParameters.IsEmpty() {
			// Reader factory handed to the select engine; a negative offset
			// is treated as a suffix-length range.
			getObject := func(offset, length int64) (rc io.ReadCloser, err error) {
				isSuffixLength := false
				if offset < 0 {
					isSuffixLength = true
				}

				rs := &HTTPRangeSpec{
					IsSuffixLength: isSuffixLength,
					Start:          offset,
					End:            offset + length,
				}

				return getTransitionedObjectReader(rctx, bucket, object, rs, r.Header, objInfo, ObjectOptions{
					VersionID: objInfo.VersionID,
				})
			}
			if err = rreq.SelectParameters.Open(getObject); err != nil {
				if serr, ok := err.(s3select.SelectError); ok {
					encodedErrorResponse := EncodeResponse(APIErrorResponse{
						Code:       serr.ErrorCode(),
						Message:    serr.ErrorMessage(),
						BucketName: bucket,
						Key:        object,
						Resource:   r.URL.Path,
						RequestID:  w.Header().Get(xhttp.AmzRequestID),
						HostID:     globalDeploymentID,
					})
					writeResponse(w, serr.HTTPStatusCode(), encodedErrorResponse, mimeXML)
				} else {
					WriteErrorResponse(ctx, w, ToAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
				}
				return
			}
			// Evaluate the select expression into a throwaway recorder; the
			// logging response-writer captures the body for diagnostics.
			nr := httptest.NewRecorder()
			rw := logger.NewResponseWriter(nr)
			rw.LogErrBody = true
			rw.LogAllBody = true
			rreq.SelectParameters.Evaluate(rw)
			rreq.SelectParameters.Close()
			return
		}
		// Plain restore: pull the data back from the remote tier. Errors are
		// intentionally dropped here (best effort; failure leaves the
		// ongoing-request marker in place).
		if err := restoreTransitionedObject(rctx, bucket, object, objectAPI, objInfo, rreq, restoreExpiry); err != nil {
			return
		}

		// Notify object restore completed via a POST request.
		sendEvent(eventArgs{
			EventName:  event.ObjectRestorePostCompleted,
			BucketName: bucket,
			Object:     objInfo,
			ReqParams:  extractReqParams(r),
			UserAgent:  r.UserAgent(),
			Host:       handlers.GetSourceIP(r),
		})
	}()
}