storj.io/minio@v0.0.0-20230509071714-0cbc90f649b1/cmd/fs-v1-multipart.go

/*
 * MinIO Cloud Storage, (C) 2016, 2017, 2018 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io/ioutil"
	"os"
	pathutil "path"
	"sort"
	"strconv"
	"strings"
	"time"

	jsoniter "github.com/json-iterator/go"

	"storj.io/minio/cmd/logger"
	xioutil "storj.io/minio/pkg/ioutil"
	"storj.io/minio/pkg/trie"
)

// Returns EXPORT/.minio.sys/multipart/SHA256/UPLOADID
func (fs *FSObjects) getUploadIDDir(bucket, object, uploadID string) string {
	return pathJoin(fs.fsPath, minioMetaMultipartBucket, getSHA256Hash([]byte(pathJoin(bucket, object))), uploadID)
}

// Returns EXPORT/.minio.sys/multipart/SHA256
func (fs *FSObjects) getMultipartSHADir(bucket, object string) string {
	return pathJoin(fs.fsPath, minioMetaMultipartBucket, getSHA256Hash([]byte(pathJoin(bucket, object))))
}

// Returns partNumber.etag.actualSize
func (fs *FSObjects) encodePartFile(partNumber int, etag string, actualSize int64) string {
	return fmt.Sprintf("%.5d.%s.%d", partNumber, etag, actualSize)
}

// Returns the partNumber, etag and actualSize encoded in a part file name.
func (fs *FSObjects) decodePartFile(name string) (partNumber int, etag string, actualSize int64, err error) {
	result := strings.Split(name, ".")
	if len(result) != 3 {
		return 0, "", 0, errUnexpected
	}
	partNumber, err = strconv.Atoi(result[0])
	if err != nil {
		return 0, "", 0, errUnexpected
	}
	actualSize, err = strconv.ParseInt(result[2], 10, 64)
	if err != nil {
		return 0, "", 0, errUnexpected
	}
	return partNumber, result[1], actualSize, nil
}
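// For illustration, the encoding round-trips (assuming an MD5 hex etag,
// which never contains '.'):
//
//	name := fs.encodePartFile(7, "d41d8cd98f00b204e9800998ecf8427e", 1048576)
//	// name == "00007.d41d8cd98f00b204e9800998ecf8427e.1048576"
//	partNumber, etag, actualSize, _ := fs.decodePartFile(name)
//	// partNumber == 7, etag == "d41d8cd98f00b204e9800998ecf8427e", actualSize == 1048576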
// Appends parts to an appendFile sequentially.
func (fs *FSObjects) backgroundAppend(ctx context.Context, bucket, object, uploadID string) {
	fs.appendFileMapMu.Lock()
	logger.GetReqInfo(ctx).AppendTags("uploadID", uploadID)
	file := fs.appendFileMap[uploadID]
	if file == nil {
		file = &fsAppendFile{
			filePath: pathJoin(fs.fsPath, minioMetaTmpBucket, fs.fsUUID, fmt.Sprintf("%s.%s", uploadID, mustGetUUID())),
		}
		fs.appendFileMap[uploadID] = file
	}
	fs.appendFileMapMu.Unlock()

	file.Lock()
	defer file.Unlock()

	// Since we append sequentially, nextPartNumber will always be len(file.parts)+1.
	nextPartNumber := len(file.parts) + 1
	uploadIDDir := fs.getUploadIDDir(bucket, object, uploadID)

	entries, err := readDir(uploadIDDir)
	if err != nil {
		logger.GetReqInfo(ctx).AppendTags("uploadIDDir", uploadIDDir)
		logger.LogIf(ctx, err)
		return
	}
	sort.Strings(entries)

	for _, entry := range entries {
		if entry == fs.metaJSONFile {
			continue
		}
		partNumber, etag, actualSize, err := fs.decodePartFile(entry)
		if err != nil {
			// Skip part files whose names don't match the expected format.
			// These could be backend filesystem-specific files.
			continue
		}
		if partNumber < nextPartNumber {
			// Part already appended.
			continue
		}
		if partNumber > nextPartNumber {
			// Required part number is not yet uploaded.
			return
		}

		partPath := pathJoin(uploadIDDir, entry)
		err = xioutil.AppendFile(file.filePath, partPath, globalFSOSync)
		if err != nil {
			reqInfo := logger.GetReqInfo(ctx).AppendTags("partPath", partPath)
			reqInfo.AppendTags("filepath", file.filePath)
			logger.LogIf(ctx, err)
			return
		}

		file.parts = append(file.parts, PartInfo{PartNumber: partNumber, ETag: etag, ActualSize: actualSize})
		nextPartNumber++
	}
}

// ListMultipartUploads - lists all the uploadIDs for the specified object.
// We do not support prefix-based listing.
func (fs *FSObjects) ListMultipartUploads(ctx context.Context, bucket, object, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result ListMultipartsInfo, e error) {
	if err := checkListMultipartArgs(ctx, bucket, object, keyMarker, uploadIDMarker, delimiter, fs); err != nil {
		return result, toObjectErr(err)
	}

	if _, err := fs.statBucketDir(ctx, bucket); err != nil {
		return result, toObjectErr(err, bucket)
	}

	result.MaxUploads = maxUploads
	result.KeyMarker = keyMarker
	result.Prefix = object
	result.Delimiter = delimiter
	result.NextKeyMarker = object
	result.UploadIDMarker = uploadIDMarker

	uploadIDs, err := readDir(fs.getMultipartSHADir(bucket, object))
	if err != nil {
		if err == errFileNotFound {
			result.IsTruncated = false
			return result, nil
		}
		logger.LogIf(ctx, err)
		return result, toObjectErr(err)
	}

	// The S3 spec says uploadIDs should be sorted by initiation time. The
	// ModTime of fs.json is the creation time of the uploadID, hence we use that.
	var uploads []MultipartInfo
	for _, uploadID := range uploadIDs {
		metaFilePath := pathJoin(fs.getMultipartSHADir(bucket, object), uploadID, fs.metaJSONFile)
		fi, err := fsStatFile(ctx, metaFilePath)
		if err != nil {
			return result, toObjectErr(err, bucket, object)
		}
		uploads = append(uploads, MultipartInfo{
			Object:    object,
			UploadID:  strings.TrimSuffix(uploadID, SlashSeparator),
			Initiated: fi.ModTime(),
		})
	}
	sort.Slice(uploads, func(i int, j int) bool {
		return uploads[i].Initiated.Before(uploads[j].Initiated)
	})

	// If a marker was given, resume listing at the entry right after it.
	uploadIndex := 0
	if uploadIDMarker != "" {
		for uploadIndex < len(uploads) {
			if uploads[uploadIndex].UploadID == uploadIDMarker {
				uploadIndex++
				break
			}
			uploadIndex++
		}
	}
	for uploadIndex < len(uploads) {
		result.Uploads = append(result.Uploads, uploads[uploadIndex])
		result.NextUploadIDMarker = uploads[uploadIndex].UploadID
		uploadIndex++
		if len(result.Uploads) == maxUploads {
			break
		}
	}

	result.IsTruncated = uploadIndex < len(uploads)

	if !result.IsTruncated {
		result.NextKeyMarker = ""
		result.NextUploadIDMarker = ""
	}

	return result, nil
}
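// On disk, a fresh upload is laid out as follows (a sketch, hash abbreviated):
//
//	EXPORT/.minio.sys/multipart/<SHA256(bucket/object)>/<uploadID>/fs.json
//
// Part files named partNumber.etag.actualSize are placed next to fs.json by
// PutObjectPart as they arrive.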
// NewMultipartUpload - initializes a new multipart upload and returns a
// unique id. The id is of UUID form; each request yields a new unique id.
//
// Implements S3 compatible initiate multipart API.
func (fs *FSObjects) NewMultipartUpload(ctx context.Context, bucket, object string, opts ObjectOptions) (string, error) {
	if err := checkNewMultipartArgs(ctx, bucket, object, fs); err != nil {
		return "", toObjectErr(err, bucket)
	}

	if _, err := fs.statBucketDir(ctx, bucket); err != nil {
		return "", toObjectErr(err, bucket)
	}

	uploadID := mustGetUUID()
	uploadIDDir := fs.getUploadIDDir(bucket, object, uploadID)

	err := mkdirAll(uploadIDDir, 0755)
	if err != nil {
		logger.LogIf(ctx, err)
		return "", err
	}

	// Initialize fs.json values.
	fsMeta := newFSMetaV1()
	fsMeta.Meta = opts.UserDefined

	fsMetaBytes, err := json.Marshal(fsMeta)
	if err != nil {
		logger.LogIf(ctx, err)
		return "", err
	}

	if err = ioutil.WriteFile(pathJoin(uploadIDDir, fs.metaJSONFile), fsMetaBytes, 0644); err != nil {
		logger.LogIf(ctx, err)
		return "", err
	}

	return uploadID, nil
}
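// A typical call sequence against this backend, as a sketch (r is a populated
// *PutObjReader, opts an ObjectOptions value; error handling omitted):
//
//	uploadID, _ := fs.NewMultipartUpload(ctx, bucket, object, opts)
//	pi, _ := fs.PutObjectPart(ctx, bucket, object, uploadID, 1, r, opts)
//	oi, _ := fs.CompleteMultipartUpload(ctx, bucket, object, uploadID,
//		[]CompletePart{{PartNumber: 1, ETag: pi.ETag}}, opts)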
// CopyObjectPart - similar to PutObjectPart but reads data from an existing
// object. Internally incoming data is written to '.minio.sys/tmp' and safely
// renamed to '.minio.sys/multipart' for each part.
func (fs *FSObjects) CopyObjectPart(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject, uploadID string, partID int,
	startOffset int64, length int64, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (pi PartInfo, e error) {

	if srcOpts.VersionID != "" && srcOpts.VersionID != nullVersionID {
		return pi, VersionNotFound{
			Bucket:    srcBucket,
			Object:    srcObject,
			VersionID: srcOpts.VersionID,
		}
	}

	if err := checkNewMultipartArgs(ctx, srcBucket, srcObject, fs); err != nil {
		return pi, toObjectErr(err)
	}

	partInfo, err := fs.PutObjectPart(ctx, dstBucket, dstObject, uploadID, partID, srcInfo.PutObjReader, dstOpts)
	if err != nil {
		return pi, toObjectErr(err, dstBucket, dstObject)
	}

	return partInfo, nil
}

// PutObjectPart - reads incoming data until EOF for the part file on
// an ongoing multipart transaction. Internally incoming data is
// written to '.minio.sys/tmp' and safely renamed to
// '.minio.sys/multipart' for each part.
func (fs *FSObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, r *PutObjReader, opts ObjectOptions) (pi PartInfo, e error) {
	if opts.VersionID != "" && opts.VersionID != nullVersionID {
		return pi, VersionNotFound{
			Bucket:    bucket,
			Object:    object,
			VersionID: opts.VersionID,
		}
	}

	data := r.Reader
	if err := checkPutObjectPartArgs(ctx, bucket, object, fs); err != nil {
		return pi, toObjectErr(err, bucket)
	}

	if _, err := fs.statBucketDir(ctx, bucket); err != nil {
		return pi, toObjectErr(err, bucket)
	}

	// Validate input data size; it can never be less than -1.
	if data.Size() < -1 {
		logger.LogIf(ctx, errInvalidArgument, logger.Application)
		return pi, toObjectErr(errInvalidArgument)
	}

	uploadIDDir := fs.getUploadIDDir(bucket, object, uploadID)

	// Just check if the uploadID exists, to avoid the copy if it doesn't.
	_, err := fsStatFile(ctx, pathJoin(uploadIDDir, fs.metaJSONFile))
	if err != nil {
		if err == errFileNotFound || err == errFileAccessDenied {
			return pi, InvalidUploadID{Bucket: bucket, Object: object, UploadID: uploadID}
		}
		return pi, toObjectErr(err, bucket, object)
	}

	tmpPartPath := pathJoin(fs.fsPath, minioMetaTmpBucket, fs.fsUUID, uploadID+"."+mustGetUUID()+"."+strconv.Itoa(partID))
	bytesWritten, err := fsCreateFile(ctx, tmpPartPath, data, data.Size())

	// Delete the temporary part in case of failure. If PutObjectPart
	// succeeds there is nothing to delete, in which case we just ignore
	// the error.
	defer fsRemoveFile(ctx, tmpPartPath)

	if err != nil {
		return pi, toObjectErr(err, minioMetaTmpBucket, tmpPartPath)
	}

	// Should return IncompleteBody{} error when the reader has fewer
	// bytes than specified in the request header.
	if bytesWritten < data.Size() {
		return pi, IncompleteBody{Bucket: bucket, Object: object}
	}

	etag := r.MD5CurrentHexString()

	if etag == "" {
		etag = GenETag()
	}

	partPath := pathJoin(uploadIDDir, fs.encodePartFile(partID, etag, data.ActualSize()))

	// Make sure not to create parent directories if they don't exist - the upload might have been aborted.
	if err = fsSimpleRenameFile(ctx, tmpPartPath, partPath); err != nil {
		if err == errFileNotFound || err == errFileAccessDenied {
			return pi, InvalidUploadID{Bucket: bucket, Object: object, UploadID: uploadID}
		}
		return pi, toObjectErr(err, minioMetaMultipartBucket, partPath)
	}

	go fs.backgroundAppend(ctx, bucket, object, uploadID)

	fi, err := fsStatFile(ctx, partPath)
	if err != nil {
		return pi, toObjectErr(err, minioMetaMultipartBucket, partPath)
	}
	return PartInfo{
		PartNumber:   partID,
		LastModified: fi.ModTime(),
		ETag:         etag,
		Size:         fi.Size(),
		ActualSize:   data.ActualSize(),
	}, nil
}

// GetMultipartInfo returns multipart metadata uploaded during newMultipartUpload, used
// by callers to verify object states
// - encrypted
// - compressed
func (fs *FSObjects) GetMultipartInfo(ctx context.Context, bucket, object, uploadID string, opts ObjectOptions) (MultipartInfo, error) {
	minfo := MultipartInfo{
		Bucket:   bucket,
		Object:   object,
		UploadID: uploadID,
	}

	if err := checkListPartsArgs(ctx, bucket, object, fs); err != nil {
		return minfo, toObjectErr(err)
	}

	// Check if bucket exists.
	if _, err := fs.statBucketDir(ctx, bucket); err != nil {
		return minfo, toObjectErr(err, bucket)
	}

	uploadIDDir := fs.getUploadIDDir(bucket, object, uploadID)
	if _, err := fsStatFile(ctx, pathJoin(uploadIDDir, fs.metaJSONFile)); err != nil {
		if err == errFileNotFound || err == errFileAccessDenied {
			return minfo, InvalidUploadID{Bucket: bucket, Object: object, UploadID: uploadID}
		}
		return minfo, toObjectErr(err, bucket, object)
	}

	fsMetaBytes, err := xioutil.ReadFile(pathJoin(uploadIDDir, fs.metaJSONFile))
	if err != nil {
		logger.LogIf(ctx, err)
		return minfo, toObjectErr(err, bucket, object)
	}

	var fsMeta fsMetaV1
	var json = jsoniter.ConfigCompatibleWithStandardLibrary
	if err = json.Unmarshal(fsMetaBytes, &fsMeta); err != nil {
		return minfo, toObjectErr(err, bucket, object)
	}

	minfo.UserDefined = fsMeta.Meta
	return minfo, nil
}
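// A part may exist on disk under several names when a client retried an
// upload of the same part number (each retry writes a new
// partNumber.etag.actualSize file). ListObjectParts below resolves such
// duplicates by keeping, per part number, the entry with the latest
// modification time.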
// ListObjectParts - lists all previously uploaded parts for a given
// object and uploadID. Takes an additional part-number-marker input
// to indicate where the listing should begin.
//
// Implements S3 compatible ListObjectParts API. The resulting
// ListPartsInfo structure is marshalled directly into XML and
// replied back to the client.
func (fs *FSObjects) ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker, maxParts int, opts ObjectOptions) (result ListPartsInfo, e error) {
	if err := checkListPartsArgs(ctx, bucket, object, fs); err != nil {
		return result, toObjectErr(err)
	}
	result.Bucket = bucket
	result.Object = object
	result.UploadID = uploadID
	result.MaxParts = maxParts
	result.PartNumberMarker = partNumberMarker

	// Check if bucket exists.
	if _, err := fs.statBucketDir(ctx, bucket); err != nil {
		return result, toObjectErr(err, bucket)
	}

	uploadIDDir := fs.getUploadIDDir(bucket, object, uploadID)
	if _, err := fsStatFile(ctx, pathJoin(uploadIDDir, fs.metaJSONFile)); err != nil {
		if err == errFileNotFound || err == errFileAccessDenied {
			return result, InvalidUploadID{Bucket: bucket, Object: object, UploadID: uploadID}
		}
		return result, toObjectErr(err, bucket, object)
	}

	entries, err := readDir(uploadIDDir)
	if err != nil {
		logger.LogIf(ctx, err)
		return result, toObjectErr(err, bucket)
	}

	partsMap := make(map[int]PartInfo)
	for _, entry := range entries {
		if entry == fs.metaJSONFile {
			continue
		}

		partNumber, currentEtag, actualSize, derr := fs.decodePartFile(entry)
		if derr != nil {
			// Skip part files whose names don't match the expected format.
			// These could be backend filesystem-specific files.
			continue
		}

		entryStat, err := fsStatFile(ctx, pathJoin(uploadIDDir, entry))
		if err != nil {
			continue
		}

		currentMeta := PartInfo{
			PartNumber:   partNumber,
			ETag:         currentEtag,
			ActualSize:   actualSize,
			Size:         entryStat.Size(),
			LastModified: entryStat.ModTime(),
		}

		cachedMeta, ok := partsMap[partNumber]
		if !ok {
			partsMap[partNumber] = currentMeta
			continue
		}

		if currentMeta.LastModified.After(cachedMeta.LastModified) {
			partsMap[partNumber] = currentMeta
		}
	}

	var parts []PartInfo
	for _, partInfo := range partsMap {
		parts = append(parts, partInfo)
	}

	sort.Slice(parts, func(i int, j int) bool {
		return parts[i].PartNumber < parts[j].PartNumber
	})

	i := 0
	if partNumberMarker != 0 {
		// If the marker was set, skip the entries up to and including the marker.
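		// For example, with parts [1 2 3 5] and partNumberMarker=2, listing
		// resumes at part 3. If the marker matches no part, everything is
		// skipped and an empty, non-truncated result is returned.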
		for _, part := range parts {
			i++
			if part.PartNumber == partNumberMarker {
				break
			}
		}
	}

	partsCount := 0
	for partsCount < maxParts && i < len(parts) {
		result.Parts = append(result.Parts, parts[i])
		i++
		partsCount++
	}
	if i < len(parts) {
		result.IsTruncated = true
		if partsCount != 0 {
			result.NextPartNumberMarker = result.Parts[partsCount-1].PartNumber
		}
	}

	rc, _, err := fsOpenFile(ctx, pathJoin(uploadIDDir, fs.metaJSONFile), 0)
	if err != nil {
		if err == errFileNotFound || err == errFileAccessDenied {
			return result, InvalidUploadID{Bucket: bucket, Object: object, UploadID: uploadID}
		}
		return result, toObjectErr(err, bucket, object)
	}
	defer rc.Close()

	fsMetaBytes, err := ioutil.ReadAll(rc)
	if err != nil {
		return result, toObjectErr(err, bucket, object)
	}

	var fsMeta fsMetaV1
	var json = jsoniter.ConfigCompatibleWithStandardLibrary
	if err = json.Unmarshal(fsMetaBytes, &fsMeta); err != nil {
		return result, err
	}

	result.UserDefined = fsMeta.Meta
	return result, nil
}

// CompleteMultipartUpload - completes an ongoing multipart
// transaction after receiving all the parts indicated by the client.
// Returns an md5sum calculated by concatenating all the individual
// md5sums of all the parts.
//
// Implements S3 compatible Complete multipart API.
func (fs *FSObjects) CompleteMultipartUpload(ctx context.Context, bucket string, object string, uploadID string, parts []CompletePart, opts ObjectOptions) (oi ObjectInfo, e error) {

	var actualSize int64

	if err := checkCompleteMultipartArgs(ctx, bucket, object, fs); err != nil {
		return oi, toObjectErr(err)
	}

	// Check if an object is present as one of the parent dirs.
	if fs.parentDirIsObject(ctx, bucket, pathutil.Dir(object)) {
		return oi, toObjectErr(errFileParentIsFile, bucket, object)
	}

	if _, err := fs.statBucketDir(ctx, bucket); err != nil {
		return oi, toObjectErr(err, bucket)
	}
	defer ObjectPathUpdated(pathutil.Join(bucket, object))

	uploadIDDir := fs.getUploadIDDir(bucket, object, uploadID)
	// Just check if the uploadID exists, to avoid the copy if it doesn't.
	_, err := fsStatFile(ctx, pathJoin(uploadIDDir, fs.metaJSONFile))
	if err != nil {
		if err == errFileNotFound || err == errFileAccessDenied {
			return oi, InvalidUploadID{Bucket: bucket, Object: object, UploadID: uploadID}
		}
		return oi, toObjectErr(err, bucket, object)
	}

	// Calculate s3 compatible md5sum for the complete multipart.
	s3MD5 := getCompleteMultipartMD5(parts)

	// Ensure that part ETags are canonicalized to strip off extraneous quotes.
	for i := range parts {
		parts[i].ETag = canonicalizeETag(parts[i].ETag)
	}

	fsMeta := fsMetaV1{}

	// Allocate parts similar to incoming slice.
	fsMeta.Parts = make([]ObjectPartInfo, len(parts))

	entries, err := readDir(uploadIDDir)
	if err != nil {
		logger.GetReqInfo(ctx).AppendTags("uploadIDDir", uploadIDDir)
		logger.LogIf(ctx, err)
		return oi, err
	}

	// Create an entries trie structure for prefix matching.
	entriesTrie := trie.NewTrie()
	for _, entry := range entries {
		entriesTrie.Insert(entry)
	}

	// Save consolidated actual size.
	var objectActualSize int64
	// Validate all parts and then commit to disk.
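	// Each client-supplied (PartNumber, ETag) pair must resolve, via prefix
	// match against the trie built above, to an on-disk part file named
	// partNumber.etag.actualSize; otherwise InvalidPart is returned. The
	// actualSize is recovered from the encoded file name rather than by
	// statting the file.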
	for i, part := range parts {
		partFile := getPartFile(entriesTrie, part.PartNumber, part.ETag)
		if partFile == "" {
			return oi, InvalidPart{
				PartNumber: part.PartNumber,
				GotETag:    part.ETag,
			}
		}

		// Read the actualSize from the part file name.
		subParts := strings.Split(partFile, ".")
		actualSize, err = strconv.ParseInt(subParts[len(subParts)-1], 10, 64)
		if err != nil {
			return oi, InvalidPart{
				PartNumber: part.PartNumber,
				GotETag:    part.ETag,
			}
		}

		partPath := pathJoin(uploadIDDir, partFile)

		var fi os.FileInfo
		fi, err = fsStatFile(ctx, partPath)
		if err != nil {
			if err == errFileNotFound || err == errFileAccessDenied {
				return oi, InvalidPart{}
			}
			return oi, err
		}

		fsMeta.Parts[i] = ObjectPartInfo{
			Number:     part.PartNumber,
			Size:       fi.Size(),
			ActualSize: actualSize,
		}

		// Consolidate the actual size.
		objectActualSize += actualSize

		if i == len(parts)-1 {
			break
		}

		// All parts except the last part have to be at least 5MB.
		if !isMinAllowedPartSize(actualSize) {
			return oi, PartTooSmall{
				PartNumber: part.PartNumber,
				PartSize:   actualSize,
				PartETag:   part.ETag,
			}
		}
	}

	appendFallback := true // In case background-append did not append the required parts.
	appendFilePath := pathJoin(fs.fsPath, minioMetaTmpBucket, fs.fsUUID, fmt.Sprintf("%s.%s", uploadID, mustGetUUID()))

	// Most of the time appendFile would already be fully appended by now. We call fs.backgroundAppend()
	// to take care of the following corner case:
	// 1. The last PutObjectPart triggers the goroutine fs.backgroundAppend, but this goroutine has not started yet.
	// 2. Now CompleteMultipartUpload gets called, sees that the last part is not appended and starts appending
	//    from the beginning.
	fs.backgroundAppend(ctx, bucket, object, uploadID)

	fs.appendFileMapMu.Lock()
	file := fs.appendFileMap[uploadID]
	delete(fs.appendFileMap, uploadID)
	fs.appendFileMapMu.Unlock()

	if file != nil {
		file.Lock()
		defer file.Unlock()
		// Verify that appendFile has all the parts.
		if len(file.parts) == len(parts) {
			for i := range parts {
				if parts[i].ETag != file.parts[i].ETag {
					break
				}
				if parts[i].PartNumber != file.parts[i].PartNumber {
					break
				}
				if i == len(parts)-1 {
					appendFilePath = file.filePath
					appendFallback = false
				}
			}
		}
	}

	if appendFallback {
		if file != nil {
			fsRemoveFile(ctx, file.filePath)
		}
		for _, part := range parts {
			partFile := getPartFile(entriesTrie, part.PartNumber, part.ETag)
			if partFile == "" {
				logger.LogIf(ctx, fmt.Errorf("%.5d.%s missing, will not proceed",
					part.PartNumber, part.ETag))
				return oi, InvalidPart{
					PartNumber: part.PartNumber,
					GotETag:    part.ETag,
				}
			}
			if err = xioutil.AppendFile(appendFilePath, pathJoin(uploadIDDir, partFile), globalFSOSync); err != nil {
				logger.LogIf(ctx, err)
				return oi, toObjectErr(err)
			}
		}
	}

	// Hold a write lock on the object.
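	// The namespace lock serializes concurrent writers on this object path
	// while the assembled file is renamed into place and fs.json is rewritten.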
	destLock := fs.NewNSLock(bucket, object)
	ctx, err = destLock.GetLock(ctx, globalOperationTimeout)
	if err != nil {
		return oi, err
	}
	defer destLock.Unlock()

	bucketMetaDir := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix)
	fsMetaPath := pathJoin(bucketMetaDir, bucket, object, fs.metaJSONFile)
	metaFile, err := fs.rwPool.Write(fsMetaPath)
	var freshFile bool
	if err != nil {
		if !errors.Is(err, errFileNotFound) {
			logger.LogIf(ctx, err)
			return oi, toObjectErr(err, bucket, object)
		}
		metaFile, err = fs.rwPool.Create(fsMetaPath)
		if err != nil {
			logger.LogIf(ctx, err)
			return oi, toObjectErr(err, bucket, object)
		}
		freshFile = true
	}
	defer metaFile.Close()
	defer func() {
		// Remove the meta file when CompleteMultipartUpload encounters
		// any error and it is a fresh file.
		//
		// We should preserve the `fs.json` of any
		// existing object.
		if e != nil && freshFile {
			tmpDir := pathJoin(fs.fsPath, minioMetaTmpBucket, fs.fsUUID)
			fsRemoveMeta(ctx, bucketMetaDir, fsMetaPath, tmpDir)
		}
	}()

	// Read saved fs metadata for the ongoing multipart.
	fsMetaBuf, err := xioutil.ReadFile(pathJoin(uploadIDDir, fs.metaJSONFile))
	if err != nil {
		logger.LogIf(ctx, err)
		return oi, toObjectErr(err, bucket, object)
	}
	err = json.Unmarshal(fsMetaBuf, &fsMeta)
	if err != nil {
		logger.LogIf(ctx, err)
		return oi, toObjectErr(err, bucket, object)
	}
	// Save additional metadata.
	if fsMeta.Meta == nil {
		fsMeta.Meta = make(map[string]string)
	}
	fsMeta.Meta["etag"] = s3MD5
	// Save consolidated actual size.
	fsMeta.Meta[ReservedMetadataPrefix+"actual-size"] = strconv.FormatInt(objectActualSize, 10)
	if _, err = fsMeta.WriteTo(metaFile); err != nil {
		logger.LogIf(ctx, err)
		return oi, toObjectErr(err, bucket, object)
	}

	err = fsRenameFile(ctx, appendFilePath, pathJoin(fs.fsPath, bucket, object))
	if err != nil {
		logger.LogIf(ctx, err)
		return oi, toObjectErr(err, bucket, object)
	}

	// Purge multipart folders.
	{
		fsTmpObjPath := pathJoin(fs.fsPath, minioMetaTmpBucket, fs.fsUUID, mustGetUUID())
		defer fsRemoveAll(ctx, fsTmpObjPath) // remove multipart temporary files in background

		fsSimpleRenameFile(ctx, uploadIDDir, fsTmpObjPath)

		// It is safe to ignore any directory-not-empty error (in case there
		// were multiple uploadIDs on the same object).
		fsRemoveDir(ctx, fs.getMultipartSHADir(bucket, object))
	}

	fi, err := fsStatFile(ctx, pathJoin(fs.fsPath, bucket, object))
	if err != nil {
		return oi, toObjectErr(err, bucket, object)
	}

	return fsMeta.ToObjectInfo(bucket, object, fi), nil
}
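// Both CompleteMultipartUpload above and AbortMultipartUpload below purge the
// upload directory with the same two-step pattern: a cheap atomic rename into
// the tmp bucket, followed by a deferred recursive delete in the background:
//
//	fsSimpleRenameFile(ctx, uploadIDDir, fsTmpObjPath)
//	defer fsRemoveAll(ctx, fsTmpObjPath)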
// AbortMultipartUpload - aborts an ongoing multipart operation
// signified by the input uploadID. This is an atomic operation and
// doesn't require clients to initiate multiple such requests.
//
// All parts are purged from all disks and the reference to the uploadID
// is removed from the system; rollback of this operation is not possible.
//
// Implements S3 compatible Abort multipart API; the slight difference is
// that this is an atomic idempotent operation. Subsequent calls have
// no effect and further requests to the same uploadID would not be
// honored.
func (fs *FSObjects) AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string, opts ObjectOptions) error {
	if err := checkAbortMultipartArgs(ctx, bucket, object, fs); err != nil {
		return err
	}

	if _, err := fs.statBucketDir(ctx, bucket); err != nil {
		return toObjectErr(err, bucket)
	}

	fs.appendFileMapMu.Lock()
	// Remove the append file in the tmp folder.
	file := fs.appendFileMap[uploadID]
	if file != nil {
		fsRemoveFile(ctx, file.filePath)
	}
	delete(fs.appendFileMap, uploadID)
	fs.appendFileMapMu.Unlock()

	uploadIDDir := fs.getUploadIDDir(bucket, object, uploadID)
	// Just check if the uploadID exists, to avoid the copy if it doesn't.
	_, err := fsStatFile(ctx, pathJoin(uploadIDDir, fs.metaJSONFile))
	if err != nil {
		if err == errFileNotFound || err == errFileAccessDenied {
			return InvalidUploadID{Bucket: bucket, Object: object, UploadID: uploadID}
		}
		return toObjectErr(err, bucket, object)
	}

	// Purge multipart folders.
	{
		fsTmpObjPath := pathJoin(fs.fsPath, minioMetaTmpBucket, fs.fsUUID, mustGetUUID())
		defer fsRemoveAll(ctx, fsTmpObjPath) // remove multipart temporary files in background

		fsSimpleRenameFile(ctx, uploadIDDir, fsTmpObjPath)

		// It is safe to ignore any directory-not-empty error (in case there
		// were multiple uploadIDs on the same object).
		fsRemoveDir(ctx, fs.getMultipartSHADir(bucket, object))
	}

	return nil
}

// cleanupStaleUploads removes multipart uploads older than `expiry` on all
// buckets, checking every `cleanupInterval`. This function is blocking and
// should be run in a goroutine.
func (fs *FSObjects) cleanupStaleUploads(ctx context.Context, cleanupInterval, expiry time.Duration) {
	timer := time.NewTimer(cleanupInterval)
	defer timer.Stop()

	for {
		select {
		case <-ctx.Done():
			return
		case <-timer.C:
			// Reset for the next interval.
			timer.Reset(cleanupInterval)

			now := time.Now()
			entries, err := readDir(pathJoin(fs.fsPath, minioMetaMultipartBucket))
			if err != nil {
				continue
			}
			for _, entry := range entries {
				uploadIDs, err := readDir(pathJoin(fs.fsPath, minioMetaMultipartBucket, entry))
				if err != nil {
					continue
				}

				// Remove the trailing slash separator.
				for i := range uploadIDs {
					uploadIDs[i] = strings.TrimSuffix(uploadIDs[i], SlashSeparator)
				}

				for _, uploadID := range uploadIDs {
					fi, err := fsStatDir(ctx, pathJoin(fs.fsPath, minioMetaMultipartBucket, entry, uploadID))
					if err != nil {
						continue
					}
					if now.Sub(fi.ModTime()) > expiry {
						fsRemoveAll(ctx, pathJoin(fs.fsPath, minioMetaMultipartBucket, entry, uploadID))
						// It is safe to ignore any directory-not-empty error (in case
						// there were multiple uploadIDs on the same object).
						fsRemoveDir(ctx, pathJoin(fs.fsPath, minioMetaMultipartBucket, entry))

						// Remove the uploadID from the append file map and its corresponding temporary file.
						fs.appendFileMapMu.Lock()
						bgAppend, ok := fs.appendFileMap[uploadID]
						if ok {
							_ = fsRemoveFile(ctx, bgAppend.filePath)
							delete(fs.appendFileMap, uploadID)
						}
						fs.appendFileMapMu.Unlock()
					}
				}
			}
		}
	}
}