github.com/minio/minio@v0.0.0-20240328213742-3f72439b8a27/cmd/object-api-putobject_test.go (about) 1 // Copyright (c) 2015-2021 MinIO, Inc. 2 // 3 // This file is part of MinIO Object Storage stack 4 // 5 // This program is free software: you can redistribute it and/or modify 6 // it under the terms of the GNU Affero General Public License as published by 7 // the Free Software Foundation, either version 3 of the License, or 8 // (at your option) any later version. 9 // 10 // This program is distributed in the hope that it will be useful 11 // but WITHOUT ANY WARRANTY; without even the implied warranty of 12 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 // GNU Affero General Public License for more details. 14 // 15 // You should have received a copy of the GNU Affero General Public License 16 // along with this program. If not, see <http://www.gnu.org/licenses/>. 17 18 package cmd 19 20 import ( 21 "bytes" 22 "context" 23 "crypto/md5" 24 "encoding/hex" 25 "errors" 26 "os" 27 "path" 28 "testing" 29 30 "github.com/dustin/go-humanize" 31 "github.com/minio/minio/internal/hash" 32 "github.com/minio/minio/internal/ioutil" 33 ) 34 35 func md5Header(data []byte) map[string]string { 36 return map[string]string{"etag": getMD5Hash(data)} 37 } 38 39 // Wrapper for calling PutObject tests for both Erasure multiple disks and single node setup. 40 func TestObjectAPIPutObjectSingle(t *testing.T) { 41 ExecExtendedObjectLayerTest(t, testObjectAPIPutObject) 42 } 43 44 // Tests validate correctness of PutObject. 45 func testObjectAPIPutObject(obj ObjectLayer, instanceType string, t TestErrHandler) { 46 // Generating cases for which the PutObject fails. 47 bucket := "minio-bucket" 48 object := "minio-object" 49 50 // Create bucket. 51 err := obj.MakeBucket(context.Background(), bucket, MakeBucketOptions{}) 52 if err != nil { 53 // Failed to create newbucket, abort. 54 t.Fatalf("%s : %s", instanceType, err.Error()) 55 } 56 57 // Creating a dummy bucket for tests. 
58 err = obj.MakeBucket(context.Background(), "unused-bucket", MakeBucketOptions{}) 59 if err != nil { 60 // Failed to create newbucket, abort. 61 t.Fatalf("%s : %s", instanceType, err.Error()) 62 } 63 64 var ( 65 nilBytes []byte 66 data = []byte("hello") 67 fiveMBBytes = bytes.Repeat([]byte("a"), 5*humanize.MiByte) 68 ) 69 invalidMD5 := getMD5Hash([]byte("meh")) 70 invalidMD5Header := md5Header([]byte("meh")) 71 72 testCases := []struct { 73 bucketName string 74 objName string 75 inputData []byte 76 inputMeta map[string]string 77 inputSHA256 string 78 inputDataSize int64 79 // expected error output. 80 expectedMd5 string 81 expectedError error 82 }{ 83 // Cases with invalid bucket name. 84 0: {bucketName: ".test", objName: "obj", inputData: []byte(""), expectedError: BucketNameInvalid{Bucket: ".test"}}, 85 1: {bucketName: "------", objName: "obj", inputData: []byte(""), expectedError: BucketNameInvalid{Bucket: "------"}}, 86 2: { 87 bucketName: "$this-is-not-valid-too", objName: "obj", inputData: []byte(""), 88 expectedError: BucketNameInvalid{Bucket: "$this-is-not-valid-too"}, 89 }, 90 3: {bucketName: "a", objName: "obj", inputData: []byte(""), expectedError: BucketNameInvalid{Bucket: "a"}}, 91 92 // Case with invalid object names. 93 4: {bucketName: bucket, inputData: []byte(""), expectedError: ObjectNameInvalid{Bucket: bucket, Object: ""}}, 94 95 // Valid object and bucket names but non-existent bucket. 96 5: {bucketName: "abc", objName: "def", inputData: []byte(""), expectedError: BucketNotFound{Bucket: "abc"}}, 97 98 // Input to replicate Md5 mismatch. 99 6: { 100 bucketName: bucket, objName: object, inputData: []byte(""), 101 inputMeta: map[string]string{"etag": "d41d8cd98f00b204e9800998ecf8427f"}, 102 expectedError: hash.BadDigest{ExpectedMD5: "d41d8cd98f00b204e9800998ecf8427f", CalculatedMD5: "d41d8cd98f00b204e9800998ecf8427e"}, 103 }, 104 105 // With incorrect sha256. 
106 7: { 107 bucketName: bucket, objName: object, inputData: []byte("abcd"), 108 inputMeta: map[string]string{"etag": "e2fc714c4727ee9395f324cd2e7f331f"}, 109 inputSHA256: "88d4266fd4e6338d13b845fcf289579d209c897823b9217da3e161936f031580", inputDataSize: int64(len("abcd")), 110 expectedError: hash.SHA256Mismatch{ 111 ExpectedSHA256: "88d4266fd4e6338d13b845fcf289579d209c897823b9217da3e161936f031580", 112 CalculatedSHA256: "88d4266fd4e6338d13b845fcf289579d209c897823b9217da3e161936f031589", 113 }, 114 }, 115 116 // Input with size more than the size of actual data inside the reader. 117 8: { 118 bucketName: bucket, objName: object, inputData: []byte("abcd"), 119 inputMeta: map[string]string{"etag": "e2fc714c4727ee9395f324cd2e7f331e"}, inputDataSize: int64(len("abcd") + 1), 120 expectedError: hash.BadDigest{ExpectedMD5: "e2fc714c4727ee9395f324cd2e7f331e", CalculatedMD5: "e2fc714c4727ee9395f324cd2e7f331f"}, 121 }, 122 123 // Input with size less than the size of actual data inside the reader. 124 9: { 125 bucketName: bucket, objName: object, inputData: []byte("abcd"), 126 inputMeta: map[string]string{"etag": "900150983cd24fb0d6963f7d28e17f73"}, inputDataSize: int64(len("abcd") - 1), 127 expectedError: ioutil.ErrOverread, 128 }, 129 130 // Validating for success cases. 
131 10: {bucketName: bucket, objName: object, inputData: []byte("abcd"), inputMeta: map[string]string{"etag": "e2fc714c4727ee9395f324cd2e7f331f"}, inputDataSize: int64(len("abcd"))}, 132 11: {bucketName: bucket, objName: object, inputData: []byte("efgh"), inputMeta: map[string]string{"etag": "1f7690ebdd9b4caf8fab49ca1757bf27"}, inputDataSize: int64(len("efgh"))}, 133 12: {bucketName: bucket, objName: object, inputData: []byte("ijkl"), inputMeta: map[string]string{"etag": "09a0877d04abf8759f99adec02baf579"}, inputDataSize: int64(len("ijkl"))}, 134 13: {bucketName: bucket, objName: object, inputData: []byte("mnop"), inputMeta: map[string]string{"etag": "e132e96a5ddad6da8b07bba6f6131fef"}, inputDataSize: int64(len("mnop"))}, 135 136 // With no metadata 137 14: {bucketName: bucket, objName: object, inputData: data, inputDataSize: int64(len(data)), expectedMd5: getMD5Hash(data)}, 138 15: {bucketName: bucket, objName: object, inputData: nilBytes, inputDataSize: int64(len(nilBytes)), expectedMd5: getMD5Hash(nilBytes)}, 139 16: {bucketName: bucket, objName: object, inputData: fiveMBBytes, inputDataSize: int64(len(fiveMBBytes)), expectedMd5: getMD5Hash(fiveMBBytes)}, 140 141 // With arbitrary metadata 142 17: {bucketName: bucket, objName: object, inputData: data, inputMeta: map[string]string{"answer": "42"}, inputDataSize: int64(len(data)), expectedMd5: getMD5Hash(data)}, 143 18: {bucketName: bucket, objName: object, inputData: nilBytes, inputMeta: map[string]string{"answer": "42"}, inputDataSize: int64(len(nilBytes)), expectedMd5: getMD5Hash(nilBytes)}, 144 19: {bucketName: bucket, objName: object, inputData: fiveMBBytes, inputMeta: map[string]string{"answer": "42"}, inputDataSize: int64(len(fiveMBBytes)), expectedMd5: getMD5Hash(fiveMBBytes)}, 145 146 // With valid md5sum and sha256. 
147 20: {bucketName: bucket, objName: object, inputData: data, inputMeta: md5Header(data), inputSHA256: getSHA256Hash(data), inputDataSize: int64(len(data)), expectedMd5: getMD5Hash(data)}, 148 21: {bucketName: bucket, objName: object, inputData: nilBytes, inputMeta: md5Header(nilBytes), inputSHA256: getSHA256Hash(nilBytes), inputDataSize: int64(len(nilBytes)), expectedMd5: getMD5Hash(nilBytes)}, 149 22: {bucketName: bucket, objName: object, inputData: fiveMBBytes, inputMeta: md5Header(fiveMBBytes), inputSHA256: getSHA256Hash(fiveMBBytes), inputDataSize: int64(len(fiveMBBytes)), expectedMd5: getMD5Hash(fiveMBBytes)}, 150 151 // data with invalid md5sum in header 152 23: { 153 bucketName: bucket, objName: object, inputData: data, inputMeta: invalidMD5Header, inputDataSize: int64(len(data)), expectedMd5: getMD5Hash(data), 154 expectedError: hash.BadDigest{ExpectedMD5: invalidMD5, CalculatedMD5: getMD5Hash(data)}, 155 }, 156 24: { 157 bucketName: bucket, objName: object, inputData: nilBytes, inputMeta: invalidMD5Header, inputDataSize: int64(len(nilBytes)), expectedMd5: getMD5Hash(nilBytes), 158 expectedError: hash.BadDigest{ExpectedMD5: invalidMD5, CalculatedMD5: getMD5Hash(nilBytes)}, 159 }, 160 25: { 161 bucketName: bucket, objName: object, inputData: fiveMBBytes, inputMeta: invalidMD5Header, inputDataSize: int64(len(fiveMBBytes)), expectedMd5: getMD5Hash(fiveMBBytes), 162 expectedError: hash.BadDigest{ExpectedMD5: invalidMD5, CalculatedMD5: getMD5Hash(fiveMBBytes)}, 163 }, 164 165 // data with size different from the actual number of bytes available in the reader 166 26: {bucketName: bucket, objName: object, inputData: data, inputDataSize: int64(len(data) - 1), expectedMd5: getMD5Hash(data[:len(data)-1]), expectedError: ioutil.ErrOverread}, 167 27: {bucketName: bucket, objName: object, inputData: nilBytes, inputDataSize: int64(len(nilBytes) + 1), expectedMd5: getMD5Hash(nilBytes), expectedError: IncompleteBody{Bucket: bucket, Object: object}}, 168 28: {bucketName: 
bucket, objName: object, inputData: fiveMBBytes, expectedMd5: getMD5Hash(fiveMBBytes), expectedError: ioutil.ErrOverread}, 169 170 // valid data with X-Amz-Meta- meta 171 29: {bucketName: bucket, objName: object, inputData: data, inputMeta: map[string]string{"X-Amz-Meta-AppID": "a42"}, inputDataSize: int64(len(data)), expectedMd5: getMD5Hash(data)}, 172 173 // Put an empty object with a trailing slash 174 30: {bucketName: bucket, objName: "emptydir/", inputData: []byte{}, expectedMd5: getMD5Hash([]byte{})}, 175 // Put an object inside the empty directory 176 31: {bucketName: bucket, objName: "emptydir/" + object, inputData: data, inputDataSize: int64(len(data)), expectedMd5: getMD5Hash(data)}, 177 // Put the empty object with a trailing slash again (refer to Test case 30), this needs to succeed 178 32: {bucketName: bucket, objName: "emptydir/", inputData: []byte{}, expectedMd5: getMD5Hash([]byte{})}, 179 180 // With invalid crc32. 181 33: { 182 bucketName: bucket, objName: object, inputData: []byte("abcd"), 183 inputMeta: map[string]string{"etag": "e2fc714c4727ee9395f324cd2e7f331f", "x-amz-checksum-crc32": "abcd"}, 184 inputDataSize: int64(len("abcd")), 185 }, 186 } 187 for i, testCase := range testCases { 188 in := mustGetPutObjReader(t, bytes.NewReader(testCase.inputData), testCase.inputDataSize, testCase.inputMeta["etag"], testCase.inputSHA256) 189 objInfo, actualErr := obj.PutObject(context.Background(), testCase.bucketName, testCase.objName, in, ObjectOptions{UserDefined: testCase.inputMeta}) 190 if actualErr != nil && testCase.expectedError == nil { 191 t.Errorf("Test %d: %s: Expected to pass, but failed with: error %s.", i, instanceType, actualErr.Error()) 192 continue 193 } 194 if actualErr == nil && testCase.expectedError != nil { 195 t.Errorf("Test %d: %s: Expected to fail with error \"%s\", but passed instead.", i, instanceType, testCase.expectedError.Error()) 196 continue 197 } 198 // Failed as expected, but does it fail for the expected reason. 
199 if actualErr != nil && actualErr != testCase.expectedError { 200 t.Errorf("Test %d: %s: Expected to fail with error \"%v\", but instead failed with error \"%v\" instead.", i, instanceType, testCase.expectedError, actualErr) 201 continue 202 } 203 // Test passes as expected, but the output values are verified for correctness here. 204 if actualErr == nil { 205 // Asserting whether the md5 output is correct. 206 if expectedMD5, ok := testCase.inputMeta["etag"]; ok && expectedMD5 != objInfo.ETag { 207 t.Errorf("Test %d: %s: Calculated Md5 different from the actual one %s.", i, instanceType, objInfo.ETag) 208 continue 209 } 210 } 211 } 212 } 213 214 // Wrapper for calling PutObject tests for both Erasure multiple disks case 215 // when quorum is not available. 216 func TestObjectAPIPutObjectDiskNotFound(t *testing.T) { 217 ExecObjectLayerDiskAlteredTest(t, testObjectAPIPutObjectDiskNotFound) 218 } 219 220 // Tests validate correctness of PutObject. 221 func testObjectAPIPutObjectDiskNotFound(obj ObjectLayer, instanceType string, disks []string, t *testing.T) { 222 // Generating cases for which the PutObject fails. 223 bucket := "minio-bucket" 224 object := "minio-object" 225 226 // Create bucket. 227 err := obj.MakeBucket(context.Background(), bucket, MakeBucketOptions{}) 228 if err != nil { 229 // Failed to create newbucket, abort. 230 t.Fatalf("%s : %s", instanceType, err.Error()) 231 } 232 233 // Creating a dummy bucket for tests. 234 err = obj.MakeBucket(context.Background(), "unused-bucket", MakeBucketOptions{}) 235 if err != nil { 236 // Failed to create newbucket, abort. 237 t.Fatalf("%s : %s", instanceType, err.Error()) 238 } 239 240 // Take 4 disks down, one more we loose quorum on 16 disk node. 
241 for _, disk := range disks[:4] { 242 os.RemoveAll(disk) 243 } 244 245 testCases := []struct { 246 bucketName string 247 objName string 248 inputData []byte 249 inputMeta map[string]string 250 inputDataSize int64 251 // flag indicating whether the test should pass. 252 shouldPass bool 253 // expected error output. 254 expectedMd5 string 255 expectedError error 256 }{ 257 // Validating for success cases. 258 {bucket, object, []byte("abcd"), map[string]string{"etag": "e2fc714c4727ee9395f324cd2e7f331f"}, int64(len("abcd")), true, "", nil}, 259 {bucket, object, []byte("efgh"), map[string]string{"etag": "1f7690ebdd9b4caf8fab49ca1757bf27"}, int64(len("efgh")), true, "", nil}, 260 {bucket, object, []byte("ijkl"), map[string]string{"etag": "09a0877d04abf8759f99adec02baf579"}, int64(len("ijkl")), true, "", nil}, 261 {bucket, object, []byte("mnop"), map[string]string{"etag": "e132e96a5ddad6da8b07bba6f6131fef"}, int64(len("mnop")), true, "", nil}, 262 } 263 264 sha256sum := "" 265 for i, testCase := range testCases { 266 objInfo, actualErr := obj.PutObject(context.Background(), testCase.bucketName, testCase.objName, mustGetPutObjReader(t, bytes.NewReader(testCase.inputData), testCase.inputDataSize, testCase.inputMeta["etag"], sha256sum), ObjectOptions{UserDefined: testCase.inputMeta}) 267 if actualErr != nil && testCase.shouldPass { 268 t.Errorf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s.", i+1, instanceType, actualErr.Error()) 269 } 270 if actualErr == nil && !testCase.shouldPass { 271 t.Errorf("Test %d: %s: Expected to fail with <ERROR> \"%s\", but passed instead.", i+1, instanceType, testCase.expectedError.Error()) 272 } 273 // Failed as expected, but does it fail for the expected reason. 
274 if actualErr != nil && !testCase.shouldPass { 275 if testCase.expectedError.Error() != actualErr.Error() { 276 t.Errorf("Test %d: %s: Expected to fail with error \"%s\", but instead failed with error \"%s\" instead.", i+1, 277 instanceType, testCase.expectedError.Error(), actualErr.Error()) 278 } 279 } 280 // Test passes as expected, but the output values are verified for correctness here. 281 if actualErr == nil && testCase.shouldPass { 282 // Asserting whether the md5 output is correct. 283 if testCase.inputMeta["etag"] != objInfo.ETag { 284 t.Errorf("Test %d: %s: Calculated Md5 different from the actual one %s.", i+1, instanceType, objInfo.ETag) 285 } 286 } 287 } 288 289 // This causes quorum failure verify. 290 os.RemoveAll(disks[len(disks)-1]) 291 292 // Validate the last test. 293 testCase := struct { 294 bucketName string 295 objName string 296 inputData []byte 297 inputMeta map[string]string 298 inputDataSize int64 299 // flag indicating whether the test should pass. 300 shouldPass bool 301 // expected error output. 302 expectedMd5 string 303 expectedError error 304 }{ 305 bucket, 306 object, 307 []byte("mnop"), 308 map[string]string{"etag": "e132e96a5ddad6da8b07bba6f6131fef"}, 309 int64(len("mnop")), 310 false, 311 "", 312 errErasureWriteQuorum, 313 } 314 315 _, actualErr := obj.PutObject(context.Background(), testCase.bucketName, testCase.objName, mustGetPutObjReader(t, bytes.NewReader(testCase.inputData), testCase.inputDataSize, testCase.inputMeta["etag"], sha256sum), ObjectOptions{UserDefined: testCase.inputMeta}) 316 if actualErr != nil && testCase.shouldPass { 317 t.Errorf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s.", len(testCases)+1, instanceType, actualErr.Error()) 318 } 319 // Failed as expected, but does it fail for the expected reason. 
320 if actualErr != nil && !testCase.shouldPass { 321 if !errors.Is(actualErr, testCase.expectedError) { 322 t.Errorf("Test %d: %s: Expected to fail with error \"%s\", but instead failed with error \"%s\" instead.", len(testCases)+1, instanceType, testCase.expectedError.Error(), actualErr.Error()) 323 } 324 } 325 } 326 327 // Wrapper for calling PutObject tests for both Erasure multiple disks and single node setup. 328 func TestObjectAPIPutObjectStaleFiles(t *testing.T) { 329 ExecObjectLayerStaleFilesTest(t, testObjectAPIPutObjectStaleFiles) 330 } 331 332 // Tests validate correctness of PutObject. 333 func testObjectAPIPutObjectStaleFiles(obj ObjectLayer, instanceType string, disks []string, t *testing.T) { 334 // Generating cases for which the PutObject fails. 335 bucket := "minio-bucket" 336 object := "minio-object" 337 338 // Create bucket. 339 err := obj.MakeBucket(context.Background(), bucket, MakeBucketOptions{}) 340 if err != nil { 341 // Failed to create newbucket, abort. 342 t.Fatalf("%s : %s", instanceType, err.Error()) 343 } 344 345 data := []byte("hello, world") 346 // Create object. 347 _, err = obj.PutObject(context.Background(), bucket, object, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{}) 348 if err != nil { 349 // Failed to create object, abort. 350 t.Fatalf("%s : %s", instanceType, err.Error()) 351 } 352 353 for _, disk := range disks { 354 tmpMetaDir := path.Join(disk, minioMetaTmpBucket) 355 files, err := os.ReadDir(tmpMetaDir) 356 if err != nil { 357 t.Fatal(err) 358 } 359 var found bool 360 for _, fi := range files { 361 if fi.Name() == ".trash" { 362 continue 363 } 364 found = true 365 } 366 if found { 367 t.Fatalf("%s: expected: empty, got: non-empty %#v", minioMetaTmpBucket, files) 368 } 369 } 370 } 371 372 // Wrapper for calling Multipart PutObject tests for both Erasure multiple disks and single node setup. 
373 func TestObjectAPIMultipartPutObjectStaleFiles(t *testing.T) { 374 ExecObjectLayerStaleFilesTest(t, testObjectAPIMultipartPutObjectStaleFiles) 375 } 376 377 // Tests validate correctness of PutObject. 378 func testObjectAPIMultipartPutObjectStaleFiles(obj ObjectLayer, instanceType string, disks []string, t *testing.T) { 379 // Generating cases for which the PutObject fails. 380 bucket := "minio-bucket" 381 object := "minio-object" 382 383 // Create bucket. 384 err := obj.MakeBucket(context.Background(), bucket, MakeBucketOptions{}) 385 if err != nil { 386 // Failed to create newbucket, abort. 387 t.Fatalf("%s : %s", instanceType, err.Error()) 388 } 389 opts := ObjectOptions{} 390 // Initiate Multipart Upload on the above created bucket. 391 res, err := obj.NewMultipartUpload(context.Background(), bucket, object, opts) 392 if err != nil { 393 // Failed to create NewMultipartUpload, abort. 394 t.Fatalf("%s : %s", instanceType, err.Error()) 395 } 396 uploadID := res.UploadID 397 398 // Upload part1. 399 fiveMBBytes := bytes.Repeat([]byte("a"), 5*humanize.MiByte) 400 md5Writer := md5.New() 401 md5Writer.Write(fiveMBBytes) 402 etag1 := hex.EncodeToString(md5Writer.Sum(nil)) 403 sha256sum := "" 404 _, err = obj.PutObjectPart(context.Background(), bucket, object, uploadID, 1, mustGetPutObjReader(t, bytes.NewReader(fiveMBBytes), int64(len(fiveMBBytes)), etag1, sha256sum), opts) 405 if err != nil { 406 // Failed to upload object part, abort. 407 t.Fatalf("%s : %s", instanceType, err.Error()) 408 } 409 410 // Upload part2. 411 data := []byte("hello, world") 412 md5Writer = md5.New() 413 md5Writer.Write(data) 414 etag2 := hex.EncodeToString(md5Writer.Sum(nil)) 415 _, err = obj.PutObjectPart(context.Background(), bucket, object, uploadID, 2, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), etag2, sha256sum), opts) 416 if err != nil { 417 // Failed to upload object part, abort. 
418 t.Fatalf("%s : %s", instanceType, err.Error()) 419 } 420 421 // Complete multipart. 422 parts := []CompletePart{ 423 {ETag: etag1, PartNumber: 1}, 424 {ETag: etag2, PartNumber: 2}, 425 } 426 _, err = obj.CompleteMultipartUpload(context.Background(), bucket, object, uploadID, parts, ObjectOptions{}) 427 if err != nil { 428 // Failed to complete multipart upload, abort. 429 t.Fatalf("%s : %s", instanceType, err.Error()) 430 } 431 432 for _, disk := range disks { 433 tmpMetaDir := path.Join(disk, minioMetaTmpBucket) 434 files, err := os.ReadDir(tmpMetaDir) 435 if err != nil { 436 // It's OK to have non-existing tmpMetaDir. 437 if osIsNotExist(err) { 438 continue 439 } 440 441 // Print the error 442 t.Errorf("%s", err) 443 } 444 445 var found bool 446 for _, fi := range files { 447 if fi.Name() == ".trash" { 448 continue 449 } 450 found = true 451 break 452 } 453 454 if found { 455 t.Fatalf("%s: expected: empty, got: non-empty. content: %#v", tmpMetaDir, files) 456 } 457 } 458 } 459 460 // Benchmarks for ObjectLayer.PutObject(). 461 // The intent is to benchmark PutObject for various sizes ranging from few bytes to 100MB. 462 // Also each of these Benchmarks are run both Erasure and FS backends. 463 464 // BenchmarkPutObjectVerySmallFS - Benchmark FS.PutObject() for object size of 10 bytes. 465 func BenchmarkPutObjectVerySmallFS(b *testing.B) { 466 benchmarkPutObject(b, "FS", 10) 467 } 468 469 // BenchmarkPutObjectVerySmallErasure - Benchmark Erasure.PutObject() for object size of 10 bytes. 470 func BenchmarkPutObjectVerySmallErasure(b *testing.B) { 471 benchmarkPutObject(b, "Erasure", 10) 472 } 473 474 // BenchmarkPutObject10KbFS - Benchmark FS.PutObject() for object size of 10KB. 475 func BenchmarkPutObject10KbFS(b *testing.B) { 476 benchmarkPutObject(b, "FS", 10*humanize.KiByte) 477 } 478 479 // BenchmarkPutObject10KbErasure - Benchmark Erasure.PutObject() for object size of 10KB. 
func BenchmarkPutObject10KbErasure(b *testing.B) {
	benchmarkPutObject(b, "Erasure", 10*humanize.KiByte)
}

// BenchmarkPutObject100KbFS - Benchmark FS.PutObject() for object size of 100KB.
func BenchmarkPutObject100KbFS(b *testing.B) {
	benchmarkPutObject(b, "FS", 100*humanize.KiByte)
}

// BenchmarkPutObject100KbErasure - Benchmark Erasure.PutObject() for object size of 100KB.
func BenchmarkPutObject100KbErasure(b *testing.B) {
	benchmarkPutObject(b, "Erasure", 100*humanize.KiByte)
}

// BenchmarkPutObject1MbFS - Benchmark FS.PutObject() for object size of 1MB.
func BenchmarkPutObject1MbFS(b *testing.B) {
	benchmarkPutObject(b, "FS", 1*humanize.MiByte)
}

// BenchmarkPutObject1MbErasure - Benchmark Erasure.PutObject() for object size of 1MB.
func BenchmarkPutObject1MbErasure(b *testing.B) {
	benchmarkPutObject(b, "Erasure", 1*humanize.MiByte)
}

// BenchmarkPutObject5MbFS - Benchmark FS.PutObject() for object size of 5MB.
func BenchmarkPutObject5MbFS(b *testing.B) {
	benchmarkPutObject(b, "FS", 5*humanize.MiByte)
}

// BenchmarkPutObject5MbErasure - Benchmark Erasure.PutObject() for object size of 5MB.
func BenchmarkPutObject5MbErasure(b *testing.B) {
	benchmarkPutObject(b, "Erasure", 5*humanize.MiByte)
}

// BenchmarkPutObject10MbFS - Benchmark FS.PutObject() for object size of 10MB.
func BenchmarkPutObject10MbFS(b *testing.B) {
	benchmarkPutObject(b, "FS", 10*humanize.MiByte)
}

// BenchmarkPutObject10MbErasure - Benchmark Erasure.PutObject() for object size of 10MB.
func BenchmarkPutObject10MbErasure(b *testing.B) {
	benchmarkPutObject(b, "Erasure", 10*humanize.MiByte)
}

// BenchmarkPutObject25MbFS - Benchmark FS.PutObject() for object size of 25MB.
func BenchmarkPutObject25MbFS(b *testing.B) {
	benchmarkPutObject(b, "FS", 25*humanize.MiByte)
}

// BenchmarkPutObject25MbErasure - Benchmark Erasure.PutObject() for object size of 25MB.
func BenchmarkPutObject25MbErasure(b *testing.B) {
	benchmarkPutObject(b, "Erasure", 25*humanize.MiByte)
}

// BenchmarkPutObject50MbFS - Benchmark FS.PutObject() for object size of 50MB.
func BenchmarkPutObject50MbFS(b *testing.B) {
	benchmarkPutObject(b, "FS", 50*humanize.MiByte)
}

// BenchmarkPutObject50MbErasure - Benchmark Erasure.PutObject() for object size of 50MB.
func BenchmarkPutObject50MbErasure(b *testing.B) {
	benchmarkPutObject(b, "Erasure", 50*humanize.MiByte)
}

// parallel benchmarks for ObjectLayer.PutObject() .

// BenchmarkParallelPutObjectVerySmallFS - BenchmarkParallel FS.PutObject() for object size of 10 bytes.
func BenchmarkParallelPutObjectVerySmallFS(b *testing.B) {
	benchmarkPutObjectParallel(b, "FS", 10)
}

// BenchmarkParallelPutObjectVerySmallErasure - BenchmarkParallel Erasure.PutObject() for object size of 10 bytes.
func BenchmarkParallelPutObjectVerySmallErasure(b *testing.B) {
	benchmarkPutObjectParallel(b, "Erasure", 10)
}

// BenchmarkParallelPutObject10KbFS - BenchmarkParallel FS.PutObject() for object size of 10KB.
func BenchmarkParallelPutObject10KbFS(b *testing.B) {
	benchmarkPutObjectParallel(b, "FS", 10*humanize.KiByte)
}

// BenchmarkParallelPutObject10KbErasure - BenchmarkParallel Erasure.PutObject() for object size of 10KB.
func BenchmarkParallelPutObject10KbErasure(b *testing.B) {
	benchmarkPutObjectParallel(b, "Erasure", 10*humanize.KiByte)
}

// BenchmarkParallelPutObject100KbFS - BenchmarkParallel FS.PutObject() for object size of 100KB.
func BenchmarkParallelPutObject100KbFS(b *testing.B) {
	benchmarkPutObjectParallel(b, "FS", 100*humanize.KiByte)
}

// BenchmarkParallelPutObject100KbErasure - BenchmarkParallel Erasure.PutObject() for object size of 100KB.
func BenchmarkParallelPutObject100KbErasure(b *testing.B) {
	benchmarkPutObjectParallel(b, "Erasure", 100*humanize.KiByte)
}

// BenchmarkParallelPutObject1MbFS - BenchmarkParallel FS.PutObject() for object size of 1MB.
func BenchmarkParallelPutObject1MbFS(b *testing.B) {
	benchmarkPutObjectParallel(b, "FS", 1*humanize.MiByte)
}

// BenchmarkParallelPutObject1MbErasure - BenchmarkParallel Erasure.PutObject() for object size of 1MB.
func BenchmarkParallelPutObject1MbErasure(b *testing.B) {
	benchmarkPutObjectParallel(b, "Erasure", 1*humanize.MiByte)
}

// BenchmarkParallelPutObject5MbFS - BenchmarkParallel FS.PutObject() for object size of 5MB.
func BenchmarkParallelPutObject5MbFS(b *testing.B) {
	benchmarkPutObjectParallel(b, "FS", 5*humanize.MiByte)
}

// BenchmarkParallelPutObject5MbErasure - BenchmarkParallel Erasure.PutObject() for object size of 5MB.
func BenchmarkParallelPutObject5MbErasure(b *testing.B) {
	benchmarkPutObjectParallel(b, "Erasure", 5*humanize.MiByte)
}

// BenchmarkParallelPutObject10MbFS - BenchmarkParallel FS.PutObject() for object size of 10MB.
func BenchmarkParallelPutObject10MbFS(b *testing.B) {
	benchmarkPutObjectParallel(b, "FS", 10*humanize.MiByte)
}

// BenchmarkParallelPutObject10MbErasure - BenchmarkParallel Erasure.PutObject() for object size of 10MB.
func BenchmarkParallelPutObject10MbErasure(b *testing.B) {
	benchmarkPutObjectParallel(b, "Erasure", 10*humanize.MiByte)
}

// BenchmarkParallelPutObject25MbFS - BenchmarkParallel FS.PutObject() for object size of 25MB.
func BenchmarkParallelPutObject25MbFS(b *testing.B) {
	benchmarkPutObjectParallel(b, "FS", 25*humanize.MiByte)
}

// BenchmarkParallelPutObject25MbErasure - BenchmarkParallel Erasure.PutObject() for object size of 25MB.
func BenchmarkParallelPutObject25MbErasure(b *testing.B) {
	benchmarkPutObjectParallel(b, "Erasure", 25*humanize.MiByte)
}