storj.io/minio@v0.0.0-20230509071714-0cbc90f649b1/cmd/object-api-putobject_test.go (about) 1 /* 2 * MinIO Cloud Storage, (C) 2016 MinIO, Inc. 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17 package cmd 18 19 import ( 20 "bytes" 21 "context" 22 "crypto/md5" 23 "encoding/hex" 24 "errors" 25 "io/ioutil" 26 "os" 27 "path" 28 "testing" 29 30 humanize "github.com/dustin/go-humanize" 31 32 "storj.io/minio/pkg/hash" 33 ) 34 35 func md5Header(data []byte) map[string]string { 36 return map[string]string{"etag": getMD5Hash([]byte(data))} 37 } 38 39 // Wrapper for calling PutObject tests for both Erasure multiple disks and single node setup. 40 func TestObjectAPIPutObjectSingle(t *testing.T) { 41 ExecExtendedObjectLayerTest(t, testObjectAPIPutObject) 42 } 43 44 // Tests validate correctness of PutObject. 45 func testObjectAPIPutObject(obj ObjectLayer, instanceType string, t TestErrHandler) { 46 // Generating cases for which the PutObject fails. 47 bucket := "minio-bucket" 48 object := "minio-object" 49 50 // Create bucket. 51 err := obj.MakeBucketWithLocation(context.Background(), bucket, BucketOptions{}) 52 if err != nil { 53 // Failed to create newbucket, abort. 54 t.Fatalf("%s : %s", instanceType, err.Error()) 55 } 56 57 // Creating a dummy bucket for tests. 58 err = obj.MakeBucketWithLocation(context.Background(), "unused-bucket", BucketOptions{}) 59 if err != nil { 60 // Failed to create newbucket, abort. 
61 t.Fatalf("%s : %s", instanceType, err.Error()) 62 } 63 64 var ( 65 nilBytes []byte 66 data = []byte("hello") 67 fiveMBBytes = bytes.Repeat([]byte("a"), 5*humanize.MiByte) 68 ) 69 invalidMD5 := getMD5Hash([]byte("meh")) 70 invalidMD5Header := md5Header([]byte("meh")) 71 72 testCases := []struct { 73 bucketName string 74 objName string 75 inputData []byte 76 inputMeta map[string]string 77 inputSHA256 string 78 intputDataSize int64 79 // expected error output. 80 expectedMd5 string 81 expectedError error 82 }{ 83 // Test case 1-4. 84 // Cases with invalid bucket name. 85 {".test", "obj", []byte(""), nil, "", 0, "", BucketNotFound{Bucket: ".test"}}, 86 {"------", "obj", []byte(""), nil, "", 0, "", BucketNotFound{Bucket: "------"}}, 87 {"$this-is-not-valid-too", "obj", []byte(""), nil, "", 0, "", 88 BucketNotFound{Bucket: "$this-is-not-valid-too"}}, 89 {"a", "obj", []byte(""), nil, "", 0, "", BucketNotFound{Bucket: "a"}}, 90 91 // Test case - 5. 92 // Case with invalid object names. 93 {bucket, "", []byte(""), nil, "", 0, "", ObjectNameInvalid{Bucket: bucket, Object: ""}}, 94 95 // Test case - 6. 96 // Valid object and bucket names but non-existent bucket. 97 {"abc", "def", []byte(""), nil, "", 0, "", BucketNotFound{Bucket: "abc"}}, 98 99 // Test case - 7. 100 // Input to replicate Md5 mismatch. 101 {bucket, object, []byte(""), map[string]string{"etag": "d41d8cd98f00b204e9800998ecf8427f"}, "", 0, "", 102 hash.BadDigest{ExpectedMD5: "d41d8cd98f00b204e9800998ecf8427f", CalculatedMD5: "d41d8cd98f00b204e9800998ecf8427e"}}, 103 104 // Test case - 8. 105 // With incorrect sha256. 
106 {bucket, object, []byte("abcd"), map[string]string{"etag": "e2fc714c4727ee9395f324cd2e7f331f"}, 107 "88d4266fd4e6338d13b845fcf289579d209c897823b9217da3e161936f031580", int64(len("abcd")), 108 "", hash.SHA256Mismatch{ExpectedSHA256: "88d4266fd4e6338d13b845fcf289579d209c897823b9217da3e161936f031580", 109 CalculatedSHA256: "88d4266fd4e6338d13b845fcf289579d209c897823b9217da3e161936f031589"}}, 110 111 // Test case - 9. 112 // Input with size more than the size of actual data inside the reader. 113 {bucket, object, []byte("abcd"), map[string]string{"etag": "e2fc714c4727ee9395f324cd2e7f331e"}, "", int64(len("abcd") + 1), "", 114 hash.BadDigest{ExpectedMD5: "e2fc714c4727ee9395f324cd2e7f331e", CalculatedMD5: "e2fc714c4727ee9395f324cd2e7f331f"}}, 115 116 // Test case - 10. 117 // Input with size less than the size of actual data inside the reader. 118 {bucket, object, []byte("abcd"), map[string]string{"etag": "900150983cd24fb0d6963f7d28e17f73"}, "", int64(len("abcd") - 1), "", 119 hash.BadDigest{ExpectedMD5: "900150983cd24fb0d6963f7d28e17f73", CalculatedMD5: "900150983cd24fb0d6963f7d28e17f72"}}, 120 121 // Test case - 11-14. 122 // Validating for success cases. 123 {bucket, object, []byte("abcd"), map[string]string{"etag": "e2fc714c4727ee9395f324cd2e7f331f"}, "", int64(len("abcd")), "", nil}, 124 {bucket, object, []byte("efgh"), map[string]string{"etag": "1f7690ebdd9b4caf8fab49ca1757bf27"}, "", int64(len("efgh")), "", nil}, 125 {bucket, object, []byte("ijkl"), map[string]string{"etag": "09a0877d04abf8759f99adec02baf579"}, "", int64(len("ijkl")), "", nil}, 126 {bucket, object, []byte("mnop"), map[string]string{"etag": "e132e96a5ddad6da8b07bba6f6131fef"}, "", int64(len("mnop")), "", nil}, 127 128 // Test case 15-17. 
129 // With no metadata 130 {bucket, object, data, nil, "", int64(len(data)), getMD5Hash(data), nil}, 131 {bucket, object, nilBytes, nil, "", int64(len(nilBytes)), getMD5Hash(nilBytes), nil}, 132 {bucket, object, fiveMBBytes, nil, "", int64(len(fiveMBBytes)), getMD5Hash(fiveMBBytes), nil}, 133 134 // Test case 18-20. 135 // With arbitrary metadata 136 {bucket, object, data, map[string]string{"answer": "42"}, "", int64(len(data)), getMD5Hash(data), nil}, 137 {bucket, object, nilBytes, map[string]string{"answer": "42"}, "", int64(len(nilBytes)), getMD5Hash(nilBytes), nil}, 138 {bucket, object, fiveMBBytes, map[string]string{"answer": "42"}, "", int64(len(fiveMBBytes)), getMD5Hash(fiveMBBytes), nil}, 139 140 // Test case 21-23. 141 // With valid md5sum and sha256. 142 {bucket, object, data, md5Header(data), getSHA256Hash(data), int64(len(data)), getMD5Hash(data), nil}, 143 {bucket, object, nilBytes, md5Header(nilBytes), getSHA256Hash(nilBytes), int64(len(nilBytes)), getMD5Hash(nilBytes), nil}, 144 {bucket, object, fiveMBBytes, md5Header(fiveMBBytes), getSHA256Hash(fiveMBBytes), int64(len(fiveMBBytes)), getMD5Hash(fiveMBBytes), nil}, 145 146 // Test case 24-26. 147 // data with invalid md5sum in header 148 {bucket, object, data, invalidMD5Header, "", int64(len(data)), getMD5Hash(data), 149 hash.BadDigest{ExpectedMD5: invalidMD5, CalculatedMD5: getMD5Hash(data)}}, 150 {bucket, object, nilBytes, invalidMD5Header, "", int64(len(nilBytes)), getMD5Hash(nilBytes), 151 hash.BadDigest{ExpectedMD5: invalidMD5, CalculatedMD5: getMD5Hash(nilBytes)}}, 152 {bucket, object, fiveMBBytes, invalidMD5Header, "", int64(len(fiveMBBytes)), getMD5Hash(fiveMBBytes), 153 hash.BadDigest{ExpectedMD5: invalidMD5, CalculatedMD5: getMD5Hash(fiveMBBytes)}}, 154 155 // Test case 27-29. 
156 // data with size different from the actual number of bytes available in the reader 157 {bucket, object, data, nil, "", int64(len(data) - 1), getMD5Hash(data[:len(data)-1]), nil}, 158 {bucket, object, nilBytes, nil, "", int64(len(nilBytes) + 1), getMD5Hash(nilBytes), IncompleteBody{Bucket: bucket, Object: object}}, 159 {bucket, object, fiveMBBytes, nil, "", 0, getMD5Hash(fiveMBBytes), nil}, 160 161 // Test case 30 162 // valid data with X-Amz-Meta- meta 163 {bucket, object, data, map[string]string{"X-Amz-Meta-AppID": "a42"}, "", int64(len(data)), getMD5Hash(data), nil}, 164 165 // Test case 31 166 // Put an empty object with a trailing slash 167 {bucket, "emptydir/", []byte{}, nil, "", 0, getMD5Hash([]byte{}), nil}, 168 // Test case 32 169 // Put an object inside the empty directory 170 {bucket, "emptydir/" + object, data, nil, "", int64(len(data)), getMD5Hash(data), nil}, 171 // Test case 33 172 // Put the empty object with a trailing slash again (refer to Test case 31), this needs to succeed 173 {bucket, "emptydir/", []byte{}, nil, "", 0, getMD5Hash([]byte{}), nil}, 174 } 175 176 for i, testCase := range testCases { 177 in := mustGetPutObjReader(t, bytes.NewReader(testCase.inputData), testCase.intputDataSize, testCase.inputMeta["etag"], testCase.inputSHA256) 178 objInfo, actualErr := obj.PutObject(context.Background(), testCase.bucketName, testCase.objName, in, ObjectOptions{UserDefined: testCase.inputMeta}) 179 if actualErr != nil && testCase.expectedError == nil { 180 t.Errorf("Test %d: %s: Expected to pass, but failed with: error %s.", i+1, instanceType, actualErr.Error()) 181 continue 182 } 183 if actualErr == nil && testCase.expectedError != nil { 184 t.Errorf("Test %d: %s: Expected to fail with error \"%s\", but passed instead.", i+1, instanceType, testCase.expectedError.Error()) 185 continue 186 } 187 // Failed as expected, but does it fail for the expected reason. 
188 if actualErr != nil && actualErr != testCase.expectedError { 189 t.Errorf("Test %d: %s: Expected to fail with error \"%v\", but instead failed with error \"%v\" instead.", i+1, instanceType, testCase.expectedError, actualErr) 190 continue 191 } 192 // Test passes as expected, but the output values are verified for correctness here. 193 if actualErr == nil { 194 // Asserting whether the md5 output is correct. 195 if expectedMD5, ok := testCase.inputMeta["etag"]; ok && expectedMD5 != objInfo.ETag { 196 t.Errorf("Test %d: %s: Calculated Md5 different from the actual one %s.", i+1, instanceType, objInfo.ETag) 197 continue 198 } 199 } 200 } 201 } 202 203 // Wrapper for calling PutObject tests for both Erasure multiple disks case 204 // when quorum is not available. 205 func TestObjectAPIPutObjectDiskNotFound(t *testing.T) { 206 ExecObjectLayerDiskAlteredTest(t, testObjectAPIPutObjectDiskNotFound) 207 } 208 209 // Tests validate correctness of PutObject. 210 func testObjectAPIPutObjectDiskNotFound(obj ObjectLayer, instanceType string, disks []string, t *testing.T) { 211 // Generating cases for which the PutObject fails. 212 bucket := "minio-bucket" 213 object := "minio-object" 214 215 // Create bucket. 216 err := obj.MakeBucketWithLocation(context.Background(), bucket, BucketOptions{}) 217 if err != nil { 218 // Failed to create newbucket, abort. 219 t.Fatalf("%s : %s", instanceType, err.Error()) 220 } 221 222 // Creating a dummy bucket for tests. 223 err = obj.MakeBucketWithLocation(context.Background(), "unused-bucket", BucketOptions{}) 224 if err != nil { 225 // Failed to create newbucket, abort. 226 t.Fatalf("%s : %s", instanceType, err.Error()) 227 } 228 229 // Take 4 disks down, one more we loose quorum on 16 disk node. 
230 for _, disk := range disks[:4] { 231 os.RemoveAll(disk) 232 } 233 234 testCases := []struct { 235 bucketName string 236 objName string 237 inputData []byte 238 inputMeta map[string]string 239 intputDataSize int64 240 // flag indicating whether the test should pass. 241 shouldPass bool 242 // expected error output. 243 expectedMd5 string 244 expectedError error 245 }{ 246 // Validating for success cases. 247 {bucket, object, []byte("abcd"), map[string]string{"etag": "e2fc714c4727ee9395f324cd2e7f331f"}, int64(len("abcd")), true, "", nil}, 248 {bucket, object, []byte("efgh"), map[string]string{"etag": "1f7690ebdd9b4caf8fab49ca1757bf27"}, int64(len("efgh")), true, "", nil}, 249 {bucket, object, []byte("ijkl"), map[string]string{"etag": "09a0877d04abf8759f99adec02baf579"}, int64(len("ijkl")), true, "", nil}, 250 {bucket, object, []byte("mnop"), map[string]string{"etag": "e132e96a5ddad6da8b07bba6f6131fef"}, int64(len("mnop")), true, "", nil}, 251 } 252 253 sha256sum := "" 254 for i, testCase := range testCases { 255 objInfo, actualErr := obj.PutObject(context.Background(), testCase.bucketName, testCase.objName, mustGetPutObjReader(t, bytes.NewReader(testCase.inputData), testCase.intputDataSize, testCase.inputMeta["etag"], sha256sum), ObjectOptions{UserDefined: testCase.inputMeta}) 256 if actualErr != nil && testCase.shouldPass { 257 t.Errorf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s.", i+1, instanceType, actualErr.Error()) 258 } 259 if actualErr == nil && !testCase.shouldPass { 260 t.Errorf("Test %d: %s: Expected to fail with <ERROR> \"%s\", but passed instead.", i+1, instanceType, testCase.expectedError.Error()) 261 } 262 // Failed as expected, but does it fail for the expected reason. 
263 if actualErr != nil && !testCase.shouldPass { 264 if testCase.expectedError.Error() != actualErr.Error() { 265 t.Errorf("Test %d: %s: Expected to fail with error \"%s\", but instead failed with error \"%s\" instead.", i+1, 266 instanceType, testCase.expectedError.Error(), actualErr.Error()) 267 } 268 } 269 // Test passes as expected, but the output values are verified for correctness here. 270 if actualErr == nil && testCase.shouldPass { 271 // Asserting whether the md5 output is correct. 272 if testCase.inputMeta["etag"] != objInfo.ETag { 273 t.Errorf("Test %d: %s: Calculated Md5 different from the actual one %s.", i+1, instanceType, objInfo.ETag) 274 } 275 } 276 } 277 278 // This causes quorum failure verify. 279 os.RemoveAll(disks[len(disks)-1]) 280 281 // Validate the last test. 282 testCase := struct { 283 bucketName string 284 objName string 285 inputData []byte 286 inputMeta map[string]string 287 intputDataSize int64 288 // flag indicating whether the test should pass. 289 shouldPass bool 290 // expected error output. 291 expectedMd5 string 292 expectedError error 293 }{ 294 bucket, 295 object, 296 []byte("mnop"), 297 map[string]string{"etag": "e132e96a5ddad6da8b07bba6f6131fef"}, 298 int64(len("mnop")), 299 false, 300 "", 301 errErasureWriteQuorum, 302 } 303 304 _, actualErr := obj.PutObject(context.Background(), testCase.bucketName, testCase.objName, mustGetPutObjReader(t, bytes.NewReader(testCase.inputData), testCase.intputDataSize, testCase.inputMeta["etag"], sha256sum), ObjectOptions{UserDefined: testCase.inputMeta}) 305 if actualErr != nil && testCase.shouldPass { 306 t.Errorf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s.", len(testCases)+1, instanceType, actualErr.Error()) 307 } 308 // Failed as expected, but does it fail for the expected reason. 
309 if actualErr != nil && !testCase.shouldPass { 310 if !errors.Is(actualErr, testCase.expectedError) { 311 t.Errorf("Test %d: %s: Expected to fail with error \"%s\", but instead failed with error \"%s\" instead.", len(testCases)+1, instanceType, testCase.expectedError.Error(), actualErr.Error()) 312 } 313 } 314 } 315 316 // Wrapper for calling PutObject tests for both Erasure multiple disks and single node setup. 317 func TestObjectAPIPutObjectStaleFiles(t *testing.T) { 318 ExecObjectLayerStaleFilesTest(t, testObjectAPIPutObjectStaleFiles) 319 } 320 321 // Tests validate correctness of PutObject. 322 func testObjectAPIPutObjectStaleFiles(obj ObjectLayer, instanceType string, disks []string, t *testing.T) { 323 // Generating cases for which the PutObject fails. 324 bucket := "minio-bucket" 325 object := "minio-object" 326 327 // Create bucket. 328 err := obj.MakeBucketWithLocation(context.Background(), bucket, BucketOptions{}) 329 if err != nil { 330 // Failed to create newbucket, abort. 331 t.Fatalf("%s : %s", instanceType, err.Error()) 332 } 333 334 data := []byte("hello, world") 335 // Create object. 336 _, err = obj.PutObject(context.Background(), bucket, object, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{}) 337 if err != nil { 338 // Failed to create object, abort. 339 t.Fatalf("%s : %s", instanceType, err.Error()) 340 } 341 342 for _, disk := range disks { 343 tmpMetaDir := path.Join(disk, minioMetaTmpBucket) 344 files, err := ioutil.ReadDir(tmpMetaDir) 345 if err != nil { 346 t.Fatal(err) 347 } 348 var found bool 349 for _, fi := range files { 350 if fi.Name() == ".trash" { 351 continue 352 } 353 found = true 354 } 355 if found { 356 t.Fatalf("%s: expected: empty, got: non-empty %#v", minioMetaTmpBucket, files) 357 } 358 } 359 } 360 361 // Wrapper for calling Multipart PutObject tests for both Erasure multiple disks and single node setup. 
func TestObjectAPIMultipartPutObjectStaleFiles(t *testing.T) {
	ExecObjectLayerStaleFilesTest(t, testObjectAPIMultipartPutObjectStaleFiles)
}

// Tests validate correctness of PutObject: uploads two parts, completes
// the multipart upload, then verifies no stale temporary files remain in
// each disk's minioMetaTmpBucket directory (".trash" entries excepted).
func testObjectAPIMultipartPutObjectStaleFiles(obj ObjectLayer, instanceType string, disks []string, t *testing.T) {
	// Generating cases for which the PutObject fails.
	bucket := "minio-bucket"
	object := "minio-object"

	// Create bucket.
	err := obj.MakeBucketWithLocation(context.Background(), bucket, BucketOptions{})
	if err != nil {
		// Failed to create new bucket, abort.
		t.Fatalf("%s : %s", instanceType, err.Error())
	}
	opts := ObjectOptions{}
	// Initiate Multipart Upload on the above created bucket.
	uploadID, err := obj.NewMultipartUpload(context.Background(), bucket, object, opts)
	if err != nil {
		// Failed to create NewMultipartUpload, abort.
		t.Fatalf("%s : %s", instanceType, err.Error())
	}

	// Upload part1: 5 MiB of "a", the MD5 of which becomes the part's ETag.
	fiveMBBytes := bytes.Repeat([]byte("a"), 5*humanize.MiByte)
	md5Writer := md5.New()
	md5Writer.Write(fiveMBBytes)
	etag1 := hex.EncodeToString(md5Writer.Sum(nil))
	sha256sum := ""
	_, err = obj.PutObjectPart(context.Background(), bucket, object, uploadID, 1, mustGetPutObjReader(t, bytes.NewReader(fiveMBBytes), int64(len(fiveMBBytes)), etag1, sha256sum), opts)
	if err != nil {
		// Failed to upload object part, abort.
		t.Fatalf("%s : %s", instanceType, err.Error())
	}

	// Upload part2: a short final part.
	data := []byte("hello, world")
	md5Writer = md5.New()
	md5Writer.Write(data)
	etag2 := hex.EncodeToString(md5Writer.Sum(nil))
	_, err = obj.PutObjectPart(context.Background(), bucket, object, uploadID, 2, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), etag2, sha256sum), opts)
	if err != nil {
		// Failed to upload object part, abort.
		t.Fatalf("%s : %s", instanceType, err.Error())
	}

	// Complete multipart upload with both parts in order.
	parts := []CompletePart{
		{ETag: etag1, PartNumber: 1},
		{ETag: etag2, PartNumber: 2},
	}
	_, err = obj.CompleteMultipartUpload(context.Background(), bucket, object, uploadID, parts, ObjectOptions{})
	if err != nil {
		// Failed to complete multipart upload, abort.
		t.Fatalf("%s : %s", instanceType, err.Error())
	}

	// Verify every disk's tmp meta directory is clean after completion.
	for _, disk := range disks {
		tmpMetaDir := path.Join(disk, minioMetaTmpBucket)
		files, err := ioutil.ReadDir(tmpMetaDir)
		if err != nil {
			// It's OK for tmpMetaDir to not exist at all.
			if osIsNotExist(err) {
				continue
			}

			// Print the error
			t.Errorf("%s", err)
		}

		var found bool
		for _, fi := range files {
			// ".trash" is expected housekeeping, not a stale-file leak.
			if fi.Name() == ".trash" {
				continue
			}
			found = true
			break
		}

		if found {
			t.Fatalf("%s: expected: empty, got: non-empty. content: %#v", tmpMetaDir, files)
		}
	}
}

// Benchmarks for ObjectLayer.PutObject().
// The intent is to benchmark PutObject for various sizes ranging from few bytes to 100MB.
// Also each of these Benchmarks are run both Erasure and FS backends.

// BenchmarkPutObjectVerySmallFS - Benchmark FS.PutObject() for object size of 10 bytes.
func BenchmarkPutObjectVerySmallFS(b *testing.B) {
	benchmarkPutObject(b, "FS", 10)
}

// BenchmarkPutObjectVerySmallErasure - Benchmark Erasure.PutObject() for object size of 10 bytes.
func BenchmarkPutObjectVerySmallErasure(b *testing.B) {
	benchmarkPutObject(b, "Erasure", 10)
}

// BenchmarkPutObject10KbFS - Benchmark FS.PutObject() for object size of 10KB.
func BenchmarkPutObject10KbFS(b *testing.B) {
	benchmarkPutObject(b, "FS", 10*humanize.KiByte)
}

// BenchmarkPutObject10KbErasure - Benchmark Erasure.PutObject() for object size of 10KB.
468 func BenchmarkPutObject10KbErasure(b *testing.B) { 469 benchmarkPutObject(b, "Erasure", 10*humanize.KiByte) 470 } 471 472 // BenchmarkPutObject100KbFS - Benchmark FS.PutObject() for object size of 100KB. 473 func BenchmarkPutObject100KbFS(b *testing.B) { 474 benchmarkPutObject(b, "FS", 100*humanize.KiByte) 475 } 476 477 // BenchmarkPutObject100KbErasure - Benchmark Erasure.PutObject() for object size of 100KB. 478 func BenchmarkPutObject100KbErasure(b *testing.B) { 479 benchmarkPutObject(b, "Erasure", 100*humanize.KiByte) 480 } 481 482 // BenchmarkPutObject1MbFS - Benchmark FS.PutObject() for object size of 1MB. 483 func BenchmarkPutObject1MbFS(b *testing.B) { 484 benchmarkPutObject(b, "FS", 1*humanize.MiByte) 485 } 486 487 // BenchmarkPutObject1MbErasure - Benchmark Erasure.PutObject() for object size of 1MB. 488 func BenchmarkPutObject1MbErasure(b *testing.B) { 489 benchmarkPutObject(b, "Erasure", 1*humanize.MiByte) 490 } 491 492 // BenchmarkPutObject5MbFS - Benchmark FS.PutObject() for object size of 5MB. 493 func BenchmarkPutObject5MbFS(b *testing.B) { 494 benchmarkPutObject(b, "FS", 5*humanize.MiByte) 495 } 496 497 // BenchmarkPutObject5MbErasure - Benchmark Erasure.PutObject() for object size of 5MB. 498 func BenchmarkPutObject5MbErasure(b *testing.B) { 499 benchmarkPutObject(b, "Erasure", 5*humanize.MiByte) 500 } 501 502 // BenchmarkPutObject10MbFS - Benchmark FS.PutObject() for object size of 10MB. 503 func BenchmarkPutObject10MbFS(b *testing.B) { 504 benchmarkPutObject(b, "FS", 10*humanize.MiByte) 505 } 506 507 // BenchmarkPutObject10MbErasure - Benchmark Erasure.PutObject() for object size of 10MB. 508 func BenchmarkPutObject10MbErasure(b *testing.B) { 509 benchmarkPutObject(b, "Erasure", 10*humanize.MiByte) 510 } 511 512 // BenchmarkPutObject25MbFS - Benchmark FS.PutObject() for object size of 25MB. 
513 func BenchmarkPutObject25MbFS(b *testing.B) { 514 benchmarkPutObject(b, "FS", 25*humanize.MiByte) 515 516 } 517 518 // BenchmarkPutObject25MbErasure - Benchmark Erasure.PutObject() for object size of 25MB. 519 func BenchmarkPutObject25MbErasure(b *testing.B) { 520 benchmarkPutObject(b, "Erasure", 25*humanize.MiByte) 521 } 522 523 // BenchmarkPutObject50MbFS - Benchmark FS.PutObject() for object size of 50MB. 524 func BenchmarkPutObject50MbFS(b *testing.B) { 525 benchmarkPutObject(b, "FS", 50*humanize.MiByte) 526 } 527 528 // BenchmarkPutObject50MbErasure - Benchmark Erasure.PutObject() for object size of 50MB. 529 func BenchmarkPutObject50MbErasure(b *testing.B) { 530 benchmarkPutObject(b, "Erasure", 50*humanize.MiByte) 531 } 532 533 // parallel benchmarks for ObjectLayer.PutObject() . 534 535 // BenchmarkParallelPutObjectVerySmallFS - BenchmarkParallel FS.PutObject() for object size of 10 bytes. 536 func BenchmarkParallelPutObjectVerySmallFS(b *testing.B) { 537 benchmarkPutObjectParallel(b, "FS", 10) 538 } 539 540 // BenchmarkParallelPutObjectVerySmallErasure - BenchmarkParallel Erasure.PutObject() for object size of 10 bytes. 541 func BenchmarkParallelPutObjectVerySmallErasure(b *testing.B) { 542 benchmarkPutObjectParallel(b, "Erasure", 10) 543 } 544 545 // BenchmarkParallelPutObject10KbFS - BenchmarkParallel FS.PutObject() for object size of 10KB. 546 func BenchmarkParallelPutObject10KbFS(b *testing.B) { 547 benchmarkPutObjectParallel(b, "FS", 10*humanize.KiByte) 548 } 549 550 // BenchmarkParallelPutObject10KbErasure - BenchmarkParallel Erasure.PutObject() for object size of 10KB. 551 func BenchmarkParallelPutObject10KbErasure(b *testing.B) { 552 benchmarkPutObjectParallel(b, "Erasure", 10*humanize.KiByte) 553 } 554 555 // BenchmarkParallelPutObject100KbFS - BenchmarkParallel FS.PutObject() for object size of 100KB. 
556 func BenchmarkParallelPutObject100KbFS(b *testing.B) { 557 benchmarkPutObjectParallel(b, "FS", 100*humanize.KiByte) 558 } 559 560 // BenchmarkParallelPutObject100KbErasure - BenchmarkParallel Erasure.PutObject() for object size of 100KB. 561 func BenchmarkParallelPutObject100KbErasure(b *testing.B) { 562 benchmarkPutObjectParallel(b, "Erasure", 100*humanize.KiByte) 563 } 564 565 // BenchmarkParallelPutObject1MbFS - BenchmarkParallel FS.PutObject() for object size of 1MB. 566 func BenchmarkParallelPutObject1MbFS(b *testing.B) { 567 benchmarkPutObjectParallel(b, "FS", 1*humanize.MiByte) 568 } 569 570 // BenchmarkParallelPutObject1MbErasure - BenchmarkParallel Erasure.PutObject() for object size of 1MB. 571 func BenchmarkParallelPutObject1MbErasure(b *testing.B) { 572 benchmarkPutObjectParallel(b, "Erasure", 1*humanize.MiByte) 573 } 574 575 // BenchmarkParallelPutObject5MbFS - BenchmarkParallel FS.PutObject() for object size of 5MB. 576 func BenchmarkParallelPutObject5MbFS(b *testing.B) { 577 benchmarkPutObjectParallel(b, "FS", 5*humanize.MiByte) 578 } 579 580 // BenchmarkParallelPutObject5MbErasure - BenchmarkParallel Erasure.PutObject() for object size of 5MB. 581 func BenchmarkParallelPutObject5MbErasure(b *testing.B) { 582 benchmarkPutObjectParallel(b, "Erasure", 5*humanize.MiByte) 583 } 584 585 // BenchmarkParallelPutObject10MbFS - BenchmarkParallel FS.PutObject() for object size of 10MB. 586 func BenchmarkParallelPutObject10MbFS(b *testing.B) { 587 benchmarkPutObjectParallel(b, "FS", 10*humanize.MiByte) 588 } 589 590 // BenchmarkParallelPutObject10MbErasure - BenchmarkParallel Erasure.PutObject() for object size of 10MB. 591 func BenchmarkParallelPutObject10MbErasure(b *testing.B) { 592 benchmarkPutObjectParallel(b, "Erasure", 10*humanize.MiByte) 593 } 594 595 // BenchmarkParallelPutObject25MbFS - BenchmarkParallel FS.PutObject() for object size of 25MB. 
596 func BenchmarkParallelPutObject25MbFS(b *testing.B) { 597 benchmarkPutObjectParallel(b, "FS", 25*humanize.MiByte) 598 599 } 600 601 // BenchmarkParallelPutObject25MbErasure - BenchmarkParallel Erasure.PutObject() for object size of 25MB. 602 func BenchmarkParallelPutObject25MbErasure(b *testing.B) { 603 benchmarkPutObjectParallel(b, "Erasure", 25*humanize.MiByte) 604 }