storj.io/minio@v0.0.0-20230509071714-0cbc90f649b1/cmd/erasure-object_test.go

/*
 * MinIO Cloud Storage, (C) 2016-2020 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
	"bytes"
	"context"
	crand "crypto/rand"
	"errors"
	"io"
	"io/ioutil"
	"os"
	"strconv"
	"testing"

	humanize "github.com/dustin/go-humanize"

	"storj.io/minio/cmd/config/storageclass"
)

func TestRepeatPutObjectPart(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	var objLayer ObjectLayer
	var disks []string
	var err error
	var opts ObjectOptions

	objLayer, disks, err = prepareErasure16(ctx)
	if err != nil {
		t.Fatal(err)
	}

	// Clean up the temporary test directories.
	defer objLayer.Shutdown(context.Background())
	defer removeRoots(disks)

	err = objLayer.MakeBucketWithLocation(ctx, "bucket1", BucketOptions{})
	if err != nil {
		t.Fatal(err)
	}

	uploadID, err := objLayer.NewMultipartUpload(ctx, "bucket1", "mpartObj1", opts)
	if err != nil {
		t.Fatal(err)
	}
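	// Use a 5 MiB part, the minimum size S3 allows for a non-terminal multipart part.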
	fiveMBBytes := bytes.Repeat([]byte("a"), 5*humanize.MiByte)
	md5Hex := getMD5Hash(fiveMBBytes)
	_, err = objLayer.PutObjectPart(ctx, "bucket1", "mpartObj1", uploadID, 1, mustGetPutObjReader(t, bytes.NewReader(fiveMBBytes), 5*humanize.MiByte, md5Hex, ""), opts)
	if err != nil {
		t.Fatal(err)
	}
	// PutObjectPart should succeed even if part already exists. ref: https://github.com/minio/minio/issues/1930
	_, err = objLayer.PutObjectPart(ctx, "bucket1", "mpartObj1", uploadID, 1, mustGetPutObjReader(t, bytes.NewReader(fiveMBBytes), 5*humanize.MiByte, md5Hex, ""), opts)
	if err != nil {
		t.Fatal(err)
	}
}

func TestErasureDeleteObjectBasic(t *testing.T) {
	testCases := []struct {
		bucket      string
		object      string
		expectedErr error
	}{
		{".test", "dir/obj", BucketNameInvalid{Bucket: ".test"}},
		{"----", "dir/obj", BucketNameInvalid{Bucket: "----"}},
		{"bucket", "", ObjectNameInvalid{Bucket: "bucket", Object: ""}},
		{"bucket", "doesnotexist", ObjectNotFound{Bucket: "bucket", Object: "doesnotexist"}},
		{"bucket", "dir/doesnotexist", ObjectNotFound{Bucket: "bucket", Object: "dir/doesnotexist"}},
		{"bucket", "dir", ObjectNotFound{Bucket: "bucket", Object: "dir"}},
		{"bucket", "dir/", ObjectNotFound{Bucket: "bucket", Object: "dir/"}},
		{"bucket", "dir/obj", nil},
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Create an instance of xl backend
	xl, fsDirs, err := prepareErasure16(ctx)
	if err != nil {
		t.Fatal(err)
	}
	defer xl.Shutdown(context.Background())

	err = xl.MakeBucketWithLocation(ctx, "bucket", BucketOptions{})
	if err != nil {
		t.Fatal(err)
	}

	// Create object "dir/obj" under bucket "bucket" so the last test case, which expects the delete to succeed, can pass.
	_, err = xl.PutObject(ctx, "bucket", "dir/obj", mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), ObjectOptions{})
	if err != nil {
		t.Fatalf("Erasure Object upload failed: <ERROR> %s", err)
	}
	for _, test := range testCases {
		test := test
		t.Run("", func(t *testing.T) {
			_, err := xl.GetObjectInfo(ctx, "bucket", "dir/obj", ObjectOptions{})
			if err != nil {
				t.Fatal("dir/obj not found before last test")
			}
			_, actualErr := xl.DeleteObject(ctx, test.bucket, test.object, ObjectOptions{})
			if test.expectedErr != nil && actualErr != test.expectedErr {
				t.Errorf("Expected to fail with %s, but failed with %s", test.expectedErr, actualErr)
			}
			if test.expectedErr == nil && actualErr != nil {
				t.Errorf("Expected to pass, but failed with %s", actualErr)
			}
		})
	}
	// Cleanup backend directories
	removeRoots(fsDirs)
}

func TestErasureDeleteObjectsErasureSet(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	var objs []*erasureObjects
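	// Build 32 independent erasure sets of 16 disks each; together they back a single multi-set deployment.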
	for i := 0; i < 32; i++ {
		obj, fsDirs, err := prepareErasure(ctx, 16)
		if err != nil {
			t.Fatal("Unable to initialize 'Erasure' object layer.", err)
		}
		// Remove all dirs.
		for _, dir := range fsDirs {
			defer os.RemoveAll(dir)
		}
		z := obj.(*erasureServerPools)
		xl := z.serverPools[0].sets[0]
		objs = append(objs, xl)
	}

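	// Combine the 32 sets into one erasureSets layer; the CRCMOD distribution algorithm decides which set an object name hashes to.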
	erasureSets := &erasureSets{sets: objs, distributionAlgo: "CRCMOD"}

	type testCaseType struct {
		bucket string
		object string
	}

	bucketName := "bucket"
	testCases := []testCaseType{
		{bucketName, "dir/obj1"},
		{bucketName, "dir/obj2"},
		{bucketName, "obj3"},
		{bucketName, "obj_4"},
	}

	err := erasureSets.MakeBucketWithLocation(ctx, bucketName, BucketOptions{})
	if err != nil {
		t.Fatal(err)
	}

	for _, testCase := range testCases {
		_, err = erasureSets.PutObject(ctx, testCase.bucket, testCase.object,
			mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), ObjectOptions{})
		if err != nil {
			t.Fatalf("Erasure Object upload failed: <ERROR> %s", err)
		}
	}

	toObjectNames := func(testCases []testCaseType) []ObjectToDelete {
		names := make([]ObjectToDelete, len(testCases))
		for i := range testCases {
			names[i] = ObjectToDelete{ObjectName: testCases[i].object}
		}
		return names
	}

	objectNames := toObjectNames(testCases)
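	// Bulk-delete all four objects and verify that no per-object error is reported.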
	_, delErrs := erasureSets.DeleteObjects(ctx, bucketName, objectNames, ObjectOptions{})

	for i := range delErrs {
		if delErrs[i] != nil {
			t.Errorf("Failed to remove object `%v` with the error: `%v`", objectNames[i], delErrs[i])
		}
	}

	for _, test := range testCases {
		_, statErr := erasureSets.GetObjectInfo(ctx, test.bucket, test.object, ObjectOptions{})
		switch statErr.(type) {
		case ObjectNotFound:
		default:
			t.Fatalf("Object %s is not removed", test.bucket+SlashSeparator+test.object)
		}
	}
}

func TestErasureDeleteObjectDiskNotFound(t *testing.T) {
	restoreGlobalStorageClass := globalStorageClass
	defer func() {
		globalStorageClass = restoreGlobalStorageClass
	}()

	globalStorageClass = storageclass.Config{}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Create an instance of xl backend.
	obj, fsDirs, err := prepareErasure16(ctx)
	if err != nil {
		t.Fatal(err)
	}
	// Cleanup backend directories
	defer obj.Shutdown(context.Background())
	defer removeRoots(fsDirs)

	z := obj.(*erasureServerPools)
	xl := z.serverPools[0].sets[0]

	// Create "bucket"
	err = obj.MakeBucketWithLocation(ctx, "bucket", BucketOptions{})
	if err != nil {
		t.Fatal(err)
	}

	bucket := "bucket"
	object := "object"
	opts := ObjectOptions{}
	// Create object "object" under bucket "bucket".
	_, err = obj.PutObject(ctx, bucket, object, mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), opts)
	if err != nil {
		t.Fatal(err)
	}
	// For a 16 disk setup with the default parity of 4, the write quorum is 12. To simulate
	// disks being unavailable while quorum can still be met, replace four disks with faulty
	// ones, leaving enough healthy disks behind.
	erasureDisks := xl.getDisks()
	z.serverPools[0].erasureDisksMu.Lock()
	xl.getDisks = func() []StorageAPI {
		for i := range erasureDisks[:4] {
			erasureDisks[i] = newNaughtyDisk(erasureDisks[i], nil, errFaultyDisk)
		}
		return erasureDisks
	}

	z.serverPools[0].erasureDisksMu.Unlock()
	_, err = obj.DeleteObject(ctx, bucket, object, ObjectOptions{})
	if err != nil {
		t.Fatal(err)
	}

	// Create object "object" under bucket "bucket" again.
	_, err = obj.PutObject(ctx, bucket, object, mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), opts)
	if err != nil {
		t.Fatal(err)
	}

	// Remove two more disks to 'lose' quorum, by setting them to nil.
	erasureDisks = xl.getDisks()
	z.serverPools[0].erasureDisksMu.Lock()
	xl.getDisks = func() []StorageAPI {
		erasureDisks[7] = nil
		erasureDisks[8] = nil
		return erasureDisks
	}

	z.serverPools[0].erasureDisksMu.Unlock()
	_, err = obj.DeleteObject(ctx, bucket, object, ObjectOptions{})
	// With six of the sixteen disks unavailable, the write quorum can no longer be met, hence the errErasureWriteQuorum error.
	if !errors.Is(err, errErasureWriteQuorum) {
		t.Errorf("Expected deleteObject to fail with %v, but failed with %v", toObjectErr(errErasureWriteQuorum, bucket, object), err)
	}
}

func TestGetObjectNoQuorum(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Create an instance of xl backend.
	obj, fsDirs, err := prepareErasure16(ctx)
	if err != nil {
		t.Fatal(err)
	}
	// Cleanup backend directories.
	defer obj.Shutdown(context.Background())
	defer removeRoots(fsDirs)

	z := obj.(*erasureServerPools)
	xl := z.serverPools[0].sets[0]

	// Create "bucket"
	err = obj.MakeBucketWithLocation(ctx, "bucket", BucketOptions{})
	if err != nil {
		t.Fatal(err)
	}

	bucket := "bucket"
	object := "object"
	opts := ObjectOptions{}
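	// Use a payload of 16x smallFileThreshold so the object data should land in separate part files rather than being inlined into xl.meta.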
	buf := make([]byte, smallFileThreshold*16)
	if _, err = io.ReadFull(crand.Reader, buf); err != nil {
		t.Fatal(err)
	}

	// Test use case 1: All disks are online, xl.meta is present on each disk, but the data parts are missing
	_, err = obj.PutObject(ctx, bucket, object, mustGetPutObjReader(t, bytes.NewReader(buf), int64(len(buf)), "", ""), opts)
	if err != nil {
		t.Fatal(err)
	}

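	// Delete everything except xl.meta on every disk, wiping out the object's data.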
	for _, disk := range xl.getDisks() {
		files, _ := disk.ListDir(ctx, bucket, object, -1)
		for _, file := range files {
			if file != "xl.meta" {
				disk.Delete(ctx, bucket, pathJoin(object, file), true)
			}
		}
	}

	err = xl.GetObject(ctx, bucket, object, 0, int64(len(buf)), ioutil.Discard, "", opts)
	if err != toObjectErr(errFileNotFound, bucket, object) {
		t.Errorf("Expected GetObject to fail with %v, but failed with %v", toObjectErr(errFileNotFound, bucket, object), err)
	}

	// Test use case 2: Make 9 disks offline, which leaves fewer than the read quorum of disks
	// in a 16 disk Erasure setup. The original disks are 'replaced' with
	// naughtyDisks that fail after 'f' successful StorageAPI method
	// invocations, where f ranges over [0,2).

	// Create "object" under "bucket".
	_, err = obj.PutObject(ctx, bucket, object, mustGetPutObjReader(t, bytes.NewReader(buf), int64(len(buf)), "", ""), opts)
	if err != nil {
		t.Fatal(err)
	}

	for f := 0; f < 2; f++ {
		diskErrors := make(map[int]error)
		for i := 0; i <= f; i++ {
			diskErrors[i] = nil
		}
		erasureDisks := xl.getDisks()
		for i := range erasureDisks[:9] {
			switch diskType := erasureDisks[i].(type) {
			case *naughtyDisk:
				erasureDisks[i] = newNaughtyDisk(diskType.disk, diskErrors, errFaultyDisk)
			default:
				erasureDisks[i] = newNaughtyDisk(erasureDisks[i], diskErrors, errFaultyDisk)
			}
		}
		z.serverPools[0].erasureDisksMu.Lock()
		xl.getDisks = func() []StorageAPI {
			return erasureDisks
		}
		z.serverPools[0].erasureDisksMu.Unlock()
		// Fetch object from store.
		err = xl.GetObject(ctx, bucket, object, 0, int64(len("abcd")), ioutil.Discard, "", opts)
		if err != toObjectErr(errErasureReadQuorum, bucket, object) {
			t.Errorf("Expected GetObject to fail with %v, but failed with %v", toObjectErr(errErasureReadQuorum, bucket, object), err)
		}
	}

}

func TestHeadObjectNoQuorum(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Create an instance of xl backend.
	obj, fsDirs, err := prepareErasure16(ctx)
	if err != nil {
		t.Fatal(err)
	}
	// Cleanup backend directories.
	defer obj.Shutdown(context.Background())
	defer removeRoots(fsDirs)

	z := obj.(*erasureServerPools)
	xl := z.serverPools[0].sets[0]

	// Create "bucket"
	err = obj.MakeBucketWithLocation(ctx, "bucket", BucketOptions{})
	if err != nil {
		t.Fatal(err)
	}

	bucket := "bucket"
	object := "object"
	opts := ObjectOptions{}

	// Test use case 1: All disks are online, xl.meta is present on each disk, but the data parts are missing
	_, err = obj.PutObject(ctx, bucket, object, mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), opts)
	if err != nil {
		t.Fatal(err)
	}
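	// Delete everything except xl.meta on every disk, wiping out the object's data.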
	for _, disk := range xl.getDisks() {
		files, _ := disk.ListDir(ctx, bucket, object, -1)
		for _, file := range files {
			if file != "xl.meta" {
				disk.Delete(ctx, bucket, pathJoin(object, file), true)
			}
		}
	}

	_, err = xl.GetObjectInfo(ctx, bucket, object, opts)
	if err != nil {
		t.Errorf("Expected StatObject to succeed even when the data dirs are not found, but it failed with %v", err)
	}

	// Test use case 2: Take 10 disks offline by setting them to nil, which leaves fewer than
	// the read quorum of disks in a 16 disk Erasure setup.

	// Create "object" under "bucket".
	_, err = obj.PutObject(ctx, bucket, object, mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), opts)
	if err != nil {
		t.Fatal(err)
	}

	erasureDisks := xl.getDisks()
	for i := range erasureDisks[:10] {
		erasureDisks[i] = nil
	}

	z.serverPools[0].erasureDisksMu.Lock()
	xl.getDisks = func() []StorageAPI {
		return erasureDisks
	}
	z.serverPools[0].erasureDisksMu.Unlock()

	// Fetch object from store.
	_, err = xl.GetObjectInfo(ctx, bucket, object, opts)
	if err != toObjectErr(errErasureReadQuorum, bucket, object) {
		t.Errorf("Expected getObjectInfo to fail with %v, but failed with %v", toObjectErr(errErasureReadQuorum, bucket, object), err)
	}
}

func TestPutObjectNoQuorum(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Create an instance of xl backend.
	obj, fsDirs, err := prepareErasure16(ctx)
	if err != nil {
		t.Fatal(err)
	}

	// Cleanup backend directories.
	defer obj.Shutdown(context.Background())
	defer removeRoots(fsDirs)

	z := obj.(*erasureServerPools)
	xl := z.serverPools[0].sets[0]

	// Create "bucket"
	err = obj.MakeBucketWithLocation(ctx, "bucket", BucketOptions{})
	if err != nil {
		t.Fatal(err)
	}

	bucket := "bucket"
	object := "object"
	opts := ObjectOptions{}
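	// The payload is 16x smallFileThreshold, so the object should take the regular (non-inline) erasure write path.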
	// Create "object" under "bucket".
	_, err = obj.PutObject(ctx, bucket, object, mustGetPutObjReader(t, bytes.NewReader(bytes.Repeat([]byte{'a'}, smallFileThreshold*16)), smallFileThreshold*16, "", ""), opts)
	if err != nil {
		t.Fatal(err)
	}

	// Make 9 disks offline, which leaves fewer than the write quorum of disks
	// in a 16 disk Erasure setup. The original disks are 'replaced' with
	// naughtyDisks that fail after 'f' successful StorageAPI method
	// invocations, where f ranges over [0,2).
	for f := 0; f < 2; f++ {
		diskErrors := make(map[int]error)
		for i := 0; i <= f; i++ {
			diskErrors[i] = nil
		}
		erasureDisks := xl.getDisks()
		for i := range erasureDisks[:9] {
			switch diskType := erasureDisks[i].(type) {
			case *naughtyDisk:
				erasureDisks[i] = newNaughtyDisk(diskType.disk, diskErrors, errFaultyDisk)
			default:
				erasureDisks[i] = newNaughtyDisk(erasureDisks[i], diskErrors, errFaultyDisk)
			}
		}
		z.serverPools[0].erasureDisksMu.Lock()
		xl.getDisks = func() []StorageAPI {
			return erasureDisks
		}
		z.serverPools[0].erasureDisksMu.Unlock()
		// Upload new content to same object "object"
		_, err = obj.PutObject(ctx, bucket, object, mustGetPutObjReader(t, bytes.NewReader(bytes.Repeat([]byte{byte(f)}, smallFileThreshold*16)), smallFileThreshold*16, "", ""), opts)
		if !errors.Is(err, errErasureWriteQuorum) {
			t.Errorf("Expected putObject to fail with %v, but failed with %v", toObjectErr(errErasureWriteQuorum, bucket, object), err)
		}
	}
}

func TestPutObjectNoQuorumSmall(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Create an instance of xl backend.
	obj, fsDirs, err := prepareErasure16(ctx)
	if err != nil {
		t.Fatal(err)
	}

	// Cleanup backend directories.
	defer obj.Shutdown(context.Background())
	defer removeRoots(fsDirs)

	z := obj.(*erasureServerPools)
	xl := z.serverPools[0].sets[0]

	// Create "bucket"
	err = obj.MakeBucketWithLocation(ctx, "bucket", BucketOptions{})
	if err != nil {
		t.Fatal(err)
	}

	bucket := "bucket"
	object := "object"
	opts := ObjectOptions{}
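	// The payload is half of smallFileThreshold, so the object should be small enough to be inlined into xl.meta (the small-object write path).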
	// Create "object" under "bucket".
	_, err = obj.PutObject(ctx, bucket, object, mustGetPutObjReader(t, bytes.NewReader(bytes.Repeat([]byte{'a'}, smallFileThreshold/2)), smallFileThreshold/2, "", ""), opts)
	if err != nil {
		t.Fatal(err)
	}

	// Make 9 disks offline, which leaves fewer than the write quorum of disks
	// in a 16 disk Erasure setup. The original disks are 'replaced' with
	// naughtyDisks that fail after 'f' successful StorageAPI method
	// invocations, where f ranges over [0,2).
	for f := 0; f < 2; f++ {
		t.Run("exec-"+strconv.Itoa(f), func(t *testing.T) {
			diskErrors := make(map[int]error)
			for i := 0; i <= f; i++ {
				diskErrors[i] = nil
			}
			erasureDisks := xl.getDisks()
			for i := range erasureDisks[:9] {
				switch diskType := erasureDisks[i].(type) {
				case *naughtyDisk:
					erasureDisks[i] = newNaughtyDisk(diskType.disk, diskErrors, errFaultyDisk)
				default:
					erasureDisks[i] = newNaughtyDisk(erasureDisks[i], diskErrors, errFaultyDisk)
				}
			}
			z.serverPools[0].erasureDisksMu.Lock()
			xl.getDisks = func() []StorageAPI {
				return erasureDisks
			}
			z.serverPools[0].erasureDisksMu.Unlock()
			// Upload new content to same object "object"
			_, err = obj.PutObject(ctx, bucket, object, mustGetPutObjReader(t, bytes.NewReader(bytes.Repeat([]byte{byte(f)}, smallFileThreshold/2)), smallFileThreshold/2, "", ""), opts)
			if !errors.Is(err, errErasureWriteQuorum) {
				t.Errorf("Expected putObject to fail with %v, but failed with %v", toObjectErr(errErasureWriteQuorum, bucket, object), err)
			}
		})
	}
}

func TestObjectQuorumFromMeta(t *testing.T) {
	ExecObjectLayerTestWithDirs(t, testObjectQuorumFromMeta)
}

func testObjectQuorumFromMeta(obj ObjectLayer, instanceType string, dirs []string, t TestErrHandler) {
	restoreGlobalStorageClass := globalStorageClass
	defer func() {
		globalStorageClass = restoreGlobalStorageClass
	}()

	globalStorageClass = storageclass.Config{}

	bucket := getRandomBucketName()

	var opts ObjectOptions
	// make data with more than one part
	partCount := 3
	data := bytes.Repeat([]byte("a"), 6*1024*1024*partCount)

	z := obj.(*erasureServerPools)
	xl := z.serverPools[0].sets[0]
	erasureDisks := xl.getDisks()

	ctx, cancel := context.WithCancel(GlobalContext)
	defer cancel()

	err := obj.MakeBucketWithLocation(ctx, bucket, BucketOptions{})
	if err != nil {
		t.Fatalf("Failed to make a bucket %v", err)
	}

	// Object for test case 1 - No StorageClass defined, no MetaData in PutObject
	object1 := "object1"
	_, err = obj.PutObject(ctx, bucket, object1, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), opts)
	if err != nil {
		t.Fatalf("Failed to putObject %v", err)
	}

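	// readAllFileInfo loads the xl.meta from every disk; the per-disk FileInfo slice and error slice are what objectQuorumFromMeta evaluates below.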
	parts1, errs1 := readAllFileInfo(ctx, erasureDisks, bucket, object1, "", false)
	parts1SC := globalStorageClass

	// Object for test case 2 - No StorageClass defined, MetaData in PutObject requesting RRS Class
	object2 := "object2"
	metadata2 := make(map[string]string)
	metadata2["x-amz-storage-class"] = storageclass.RRS
	_, err = obj.PutObject(ctx, bucket, object2, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{UserDefined: metadata2})
	if err != nil {
		t.Fatalf("Failed to putObject %v", err)
	}

	parts2, errs2 := readAllFileInfo(ctx, erasureDisks, bucket, object2, "", false)
	parts2SC := globalStorageClass

	// Object for test case 3 - No StorageClass defined, MetaData in PutObject requesting Standard Storage Class
	object3 := "object3"
	metadata3 := make(map[string]string)
	metadata3["x-amz-storage-class"] = storageclass.STANDARD
	_, err = obj.PutObject(ctx, bucket, object3, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{UserDefined: metadata3})
	if err != nil {
		t.Fatalf("Failed to putObject %v", err)
	}

	parts3, errs3 := readAllFileInfo(ctx, erasureDisks, bucket, object3, "", false)
	parts3SC := globalStorageClass

	// Object for test case 4 - Standard StorageClass defined as Parity 6, MetaData in PutObject requesting Standard Storage Class
	object4 := "object4"
	metadata4 := make(map[string]string)
	metadata4["x-amz-storage-class"] = storageclass.STANDARD
	globalStorageClass = storageclass.Config{
		Standard: storageclass.StorageClass{
			Parity: 6,
		},
	}

	_, err = obj.PutObject(ctx, bucket, object4, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{UserDefined: metadata4})
	if err != nil {
		t.Fatalf("Failed to putObject %v", err)
	}

	parts4, errs4 := readAllFileInfo(ctx, erasureDisks, bucket, object4, "", false)
	parts4SC := storageclass.Config{
		Standard: storageclass.StorageClass{
			Parity: 6,
		},
	}

	// Object for test case 5 - RRS StorageClass defined as Parity 2, MetaData in PutObject requesting RRS Class
	// Switch the global storage class config to RRS with parity 2
	object5 := "object5"
	metadata5 := make(map[string]string)
	metadata5["x-amz-storage-class"] = storageclass.RRS
	globalStorageClass = storageclass.Config{
		RRS: storageclass.StorageClass{
			Parity: 2,
		},
	}

	_, err = obj.PutObject(ctx, bucket, object5, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{UserDefined: metadata5})
	if err != nil {
		t.Fatalf("Failed to putObject %v", err)
	}

	parts5, errs5 := readAllFileInfo(ctx, erasureDisks, bucket, object5, "", false)
	parts5SC := storageclass.Config{
		RRS: storageclass.StorageClass{
			Parity: 2,
		},
	}

	// Object for test case 6 - RRS StorageClass defined as Parity 2, MetaData in PutObject requesting Standard Storage Class
	object6 := "object6"
	metadata6 := make(map[string]string)
	metadata6["x-amz-storage-class"] = storageclass.STANDARD
	globalStorageClass = storageclass.Config{
		RRS: storageclass.StorageClass{
			Parity: 2,
		},
	}

	_, err = obj.PutObject(ctx, bucket, object6, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{UserDefined: metadata6})
	if err != nil {
		t.Fatalf("Failed to putObject %v", err)
	}

	parts6, errs6 := readAllFileInfo(ctx, erasureDisks, bucket, object6, "", false)
	parts6SC := storageclass.Config{
		RRS: storageclass.StorageClass{
			Parity: 2,
		},
	}

	// Object for test case 7 - Standard StorageClass defined as Parity 5, MetaData in PutObject requesting Standard Storage Class
	// Switch the global storage class config to Standard with parity 5
	object7 := "object7"
	metadata7 := make(map[string]string)
	metadata7["x-amz-storage-class"] = storageclass.STANDARD
	globalStorageClass = storageclass.Config{
		Standard: storageclass.StorageClass{
			Parity: 5,
		},
	}

	_, err = obj.PutObject(ctx, bucket, object7, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{UserDefined: metadata7})
	if err != nil {
		t.Fatalf("Failed to putObject %v", err)
	}

	parts7, errs7 := readAllFileInfo(ctx, erasureDisks, bucket, object7, "", false)
	parts7SC := storageclass.Config{
		Standard: storageclass.StorageClass{
			Parity: 5,
		},
	}

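	// For these cases the expected read and write quorum both equal the data drive count:
	// 16 disks minus the parity in effect (default Standard parity is 4, default RRS parity is 2).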
	tests := []struct {
		parts               []FileInfo
		errs                []error
		expectedReadQuorum  int
		expectedWriteQuorum int
		storageClassCfg     storageclass.Config
		expectedError       error
	}{
		{parts1, errs1, 12, 12, parts1SC, nil},
		{parts2, errs2, 14, 14, parts2SC, nil},
		{parts3, errs3, 12, 12, parts3SC, nil},
		{parts4, errs4, 10, 10, parts4SC, nil},
		{parts5, errs5, 14, 14, parts5SC, nil},
		{parts6, errs6, 12, 12, parts6SC, nil},
		{parts7, errs7, 11, 11, parts7SC, nil},
	}
	for _, tt := range tests {
		tt := tt
		t.(*testing.T).Run("", func(t *testing.T) {
			globalStorageClass = tt.storageClassCfg
			actualReadQuorum, actualWriteQuorum, err := objectQuorumFromMeta(ctx, tt.parts, tt.errs, getDefaultParityBlocks(len(erasureDisks)))
			if tt.expectedError != nil && err == nil {
				t.Errorf("Expected %s, got %s", tt.expectedError, err)
			}
			if tt.expectedError == nil && err != nil {
				t.Errorf("Expected %s, got %s", tt.expectedError, err)
			}
			if tt.expectedReadQuorum != actualReadQuorum {
				t.Errorf("Expected Read Quorum %d, got %d", tt.expectedReadQuorum, actualReadQuorum)
			}
			if tt.expectedWriteQuorum != actualWriteQuorum {
				t.Errorf("Expected Write Quorum %d, got %d", tt.expectedWriteQuorum, actualWriteQuorum)
			}
		})
	}
}