github.com/minio/minio@v0.0.0-20240328213742-3f72439b8a27/cmd/erasure-healing-common_test.go

     1  // Copyright (c) 2015-2021 MinIO, Inc.
     2  //
     3  // This file is part of MinIO Object Storage stack
     4  //
     5  // This program is free software: you can redistribute it and/or modify
     6  // it under the terms of the GNU Affero General Public License as published by
     7  // the Free Software Foundation, either version 3 of the License, or
     8  // (at your option) any later version.
     9  //
     10  // This program is distributed in the hope that it will be useful,
    11  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    12  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    13  // GNU Affero General Public License for more details.
    14  //
    15  // You should have received a copy of the GNU Affero General Public License
    16  // along with this program.  If not, see <http://www.gnu.org/licenses/>.
    17  
    18  package cmd
    19  
    20  import (
    21  	"bytes"
    22  	"context"
    23  	"fmt"
    24  	"os"
    25  	"path/filepath"
    26  	"testing"
    27  	"time"
    28  
    29  	"github.com/minio/madmin-go/v3"
    30  )
    31  
     32  // Returns the latest updated FileInfo and an error in case of failure.
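         // For reference, the tests below obtain its inputs from readAllFileInfo and call it as:
         //
         //	partsMetadata, errs := readAllFileInfo(ctx, erasureDisks, "", bucket, object, "", false, true)
         //	fi, err := getLatestFileInfo(ctx, partsMetadata, z.serverPools[0].sets[0].defaultParityCount, errs)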
    33  func getLatestFileInfo(ctx context.Context, partsMetadata []FileInfo, defaultParityCount int, errs []error) (FileInfo, error) {
     34  	// There should be at least half correct entries; if not, return failure
    35  	expectedRQuorum := len(partsMetadata) / 2
    36  	if defaultParityCount == 0 {
     37  		// if parity count is '0', we expect all entries to be present.
    38  		expectedRQuorum = len(partsMetadata)
    39  	}
    40  
    41  	reducedErr := reduceReadQuorumErrs(ctx, errs, objectOpIgnoredErrs, expectedRQuorum)
    42  	if reducedErr != nil {
    43  		return FileInfo{}, reducedErr
    44  	}
    45  
     46  	// List all the modtimes from parts metadata.
    47  	modTimes := listObjectModtimes(partsMetadata, errs)
    48  
    49  	// Count all latest updated FileInfo values
    50  	var count int
    51  	var latestFileInfo FileInfo
    52  
     53  	// Reduce the list of modTimes to a single common value, i.e. the latest update time.
    54  	modTime := commonTime(modTimes, expectedRQuorum)
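         	// commonTime returns the modTime shared by at least expectedRQuorum of the entries;
         	// when no value reaches that quorum it returns timeSentinel (see TestCommonTime below),
         	// which the check that follows treats as a read-quorum failure.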
    55  
    56  	if modTime.IsZero() || modTime.Equal(timeSentinel) {
    57  		return FileInfo{}, errErasureReadQuorum
    58  	}
    59  
    60  	// Iterate through all the modTimes and count the FileInfo(s) with latest time.
    61  	for index, t := range modTimes {
    62  		if partsMetadata[index].IsValid() && t.Equal(modTime) {
    63  			latestFileInfo = partsMetadata[index]
    64  			count++
    65  		}
    66  	}
    67  
    68  	if !latestFileInfo.IsValid() {
    69  		return FileInfo{}, errErasureReadQuorum
    70  	}
    71  
    72  	if count < latestFileInfo.Erasure.DataBlocks {
    73  		return FileInfo{}, errErasureReadQuorum
    74  	}
    75  
    76  	return latestFileInfo, nil
    77  }
    78  
     79  // validates the functionality provided to find the most common
     80  // time occurrence in a list of times.
    81  func TestCommonTime(t *testing.T) {
    82  	// List of test cases for common modTime.
    83  	testCases := []struct {
    84  		times  []time.Time
    85  		time   time.Time
    86  		quorum int
    87  	}{
    88  		{
    89  			// 1. Tests common times when slice has varying time elements.
    90  			[]time.Time{
    91  				time.Unix(0, 1).UTC(),
    92  				time.Unix(0, 2).UTC(),
    93  				time.Unix(0, 3).UTC(),
    94  				time.Unix(0, 3).UTC(),
    95  				time.Unix(0, 2).UTC(),
    96  				time.Unix(0, 3).UTC(),
    97  				time.Unix(0, 1).UTC(),
    98  			},
    99  			time.Unix(0, 3).UTC(),
   100  			3,
   101  		},
   102  		{
   103  			// 2. Tests common time obtained when all elements are equal.
   104  			[]time.Time{
   105  				time.Unix(0, 3).UTC(),
   106  				time.Unix(0, 3).UTC(),
   107  				time.Unix(0, 3).UTC(),
   108  				time.Unix(0, 3).UTC(),
   109  				time.Unix(0, 3).UTC(),
   110  				time.Unix(0, 3).UTC(),
   111  				time.Unix(0, 3).UTC(),
   112  			},
   113  			time.Unix(0, 3).UTC(),
   114  			4,
   115  		},
   116  		{
   117  			// 3. Tests common time obtained when elements have a mixture of
   118  			// sentinel values and don't have read quorum on any of the values.
   119  			[]time.Time{
   120  				time.Unix(0, 3).UTC(),
   121  				time.Unix(0, 3).UTC(),
   122  				time.Unix(0, 2).UTC(),
   123  				time.Unix(0, 1).UTC(),
   124  				time.Unix(0, 3).UTC(),
   125  				time.Unix(0, 4).UTC(),
   126  				time.Unix(0, 3).UTC(),
   127  				timeSentinel,
   128  				timeSentinel,
   129  				timeSentinel,
   130  			},
   131  			timeSentinel,
   132  			5,
   133  		},
   134  	}
   135  
    136  	// Run all the test cases and validate them against the expected
    137  	// common modtime. The test fails if the modtime does not match.
   138  	for i, testCase := range testCases {
   139  		// Obtain a common mod time from modTimes slice.
   140  		ctime := commonTime(testCase.times, testCase.quorum)
   141  		if !testCase.time.Equal(ctime) {
    142  			t.Errorf("Test case %d: expected to pass but failed. Wanted modTime: %s, got modTime: %s\n", i+1, testCase.time, ctime)
   143  		}
   144  	}
   145  }
   146  
   147  // TestListOnlineDisks - checks if listOnlineDisks and outDatedDisks
   148  // are consistent with each other.
   149  func TestListOnlineDisks(t *testing.T) {
   150  	ctx, cancel := context.WithCancel(context.Background())
   151  	defer cancel()
   152  
   153  	obj, disks, err := prepareErasure16(ctx)
   154  	if err != nil {
   155  		t.Fatalf("Prepare Erasure backend failed - %v", err)
   156  	}
   157  	setObjectLayer(obj)
   158  	defer obj.Shutdown(context.Background())
   159  	defer removeRoots(disks)
   160  
   161  	type tamperKind int
   162  	const (
   163  		noTamper tamperKind = iota
   164  		deletePart
   165  		corruptPart
   166  	)
   167  
   168  	timeSentinel := time.Unix(1, 0).UTC()
   169  	threeNanoSecs := time.Unix(3, 0).UTC()
   170  	fourNanoSecs := time.Unix(4, 0).UTC()
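         	// Note: despite the names, these values are whole seconds past the Unix epoch
         	// (time.Unix(3, 0) is 3s, not 3ns), and this timeSentinel shadows the package-level
         	// sentinel within this test.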
   171  	modTimesThreeNone := make([]time.Time, 16)
   172  	modTimesThreeFour := make([]time.Time, 16)
   173  	for i := 0; i < 16; i++ {
    174  		// Have 13 good xl.meta: 12 to meet the read quorum for the default
    175  		// parity count of 4 (EC:4), plus one extra to be tampered with.
   176  		if i > 12 {
   177  			modTimesThreeFour[i] = fourNanoSecs
   178  			modTimesThreeNone[i] = timeSentinel
   179  			continue
   180  		}
   181  		modTimesThreeFour[i] = threeNanoSecs
   182  		modTimesThreeNone[i] = threeNanoSecs
   183  	}
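         	// modTimesThreeFour: drives 0-12 carry threeNanoSecs, drives 13-15 carry fourNanoSecs.
         	// modTimesThreeNone: drives 0-12 carry threeNanoSecs, drives 13-15 carry the sentinel.
         	// In both cases 13 of the 16 drives agree, so threeNanoSecs is the expected common modTime.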
   184  
   185  	testCases := []struct {
   186  		modTimes       []time.Time
   187  		expectedTime   time.Time
   188  		errs           []error
   189  		_tamperBackend tamperKind
   190  	}{
   191  		{
   192  			modTimes:     modTimesThreeFour,
   193  			expectedTime: threeNanoSecs,
   194  			errs: []error{
   195  				nil, nil, nil, nil, nil, nil, nil, nil,
   196  				nil, nil, nil, nil, nil, nil, nil, nil,
   197  			},
   198  			_tamperBackend: noTamper,
   199  		},
   200  		{
   201  			modTimes:     modTimesThreeNone,
   202  			expectedTime: threeNanoSecs,
   203  			errs: []error{
   204  				// Disks that have a valid xl.meta.
   205  				nil, nil, nil, nil, nil, nil, nil, nil,
   206  				nil, nil, nil, nil, nil,
   207  				// Some disks can't access xl.meta.
   208  				errFileNotFound, errDiskAccessDenied, errDiskNotFound,
   209  			},
   210  			_tamperBackend: deletePart,
   211  		},
   212  		{
   213  			modTimes:     modTimesThreeNone,
   214  			expectedTime: threeNanoSecs,
   215  			errs: []error{
   216  				// Disks that have a valid xl.meta.
   217  				nil, nil, nil, nil, nil, nil, nil, nil,
   218  				nil, nil, nil, nil, nil,
   219  				// Some disks don't have xl.meta.
   220  				errDiskNotFound, errFileNotFound, errFileNotFound,
   221  			},
   222  			_tamperBackend: corruptPart,
   223  		},
   224  	}
   225  
   226  	bucket := "bucket"
   227  	err = obj.MakeBucket(ctx, "bucket", MakeBucketOptions{})
   228  	if err != nil {
   229  		t.Fatalf("Failed to make a bucket %v", err)
   230  	}
   231  
   232  	object := "object"
   233  	data := bytes.Repeat([]byte("a"), smallFileThreshold*16)
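         	// The object is 16x smallFileThreshold, so its data is written to part files on disk
         	// (part.1 below) rather than being inlined into xl.meta.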
   234  	z := obj.(*erasureServerPools)
   235  
   236  	erasureDisks, err := z.GetDisks(0, 0)
   237  	if err != nil {
   238  		t.Fatal(err)
   239  	}
   240  
   241  	for i, test := range testCases {
   242  		test := test
   243  		t.Run(fmt.Sprintf("case-%d", i), func(t *testing.T) {
   244  			_, err = obj.PutObject(ctx, bucket, object, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{})
   245  			if err != nil {
   246  				t.Fatalf("Failed to putObject %v", err)
   247  			}
   248  
   249  			partsMetadata, errs := readAllFileInfo(ctx, erasureDisks, "", bucket, object, "", false, true)
   250  			fi, err := getLatestFileInfo(ctx, partsMetadata, z.serverPools[0].sets[0].defaultParityCount, errs)
   251  			if err != nil {
   252  				t.Fatalf("Failed to getLatestFileInfo %v", err)
   253  			}
   254  
   255  			for j := range partsMetadata {
   256  				if errs[j] != nil {
   257  					t.Fatalf("expected error to be nil: %s", errs[j])
   258  				}
   259  				partsMetadata[j].ModTime = test.modTimes[j]
   260  			}
   261  
   262  			tamperedIndex := -1
   263  			switch test._tamperBackend {
   264  			case deletePart:
   265  				for index, err := range test.errs {
   266  					if err != nil {
   267  						continue
   268  					}
   269  					// Remove a part from a disk
   270  					// which has a valid xl.meta,
   271  					// and check if that disk
   272  					// appears in outDatedDisks.
   273  					tamperedIndex = index
   274  					dErr := erasureDisks[index].Delete(context.Background(), bucket, pathJoin(object, fi.DataDir, "part.1"), DeleteOptions{
   275  						Recursive: false,
   276  						Immediate: false,
   277  					})
   278  					if dErr != nil {
   279  						t.Fatalf("Failed to delete %s - %v", filepath.Join(object, "part.1"), dErr)
   280  					}
   281  					break
   282  				}
   283  			case corruptPart:
   284  				for index, err := range test.errs {
   285  					if err != nil {
   286  						continue
   287  					}
   288  					// Corrupt a part from a disk
   289  					// which has a valid xl.meta,
   290  					// and check if that disk
   291  					// appears in outDatedDisks.
   292  					tamperedIndex = index
   293  					filePath := pathJoin(erasureDisks[index].String(), bucket, object, fi.DataDir, "part.1")
   294  					f, err := os.OpenFile(filePath, os.O_WRONLY|os.O_SYNC, 0)
   295  					if err != nil {
   296  						t.Fatalf("Failed to open %s: %s\n", filePath, err)
   297  					}
   298  					f.WriteString("oops") // Will cause bitrot error
   299  					f.Close()
   300  					break
   301  				}
   302  
   303  			}
   304  
   305  			rQuorum := len(errs) - z.serverPools[0].sets[0].defaultParityCount
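         			// With 16 drives and the default parity of 4 (EC:4), rQuorum works out to 12.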
   306  			onlineDisks, modTime, _ := listOnlineDisks(erasureDisks, partsMetadata, test.errs, rQuorum)
   307  			if !modTime.Equal(test.expectedTime) {
   308  				t.Fatalf("Expected modTime to be equal to %v but was found to be %v",
   309  					test.expectedTime, modTime)
   310  			}
   311  			availableDisks, newErrs, _ := disksWithAllParts(ctx, onlineDisks, partsMetadata,
   312  				test.errs, fi, bucket, object, madmin.HealDeepScan)
   313  			test.errs = newErrs
   314  
   315  			if test._tamperBackend != noTamper {
   316  				if tamperedIndex != -1 && availableDisks[tamperedIndex] != nil {
    317  					t.Fatalf("Drive (%v) with tampered part.1 should not be reported as having all data",
   318  						erasureDisks[tamperedIndex])
   319  				}
   320  			}
   321  		})
   322  	}
   323  }
   324  
   325  // TestListOnlineDisksSmallObjects - checks if listOnlineDisks and outDatedDisks
   326  // are consistent with each other.
   327  func TestListOnlineDisksSmallObjects(t *testing.T) {
   328  	ctx, cancel := context.WithCancel(context.Background())
   329  	defer cancel()
   330  
   331  	obj, disks, err := prepareErasure16(ctx)
   332  	if err != nil {
   333  		t.Fatalf("Prepare Erasure backend failed - %v", err)
   334  	}
   335  	setObjectLayer(obj)
   336  	defer obj.Shutdown(context.Background())
   337  	defer removeRoots(disks)
   338  
   339  	type tamperKind int
   340  	const (
    341  		noTamper tamperKind = iota
    342  		deletePart
    343  		corruptPart
   344  	)
   345  	timeSentinel := time.Unix(1, 0).UTC()
   346  	threeNanoSecs := time.Unix(3, 0).UTC()
   347  	fourNanoSecs := time.Unix(4, 0).UTC()
   348  	modTimesThreeNone := make([]time.Time, 16)
   349  	modTimesThreeFour := make([]time.Time, 16)
   350  	for i := 0; i < 16; i++ {
    351  		// Have 13 good xl.meta: 12 to meet the read quorum for the default
    352  		// parity count of 4 (EC:4), plus one extra to be tampered with.
   353  		if i > 12 {
   354  			modTimesThreeFour[i] = fourNanoSecs
   355  			modTimesThreeNone[i] = timeSentinel
   356  			continue
   357  		}
   358  		modTimesThreeFour[i] = threeNanoSecs
   359  		modTimesThreeNone[i] = threeNanoSecs
   360  	}
   361  
   362  	testCases := []struct {
   363  		modTimes       []time.Time
   364  		expectedTime   time.Time
   365  		errs           []error
   366  		_tamperBackend tamperKind
   367  	}{
   368  		{
   369  			modTimes:     modTimesThreeFour,
   370  			expectedTime: threeNanoSecs,
   371  			errs: []error{
   372  				nil, nil, nil, nil, nil, nil, nil, nil,
   373  				nil, nil, nil, nil, nil, nil, nil, nil,
   374  			},
   375  			_tamperBackend: noTamper,
   376  		},
   377  		{
   378  			modTimes:     modTimesThreeNone,
   379  			expectedTime: threeNanoSecs,
   380  			errs: []error{
   381  				// Disks that have a valid xl.meta.
   382  				nil, nil, nil, nil, nil, nil, nil, nil,
   383  				nil, nil, nil, nil, nil,
   384  				// Some disks can't access xl.meta.
   385  				errFileNotFound, errDiskAccessDenied, errDiskNotFound,
   386  			},
   387  			_tamperBackend: deletePart,
   388  		},
   389  		{
   390  			modTimes:     modTimesThreeNone,
   391  			expectedTime: threeNanoSecs,
   392  			errs: []error{
   393  				// Disks that have a valid xl.meta.
   394  				nil, nil, nil, nil, nil, nil, nil, nil,
   395  				nil, nil, nil, nil, nil,
   396  				// Some disks don't have xl.meta.
   397  				errDiskNotFound, errFileNotFound, errFileNotFound,
   398  			},
   399  			_tamperBackend: corruptPart,
   400  		},
   401  	}
   402  
   403  	bucket := "bucket"
   404  	err = obj.MakeBucket(ctx, "bucket", MakeBucketOptions{})
   405  	if err != nil {
   406  		t.Fatalf("Failed to make a bucket %v", err)
   407  	}
   408  
   409  	object := "object"
   410  	data := bytes.Repeat([]byte("a"), smallFileThreshold/2)
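         	// At half of smallFileThreshold the object is small enough to be stored inline in
         	// xl.meta, which is why the tampering below targets xlStorageFormatFile (xl.meta)
         	// rather than a part file.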
   411  	z := obj.(*erasureServerPools)
   412  
   413  	erasureDisks, err := z.GetDisks(0, 0)
   414  	if err != nil {
   415  		t.Fatal(err)
   416  	}
   417  
   418  	for i, test := range testCases {
   419  		test := test
   420  		t.Run(fmt.Sprintf("case-%d", i), func(t *testing.T) {
   421  			_, err := obj.PutObject(ctx, bucket, object,
   422  				mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{})
   423  			if err != nil {
   424  				t.Fatalf("Failed to putObject %v", err)
   425  			}
   426  
   427  			partsMetadata, errs := readAllFileInfo(ctx, erasureDisks, "", bucket, object, "", true, true)
   428  			fi, err := getLatestFileInfo(ctx, partsMetadata, z.serverPools[0].sets[0].defaultParityCount, errs)
   429  			if err != nil {
   430  				t.Fatalf("Failed to getLatestFileInfo %v", err)
   431  			}
   432  
   433  			for j := range partsMetadata {
   434  				if errs[j] != nil {
   435  					t.Fatalf("expected error to be nil: %s", errs[j])
   436  				}
   437  				partsMetadata[j].ModTime = test.modTimes[j]
   438  			}
   439  
   440  			if erasureDisks, err = writeUniqueFileInfo(ctx, erasureDisks, "", bucket, object, partsMetadata, diskCount(erasureDisks)); err != nil {
   441  				t.Fatal(ctx, err)
   442  			}
   443  
   444  			tamperedIndex := -1
   445  			switch test._tamperBackend {
   446  			case deletePart:
   447  				for index, err := range test.errs {
   448  					if err != nil {
   449  						continue
   450  					}
    451  					// Remove xl.meta from a disk
    452  					// which has a valid one,
   453  					// and check if that disk
   454  					// appears in outDatedDisks.
   455  					tamperedIndex = index
   456  					dErr := erasureDisks[index].Delete(context.Background(), bucket, pathJoin(object, xlStorageFormatFile), DeleteOptions{
   457  						Recursive: false,
   458  						Immediate: false,
   459  					})
   460  					if dErr != nil {
   461  						t.Fatalf("Failed to delete %s - %v", pathJoin(object, xlStorageFormatFile), dErr)
   462  					}
   463  					break
   464  				}
   465  			case corruptPart:
   466  				for index, err := range test.errs {
   467  					if err != nil {
   468  						continue
   469  					}
    470  					// Corrupt the xl.meta on a disk
    471  					// that currently has a valid one,
   472  					// and check if that disk
   473  					// appears in outDatedDisks.
   474  					tamperedIndex = index
   475  					filePath := pathJoin(erasureDisks[index].String(), bucket, object, xlStorageFormatFile)
   476  					f, err := os.OpenFile(filePath, os.O_WRONLY|os.O_SYNC, 0)
   477  					if err != nil {
   478  						t.Fatalf("Failed to open %s: %s\n", filePath, err)
   479  					}
   480  					f.WriteString("oops") // Will cause bitrot error
   481  					f.Close()
   482  					break
   483  				}
   484  
   485  			}
   486  
   487  			rQuorum := len(errs) - z.serverPools[0].sets[0].defaultParityCount
   488  			onlineDisks, modTime, _ := listOnlineDisks(erasureDisks, partsMetadata, test.errs, rQuorum)
   489  			if !modTime.Equal(test.expectedTime) {
   490  				t.Fatalf("Expected modTime to be equal to %v but was found to be %v",
   491  					test.expectedTime, modTime)
   492  			}
   493  
   494  			availableDisks, newErrs, _ := disksWithAllParts(ctx, onlineDisks, partsMetadata,
   495  				test.errs, fi, bucket, object, madmin.HealDeepScan)
   496  			test.errs = newErrs
   497  
   498  			if test._tamperBackend != noTamper {
   499  				if tamperedIndex != -1 && availableDisks[tamperedIndex] != nil {
    500  					t.Fatalf("Drive (%v) with tampered xl.meta should not be reported as having all data",
   501  						erasureDisks[tamperedIndex])
   502  				}
   503  			}
   504  		})
   505  	}
   506  }
   507  
   508  func TestDisksWithAllParts(t *testing.T) {
   509  	ctx, cancel := context.WithCancel(context.Background())
   510  	defer cancel()
   511  	obj, disks, err := prepareErasure16(ctx)
   512  	if err != nil {
   513  		t.Fatalf("Prepare Erasure backend failed - %v", err)
   514  	}
   515  	setObjectLayer(obj)
   516  	defer obj.Shutdown(context.Background())
   517  	defer removeRoots(disks)
   518  
   519  	bucket := "bucket"
   520  	object := "object"
   521  	// make data with more than one part
   522  	partCount := 3
   523  	data := bytes.Repeat([]byte("a"), 6*1024*1024*partCount)
   524  	z := obj.(*erasureServerPools)
   525  	s := z.serverPools[0].sets[0]
   526  	erasureDisks := s.getDisks()
   527  	err = obj.MakeBucket(ctx, "bucket", MakeBucketOptions{})
   528  	if err != nil {
   529  		t.Fatalf("Failed to make a bucket %v", err)
   530  	}
   531  
   532  	_, err = obj.PutObject(ctx, bucket, object, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{})
   533  	if err != nil {
   534  		t.Fatalf("Failed to putObject %v", err)
   535  	}
   536  
   537  	_, errs := readAllFileInfo(ctx, erasureDisks, "", bucket, object, "", false, true)
   538  	readQuorum := len(erasureDisks) / 2
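         	// prepareErasure16 provisions 16 drives, so readQuorum evaluates to 8 here.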
   539  	if reducedErr := reduceReadQuorumErrs(ctx, errs, objectOpIgnoredErrs, readQuorum); reducedErr != nil {
   540  		t.Fatalf("Failed to read xl meta data %v", reducedErr)
   541  	}
   542  
    543  	// Test 1: Check that all disks are returned without any failures with
    544  	// unmodified metadata.
    545  	partsMetadata, errs := readAllFileInfo(ctx, erasureDisks, "", bucket, object, "", false, true)
    546  	if reducedErr := reduceReadQuorumErrs(ctx, errs, objectOpIgnoredErrs, readQuorum); reducedErr != nil {
    547  		t.Fatalf("Failed to read xl meta data %v", reducedErr)
    548  	}
   549  
   550  	fi, err := getLatestFileInfo(ctx, partsMetadata, s.defaultParityCount, errs)
   551  	if err != nil {
   552  		t.Fatalf("Failed to get quorum consistent fileInfo %v", err)
   553  	}
   554  
   555  	erasureDisks, _, _ = listOnlineDisks(erasureDisks, partsMetadata, errs, readQuorum)
   556  
   557  	filteredDisks, errs, _ := disksWithAllParts(ctx, erasureDisks, partsMetadata,
   558  		errs, fi, bucket, object, madmin.HealDeepScan)
   559  
   560  	if len(filteredDisks) != len(erasureDisks) {
   561  		t.Errorf("Unexpected number of drives: %d", len(filteredDisks))
   562  	}
   563  
   564  	for diskIndex, disk := range filteredDisks {
   565  		if errs[diskIndex] != nil {
   566  			t.Errorf("Unexpected error %s", errs[diskIndex])
   567  		}
   568  
   569  		if disk == nil {
   570  			t.Errorf("Drive erroneously filtered, driveIndex: %d", diskIndex)
   571  		}
   572  	}
   573  
   574  	// Test 2: Not synchronized modtime
   575  	partsMetadataBackup := partsMetadata[0]
   576  	partsMetadata[0].ModTime = partsMetadata[0].ModTime.Add(-1 * time.Hour)
   577  
   578  	errs = make([]error, len(erasureDisks))
   579  	filteredDisks, _, _ = disksWithAllParts(ctx, erasureDisks, partsMetadata,
   580  		errs, fi, bucket, object, madmin.HealDeepScan)
   581  
   582  	if len(filteredDisks) != len(erasureDisks) {
   583  		t.Errorf("Unexpected number of drives: %d", len(filteredDisks))
   584  	}
   585  	for diskIndex, disk := range filteredDisks {
   586  		if diskIndex == 0 && disk != nil {
   587  			t.Errorf("Drive not filtered as expected, drive: %d", diskIndex)
   588  		}
   589  		if diskIndex != 0 && disk == nil {
   590  			t.Errorf("Drive erroneously filtered, driveIndex: %d", diskIndex)
   591  		}
   592  	}
   593  	partsMetadata[0] = partsMetadataBackup // Revert before going to the next test
   594  
   595  	// Test 3: Not synchronized DataDir
   596  	partsMetadataBackup = partsMetadata[1]
   597  	partsMetadata[1].DataDir = "foo-random"
   598  
   599  	errs = make([]error, len(erasureDisks))
   600  	filteredDisks, _, _ = disksWithAllParts(ctx, erasureDisks, partsMetadata,
   601  		errs, fi, bucket, object, madmin.HealDeepScan)
   602  
   603  	if len(filteredDisks) != len(erasureDisks) {
   604  		t.Errorf("Unexpected number of drives: %d", len(filteredDisks))
   605  	}
   606  	for diskIndex, disk := range filteredDisks {
   607  		if diskIndex == 1 && disk != nil {
   608  			t.Errorf("Drive not filtered as expected, drive: %d", diskIndex)
   609  		}
   610  		if diskIndex != 1 && disk == nil {
   611  			t.Errorf("Drive erroneously filtered, driveIndex: %d", diskIndex)
   612  		}
   613  	}
   614  	partsMetadata[1] = partsMetadataBackup // Revert before going to the next test
   615  
    616  	// Test 4: Simulate bitrot on some parts; key = disk index, value = part name with hash mismatch
   617  	diskFailures := make(map[int]string)
   618  	diskFailures[0] = "part.1"
   619  	diskFailures[3] = "part.1"
   620  	diskFailures[15] = "part.1"
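         	// part.1 on drives 0, 3 and 15 is overwritten below, so deep-scan healing is expected
         	// to filter out exactly those three drives and report an error for each of them.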
   621  
   622  	for diskIndex, partName := range diskFailures {
   623  		for i := range partsMetadata[diskIndex].Parts {
   624  			if fmt.Sprintf("part.%d", i+1) == partName {
   625  				filePath := pathJoin(erasureDisks[diskIndex].String(), bucket, object, partsMetadata[diskIndex].DataDir, partName)
   626  				f, err := os.OpenFile(filePath, os.O_WRONLY|os.O_SYNC, 0)
   627  				if err != nil {
   628  					t.Fatalf("Failed to open %s: %s\n", filePath, err)
   629  				}
   630  				f.WriteString("oops") // Will cause bitrot error
   631  				f.Close()
   632  			}
   633  		}
   634  	}
   635  
   636  	errs = make([]error, len(erasureDisks))
   637  	filteredDisks, errs, _ = disksWithAllParts(ctx, erasureDisks, partsMetadata,
   638  		errs, fi, bucket, object, madmin.HealDeepScan)
   639  
   640  	if len(filteredDisks) != len(erasureDisks) {
   641  		t.Errorf("Unexpected number of drives: %d", len(filteredDisks))
   642  	}
   643  
   644  	for diskIndex, disk := range filteredDisks {
   645  		if _, ok := diskFailures[diskIndex]; ok {
   646  			if disk != nil {
   647  				t.Errorf("Drive not filtered as expected, drive: %d", diskIndex)
   648  			}
   649  			if errs[diskIndex] == nil {
   650  				t.Errorf("Expected error not received, driveIndex: %d", diskIndex)
   651  			}
   652  		} else {
   653  			if disk == nil {
   654  				t.Errorf("Drive erroneously filtered, driveIndex: %d", diskIndex)
   655  			}
   656  			if errs[diskIndex] != nil {
   657  				t.Errorf("Unexpected error, %s, driveIndex: %d", errs[diskIndex], diskIndex)
   658  			}
   659  
   660  		}
   661  	}
   662  }
   663  
   664  func TestCommonParities(t *testing.T) {
   665  	// This test uses two FileInfo values that represent the same object but
    666  	// have different parities. They occur on an equal number of drives, but only
   667  	// one has read quorum. commonParity should pick the parity corresponding to
   668  	// the FileInfo which has read quorum.
   669  	fi1 := FileInfo{
   670  		Volume:         "mybucket",
   671  		Name:           "myobject",
   672  		VersionID:      "",
   673  		IsLatest:       true,
   674  		Deleted:        false,
   675  		ExpireRestored: false,
   676  		DataDir:        "4a01d9dd-0c5e-4103-88f8-b307c57d212e",
   677  		XLV1:           false,
   678  		ModTime:        time.Date(2023, time.March, 15, 11, 18, 4, 989906961, time.UTC),
   679  		Size:           329289, Mode: 0x0, WrittenByVersion: 0x63c77756,
   680  		Metadata: map[string]string{
   681  			"content-type": "application/octet-stream", "etag": "f205307ef9f50594c4b86d9c246bee86", "x-minio-internal-erasure-upgraded": "5->6", "x-minio-internal-inline-data": "true",
   682  		},
   683  		Parts: []ObjectPartInfo{
   684  			{
   685  				ETag:       "",
   686  				Number:     1,
   687  				Size:       329289,
   688  				ActualSize: 329289,
   689  				ModTime:    time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC),
   690  				Index:      []uint8(nil),
   691  				Checksums:  map[string]string(nil),
   692  			},
   693  		},
   694  		Erasure: ErasureInfo{
   695  			Algorithm:    "ReedSolomon",
   696  			DataBlocks:   6,
   697  			ParityBlocks: 6,
   698  			BlockSize:    1048576,
   699  			Index:        1,
   700  			Distribution: []int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12},
   701  			Checksums:    []ChecksumInfo{{PartNumber: 1, Algorithm: 0x3, Hash: []uint8{}}},
   702  		},
   703  		NumVersions: 1,
   704  		Idx:         0,
   705  	}
   706  
   707  	fi2 := FileInfo{
   708  		Volume:           "mybucket",
   709  		Name:             "myobject",
   710  		VersionID:        "",
   711  		IsLatest:         true,
   712  		Deleted:          false,
   713  		DataDir:          "6f5c106d-9d28-4c85-a7f4-eac56225876b",
   714  		ModTime:          time.Date(2023, time.March, 15, 19, 57, 30, 492530160, time.UTC),
   715  		Size:             329289,
   716  		Mode:             0x0,
   717  		WrittenByVersion: 0x63c77756,
   718  		Metadata:         map[string]string{"content-type": "application/octet-stream", "etag": "f205307ef9f50594c4b86d9c246bee86", "x-minio-internal-inline-data": "true"},
   719  		Parts: []ObjectPartInfo{
   720  			{
   721  				ETag:       "",
   722  				Number:     1,
   723  				Size:       329289,
   724  				ActualSize: 329289,
   725  				ModTime:    time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC),
   726  				Index:      []uint8(nil),
   727  				Checksums:  map[string]string(nil),
   728  			},
   729  		},
   730  		Erasure: ErasureInfo{
   731  			Algorithm:    "ReedSolomon",
   732  			DataBlocks:   7,
   733  			ParityBlocks: 5,
   734  			BlockSize:    1048576,
   735  			Index:        2,
   736  			Distribution: []int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12},
   737  			Checksums: []ChecksumInfo{
   738  				{PartNumber: 1, Algorithm: 0x3, Hash: []uint8{}},
   739  			},
   740  		},
   741  		NumVersions: 1,
   742  		Idx:         0,
   743  	}
   744  
   745  	fiDel := FileInfo{
   746  		Volume:           "mybucket",
   747  		Name:             "myobject",
   748  		VersionID:        "",
   749  		IsLatest:         true,
   750  		Deleted:          true,
   751  		ModTime:          time.Date(2023, time.March, 15, 19, 57, 30, 492530160, time.UTC),
   752  		Mode:             0x0,
   753  		WrittenByVersion: 0x63c77756,
   754  		NumVersions:      1,
   755  		Idx:              0,
   756  	}
   757  
   758  	tests := []struct {
   759  		fi1, fi2 FileInfo
   760  	}{
   761  		{
   762  			fi1: fi1,
   763  			fi2: fi2,
   764  		},
   765  		{
   766  			fi1: fi1,
   767  			fi2: fiDel,
   768  		},
   769  	}
   770  	for idx, test := range tests {
   771  		var metaArr []FileInfo
   772  		for i := 0; i < 12; i++ {
   773  			fi := test.fi1
   774  			if i%2 == 0 {
   775  				fi = test.fi2
   776  			}
   777  			metaArr = append(metaArr, fi)
   778  		}
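         		// metaArr now holds test.fi2 on even indices and test.fi1 on odd indices,
         		// i.e. six copies of each across the 12 simulated drives.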
   779  
   780  		parities := listObjectParities(metaArr, make([]error, len(metaArr)))
   781  		parity := commonParity(parities, 5)
   782  		var match int
   783  		for _, fi := range metaArr {
   784  			if fi.Erasure.ParityBlocks == parity {
   785  				match++
   786  			}
   787  		}
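         		// The chosen parity must be carried by at least len(metaArr)-parity drives,
         		// i.e. by enough drives to satisfy a read quorum for that parity.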
   788  		if match < len(metaArr)-parity {
   789  			t.Fatalf("Test %d: Expected %d drives with parity=%d, but got %d", idx, len(metaArr)-parity, parity, match)
   790  		}
   791  	}
   792  }