github.com/minio/minio@v0.0.0-20240328213742-3f72439b8a27/cmd/xl-storage-format_test.go

     1  // Copyright (c) 2015-2021 MinIO, Inc.
     2  //
     3  // This file is part of MinIO Object Storage stack
     4  //
     5  // This program is free software: you can redistribute it and/or modify
     6  // it under the terms of the GNU Affero General Public License as published by
     7  // the Free Software Foundation, either version 3 of the License, or
     8  // (at your option) any later version.
     9  //
    10  // This program is distributed in the hope that it will be useful
    11  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    12  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    13  // GNU Affero General Public License for more details.
    14  //
    15  // You should have received a copy of the GNU Affero General Public License
    16  // along with this program.  If not, see <http://www.gnu.org/licenses/>.
    17  
    18  package cmd
    19  
    20  import (
    21  	"bytes"
    22  	"encoding/hex"
    23  	"encoding/json"
    24  	"fmt"
    25  	"math/rand"
    26  	"testing"
    27  	"time"
    28  
    29  	"github.com/dustin/go-humanize"
    30  	jsoniter "github.com/json-iterator/go"
    31  	xhttp "github.com/minio/minio/internal/http"
    32  )
    33  
    34  func TestIsXLMetaFormatValid(t *testing.T) {
    35  	tests := []struct {
    36  		name    int
    37  		version string
    38  		format  string
    39  		want    bool
    40  	}{
    41  		{1, "123", "fs", false},
    42  		{2, "123", xlMetaFormat, false},
    43  		{3, xlMetaVersion100, "test", false},
    44  		{4, xlMetaVersion101, "hello", false},
    45  		{5, xlMetaVersion100, xlMetaFormat, true},
    46  		{6, xlMetaVersion101, xlMetaFormat, true},
    47  	}
    48  	for _, tt := range tests {
    49  		if got := isXLMetaFormatValid(tt.version, tt.format); got != tt.want {
     50  			t.Errorf("Test %d: Expected %v but received %v", tt.name, tt.want, got)
    51  		}
    52  	}
    53  }
    54  
    55  func TestIsXLMetaErasureInfoValid(t *testing.T) {
    56  	tests := []struct {
    57  		name   int
    58  		data   int
    59  		parity int
    60  		want   bool
    61  	}{
    62  		{1, 5, 6, false},
    63  		{2, 5, 5, true},
    64  		{3, 0, 5, false},
     65  		{4, -1, 5, false},
     66  		{5, 5, -1, false},
     67  		{6, 5, 0, true},
     68  		{7, 5, 0, true},
     69  		{8, 5, 4, true},
    70  	}
    71  	for _, tt := range tests {
    72  		if got := isXLMetaErasureInfoValid(tt.data, tt.parity); got != tt.want {
     73  			t.Errorf("Test %d: Expected %v but received %v -> %#v", tt.name, tt.want, got, tt)
    74  		}
    75  	}
    76  }
    77  
     78  // newTestXLMetaV1 - initializes a new xlMetaV1Object, sets the version and format, and allocates fresh erasure info and metadata.
    79  func newTestXLMetaV1() xlMetaV1Object {
    80  	xlMeta := xlMetaV1Object{}
    81  	xlMeta.Version = xlMetaVersion101
    82  	xlMeta.Format = xlMetaFormat
    83  	xlMeta.Minio.Release = "test"
    84  	xlMeta.Erasure = ErasureInfo{
    85  		Algorithm:    "klauspost/reedsolomon/vandermonde",
    86  		DataBlocks:   5,
    87  		ParityBlocks: 5,
    88  		BlockSize:    10485760,
    89  		Index:        10,
    90  		Distribution: []int{9, 10, 1, 2, 3, 4, 5, 6, 7, 8},
    91  	}
    92  	xlMeta.Stat = StatInfo{
    93  		Size:    int64(20),
    94  		ModTime: UTCNow(),
    95  	}
     96  	// Set metadata.
    97  	xlMeta.Meta = make(map[string]string)
    98  	xlMeta.Meta["testKey1"] = "val1"
    99  	xlMeta.Meta["testKey2"] = "val2"
   100  	return xlMeta
   101  }
   102  
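         // AddTestObjectCheckSum - adds a checksum entry (hex-encoded hash) for the given part
         // number; the Erasure.Checksums slice must already be allocated to hold the part.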
   103  func (m *xlMetaV1Object) AddTestObjectCheckSum(partNumber int, algorithm BitrotAlgorithm, hash string) {
   104  	checksum, err := hex.DecodeString(hash)
   105  	if err != nil {
   106  		panic(err)
   107  	}
   108  	m.Erasure.Checksums[partNumber-1] = ChecksumInfo{partNumber, algorithm, checksum}
   109  }
   110  
   111  // AddTestObjectPart - add a new object part in order.
   112  func (m *xlMetaV1Object) AddTestObjectPart(partNumber int, partSize int64) {
   113  	partInfo := ObjectPartInfo{
   114  		Number: partNumber,
   115  		Size:   partSize,
   116  	}
   117  
   118  	// Proceed to include new part info.
   119  	m.Parts[partNumber-1] = partInfo
   120  }
   121  
    122  // Constructs xlMetaV1Object{} for the given number of parts and marshals it into JSON bytes.
   123  func getXLMetaBytes(totalParts int) []byte {
   124  	xlSampleMeta := getSampleXLMeta(totalParts)
   125  	xlMetaBytes, err := json.Marshal(xlSampleMeta)
   126  	if err != nil {
   127  		panic(err)
   128  	}
   129  	return xlMetaBytes
   130  }
   131  
    132  // Returns a sample xlMetaV1Object{} for the given number of parts.
   133  func getSampleXLMeta(totalParts int) xlMetaV1Object {
   134  	xlMeta := newTestXLMetaV1()
   135  	// Number of checksum info == total parts.
   136  	xlMeta.Erasure.Checksums = make([]ChecksumInfo, totalParts)
    137  	// Total number of parts.
   138  	xlMeta.Parts = make([]ObjectPartInfo, totalParts)
   139  	for i := 0; i < totalParts; i++ {
    140  		// Hard-code the hash and algorithm values for the checksum. Since we are benchmarking
    141  		// the parsing of xl.meta, the actual values don't matter, only their size does.
   142  		xlMeta.AddTestObjectCheckSum(i+1, BLAKE2b512, "a23f5eff248c4372badd9f3b2455a285cd4ca86c3d9a570b091d3fc5cd7ca6d9484bbea3f8c5d8d4f84daae96874419eda578fd736455334afbac2c924b3915a")
   143  		xlMeta.AddTestObjectPart(i+1, 67108864)
   144  	}
   145  	return xlMeta
   146  }
   147  
   148  // Compare the unmarshaled XLMetaV1 with the one obtained from jsoniter parsing.
   149  func compareXLMetaV1(t *testing.T, unMarshalXLMeta, jsoniterXLMeta xlMetaV1Object) {
    150  	// Compare the fields of the xlMetaV1Object obtained from jsoniter parsing with the one parsed using encoding/json unmarshalling.
   151  	if unMarshalXLMeta.Version != jsoniterXLMeta.Version {
   152  		t.Errorf("Expected the Version to be \"%s\", but got \"%s\".", unMarshalXLMeta.Version, jsoniterXLMeta.Version)
   153  	}
   154  	if unMarshalXLMeta.Format != jsoniterXLMeta.Format {
   155  		t.Errorf("Expected the format to be \"%s\", but got \"%s\".", unMarshalXLMeta.Format, jsoniterXLMeta.Format)
   156  	}
   157  	if unMarshalXLMeta.Stat.Size != jsoniterXLMeta.Stat.Size {
   158  		t.Errorf("Expected the stat size to be %v, but got %v.", unMarshalXLMeta.Stat.Size, jsoniterXLMeta.Stat.Size)
   159  	}
   160  	if !unMarshalXLMeta.Stat.ModTime.Equal(jsoniterXLMeta.Stat.ModTime) {
   161  		t.Errorf("Expected the modTime to be \"%v\", but got \"%v\".", unMarshalXLMeta.Stat.ModTime, jsoniterXLMeta.Stat.ModTime)
   162  	}
   163  	if unMarshalXLMeta.Erasure.Algorithm != jsoniterXLMeta.Erasure.Algorithm {
   164  		t.Errorf("Expected the erasure algorithm to be \"%v\", but got \"%v\".", unMarshalXLMeta.Erasure.Algorithm, jsoniterXLMeta.Erasure.Algorithm)
   165  	}
   166  	if unMarshalXLMeta.Erasure.DataBlocks != jsoniterXLMeta.Erasure.DataBlocks {
   167  		t.Errorf("Expected the erasure data blocks to be %v, but got %v.", unMarshalXLMeta.Erasure.DataBlocks, jsoniterXLMeta.Erasure.DataBlocks)
   168  	}
   169  	if unMarshalXLMeta.Erasure.ParityBlocks != jsoniterXLMeta.Erasure.ParityBlocks {
   170  		t.Errorf("Expected the erasure parity blocks to be %v, but got %v.", unMarshalXLMeta.Erasure.ParityBlocks, jsoniterXLMeta.Erasure.ParityBlocks)
   171  	}
   172  	if unMarshalXLMeta.Erasure.BlockSize != jsoniterXLMeta.Erasure.BlockSize {
   173  		t.Errorf("Expected the erasure block size to be %v, but got %v.", unMarshalXLMeta.Erasure.BlockSize, jsoniterXLMeta.Erasure.BlockSize)
   174  	}
   175  	if unMarshalXLMeta.Erasure.Index != jsoniterXLMeta.Erasure.Index {
   176  		t.Errorf("Expected the erasure index to be %v, but got %v.", unMarshalXLMeta.Erasure.Index, jsoniterXLMeta.Erasure.Index)
   177  	}
   178  	if len(unMarshalXLMeta.Erasure.Distribution) != len(jsoniterXLMeta.Erasure.Distribution) {
   179  		t.Errorf("Expected the size of Erasure Distribution to be %d, but got %d.", len(unMarshalXLMeta.Erasure.Distribution), len(jsoniterXLMeta.Erasure.Distribution))
   180  	} else {
   181  		for i := 0; i < len(unMarshalXLMeta.Erasure.Distribution); i++ {
   182  			if unMarshalXLMeta.Erasure.Distribution[i] != jsoniterXLMeta.Erasure.Distribution[i] {
   183  				t.Errorf("Expected the Erasure Distribution to be %d, got %d.", unMarshalXLMeta.Erasure.Distribution[i], jsoniterXLMeta.Erasure.Distribution[i])
   184  			}
   185  		}
   186  	}
   187  
   188  	if len(unMarshalXLMeta.Erasure.Checksums) != len(jsoniterXLMeta.Erasure.Checksums) {
   189  		t.Errorf("Expected the size of Erasure Checksums to be %d, but got %d.", len(unMarshalXLMeta.Erasure.Checksums), len(jsoniterXLMeta.Erasure.Checksums))
   190  	} else {
   191  		for i := 0; i < len(unMarshalXLMeta.Erasure.Checksums); i++ {
   192  			if unMarshalXLMeta.Erasure.Checksums[i].PartNumber != jsoniterXLMeta.Erasure.Checksums[i].PartNumber {
   193  				t.Errorf("Expected the Erasure Checksum PartNumber to be \"%d\", got \"%d\".", unMarshalXLMeta.Erasure.Checksums[i].PartNumber, jsoniterXLMeta.Erasure.Checksums[i].PartNumber)
   194  			}
   195  			if unMarshalXLMeta.Erasure.Checksums[i].Algorithm != jsoniterXLMeta.Erasure.Checksums[i].Algorithm {
   196  				t.Errorf("Expected the Erasure Checksum Algorithm to be \"%s\", got \"%s\".", unMarshalXLMeta.Erasure.Checksums[i].Algorithm, jsoniterXLMeta.Erasure.Checksums[i].Algorithm)
   197  			}
   198  			if !bytes.Equal(unMarshalXLMeta.Erasure.Checksums[i].Hash, jsoniterXLMeta.Erasure.Checksums[i].Hash) {
    199  				t.Errorf("Expected the Erasure Checksum Hash to be \"%x\", got \"%x\".", unMarshalXLMeta.Erasure.Checksums[i].Hash, jsoniterXLMeta.Erasure.Checksums[i].Hash)
   200  			}
   201  		}
   202  	}
   203  
   204  	if unMarshalXLMeta.Minio.Release != jsoniterXLMeta.Minio.Release {
   205  		t.Errorf("Expected the Release string to be \"%s\", but got \"%s\".", unMarshalXLMeta.Minio.Release, jsoniterXLMeta.Minio.Release)
   206  	}
   207  	if len(unMarshalXLMeta.Parts) != len(jsoniterXLMeta.Parts) {
    208  		t.Errorf("Expected info of %d parts to be present, but got %d instead.", len(unMarshalXLMeta.Parts), len(jsoniterXLMeta.Parts))
   209  	} else {
   210  		for i := 0; i < len(unMarshalXLMeta.Parts); i++ {
   211  			if unMarshalXLMeta.Parts[i].Number != jsoniterXLMeta.Parts[i].Number {
   212  				t.Errorf("Expected the number of part %d to be \"%d\", got \"%d\".", i+1, unMarshalXLMeta.Parts[i].Number, jsoniterXLMeta.Parts[i].Number)
   213  			}
   214  			if unMarshalXLMeta.Parts[i].Size != jsoniterXLMeta.Parts[i].Size {
   215  				t.Errorf("Expected the size of part %d to be %v, got %v.", i+1, unMarshalXLMeta.Parts[i].Size, jsoniterXLMeta.Parts[i].Size)
   216  			}
   217  		}
   218  	}
   219  
   220  	for key, val := range unMarshalXLMeta.Meta {
    221  		jsoniterVal, exists := jsoniterXLMeta.Meta[key]
    222  		if !exists {
    223  			t.Errorf("No metadata entry for key \"%s\" exists.", key)
    224  			continue
    225  		}
    226  		if val != jsoniterVal {
    227  			t.Errorf("Expected the value for metadata key \"%s\" to be \"%s\", but got \"%s\".", key, val, jsoniterVal)
    228  		}
    229  	}
   230  }
   231  
    232  // Tests the correctness of constructing XLMetaV1 using the jsoniter library.
    233  // The result is compared with the result obtained from json.Unmarshal of the same byte data.
   234  func TestGetXLMetaV1Jsoniter1(t *testing.T) {
   235  	xlMetaJSON := getXLMetaBytes(1)
   236  
   237  	var unMarshalXLMeta xlMetaV1Object
   238  	if err := json.Unmarshal(xlMetaJSON, &unMarshalXLMeta); err != nil {
   239  		t.Errorf("Unmarshalling failed: %v", err)
   240  	}
   241  
   242  	var jsoniterXLMeta xlMetaV1Object
   243  	json := jsoniter.ConfigCompatibleWithStandardLibrary
   244  	if err := json.Unmarshal(xlMetaJSON, &jsoniterXLMeta); err != nil {
   245  		t.Errorf("jsoniter parsing of XLMeta failed: %v", err)
   246  	}
   247  	compareXLMetaV1(t, unMarshalXLMeta, jsoniterXLMeta)
   248  }
   249  
    250  // Tests the correctness of constructing XLMetaV1 using the jsoniter library for an XLMetaV1 with 10 parts.
    251  // The result is compared with the result obtained from json.Unmarshal of the same byte data.
   252  func TestGetXLMetaV1Jsoniter10(t *testing.T) {
   253  	xlMetaJSON := getXLMetaBytes(10)
   254  
   255  	var unMarshalXLMeta xlMetaV1Object
   256  	if err := json.Unmarshal(xlMetaJSON, &unMarshalXLMeta); err != nil {
   257  		t.Errorf("Unmarshalling failed: %v", err)
   258  	}
   259  
   260  	var jsoniterXLMeta xlMetaV1Object
   261  	json := jsoniter.ConfigCompatibleWithStandardLibrary
   262  	if err := json.Unmarshal(xlMetaJSON, &jsoniterXLMeta); err != nil {
   263  		t.Errorf("jsoniter parsing of XLMeta failed: %v", err)
   264  	}
   265  
   266  	compareXLMetaV1(t, unMarshalXLMeta, jsoniterXLMeta)
   267  }
   268  
   269  // Test the predicted part size from the part index
   270  func TestGetPartSizeFromIdx(t *testing.T) {
   271  	// Create test cases
   272  	testCases := []struct {
   273  		totalSize    int64
   274  		partSize     int64
   275  		partIndex    int
   276  		expectedSize int64
   277  	}{
   278  		// Total size is zero
   279  		{0, 10, 1, 0},
   280  		// part size 2MiB, total size 4MiB
   281  		{4 * humanize.MiByte, 2 * humanize.MiByte, 1, 2 * humanize.MiByte},
   282  		{4 * humanize.MiByte, 2 * humanize.MiByte, 2, 2 * humanize.MiByte},
   283  		{4 * humanize.MiByte, 2 * humanize.MiByte, 3, 0},
   284  		// part size 2MiB, total size 5MiB
   285  		{5 * humanize.MiByte, 2 * humanize.MiByte, 1, 2 * humanize.MiByte},
   286  		{5 * humanize.MiByte, 2 * humanize.MiByte, 2, 2 * humanize.MiByte},
   287  		{5 * humanize.MiByte, 2 * humanize.MiByte, 3, 1 * humanize.MiByte},
   288  		{5 * humanize.MiByte, 2 * humanize.MiByte, 4, 0},
   289  	}
   290  
   291  	for i, testCase := range testCases {
   292  		s, err := calculatePartSizeFromIdx(GlobalContext, testCase.totalSize, testCase.partSize, testCase.partIndex)
   293  		if err != nil {
   294  			t.Errorf("Test %d: Expected to pass but failed. %s", i+1, err)
   295  		}
   296  		if err == nil && s != testCase.expectedSize {
   297  			t.Errorf("Test %d: The calculated part size is incorrect: expected = %d, found = %d\n", i+1, testCase.expectedSize, s)
   298  		}
   299  	}
   300  
   301  	testCasesFailure := []struct {
   302  		totalSize int64
   303  		partSize  int64
   304  		partIndex int
   305  		err       error
   306  	}{
   307  		// partSize is 0, returns error.
   308  		{10, 0, 1, errPartSizeZero},
   309  		// partIndex is 0, returns error.
   310  		{10, 1, 0, errPartSizeIndex},
    311  		// Total size is negative, returns error.
   312  		{-2, 10, 1, errInvalidArgument},
   313  	}
   314  
   315  	for i, testCaseFailure := range testCasesFailure {
   316  		_, err := calculatePartSizeFromIdx(GlobalContext, testCaseFailure.totalSize, testCaseFailure.partSize, testCaseFailure.partIndex)
   317  		if err == nil {
    318  			t.Errorf("Test %d: Expected to fail but passed.", i+1)
   319  		}
   320  		if err != nil && err != testCaseFailure.err {
   321  			t.Errorf("Test %d: Expected err %s, but got %s", i+1, testCaseFailure.err, err)
   322  		}
   323  	}
   324  }
   325  
   326  func BenchmarkXlMetaV2Shallow(b *testing.B) {
   327  	fi := FileInfo{
   328  		Volume:           "volume",
   329  		Name:             "object-name",
   330  		VersionID:        "756100c6-b393-4981-928a-d49bbc164741",
   331  		IsLatest:         true,
   332  		Deleted:          false,
   333  		TransitionStatus: "PENDING",
   334  		DataDir:          "bffea160-ca7f-465f-98bc-9b4f1c3ba1ef",
   335  		XLV1:             false,
   336  		ModTime:          time.Now(),
   337  		Size:             1234456,
   338  		Mode:             0,
   339  		Metadata: map[string]string{
   340  			xhttp.AmzRestore:                 "FAILED",
   341  			xhttp.ContentMD5:                 mustGetUUID(),
   342  			xhttp.AmzBucketReplicationStatus: "PENDING",
   343  			xhttp.ContentType:                "application/json",
   344  		},
   345  		Parts: []ObjectPartInfo{
   346  			{
   347  				Number:     1,
   348  				Size:       1234345,
   349  				ActualSize: 1234345,
   350  			},
   351  			{
   352  				Number:     2,
   353  				Size:       1234345,
   354  				ActualSize: 1234345,
   355  			},
   356  		},
   357  		Erasure: ErasureInfo{
   358  			Algorithm:    ReedSolomon.String(),
   359  			DataBlocks:   4,
   360  			ParityBlocks: 2,
   361  			BlockSize:    10000,
   362  			Index:        1,
   363  			Distribution: []int{1, 2, 3, 4, 5, 6, 7, 8},
   364  			Checksums: []ChecksumInfo{
   365  				{
   366  					PartNumber: 1,
   367  					Algorithm:  HighwayHash256S,
   368  					Hash:       nil,
   369  				},
   370  				{
   371  					PartNumber: 2,
   372  					Algorithm:  HighwayHash256S,
   373  					Hash:       nil,
   374  				},
   375  			},
   376  		},
   377  	}
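         	// Run the sub-benchmarks below against metadata holding 1 to 100 000 object versions.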
   378  	for _, size := range []int{1, 10, 1000, 100_000} {
   379  		b.Run(fmt.Sprint(size, "-versions"), func(b *testing.B) {
   380  			var xl xlMetaV2
   381  			ids := make([]string, size)
   382  			for i := 0; i < size; i++ {
   383  				fi.VersionID = mustGetUUID()
   384  				fi.DataDir = mustGetUUID()
   385  				ids[i] = fi.VersionID
   386  				fi.ModTime = fi.ModTime.Add(-time.Second)
   387  				xl.AddVersion(fi)
   388  			}
    389  			// Encode all versions; the serialized form is the input for the sub-benchmarks below.
   390  			enc, err := xl.AppendTo(nil)
   391  			if err != nil {
   392  				b.Fatal(err)
   393  			}
   394  			b.Logf("Serialized size: %d bytes", len(enc))
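         			// Fixed-seed RNG so every sub-benchmark visits the same sequence of version IDs.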
   395  			rng := rand.New(rand.NewSource(0))
   396  			dump := make([]byte, len(enc))
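         			// Each mutating sub-benchmark below reloads the encoded metadata, applies one change
         			// to a random version, and re-serializes the result into the reusable dump buffer.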
   397  			b.Run("UpdateObjectVersion", func(b *testing.B) {
   398  				b.SetBytes(int64(size))
   399  				b.ResetTimer()
   400  				b.ReportAllocs()
   401  				for i := 0; i < b.N; i++ {
   402  					// Load...
   403  					xl = xlMetaV2{}
   404  					err := xl.Load(enc)
   405  					if err != nil {
   406  						b.Fatal(err)
   407  					}
   408  					// Update modtime for resorting...
   409  					fi.ModTime = fi.ModTime.Add(-time.Second)
   410  					// Update a random version.
   411  					fi.VersionID = ids[rng.Intn(size)]
   412  					// Update...
   413  					err = xl.UpdateObjectVersion(fi)
   414  					if err != nil {
   415  						b.Fatal(err)
   416  					}
   417  					// Save...
   418  					dump, err = xl.AppendTo(dump[:0])
   419  					if err != nil {
   420  						b.Fatal(err)
   421  					}
   422  				}
   423  			})
   424  			b.Run("DeleteVersion", func(b *testing.B) {
   425  				b.SetBytes(int64(size))
   426  				b.ResetTimer()
   427  				b.ReportAllocs()
   428  				for i := 0; i < b.N; i++ {
   429  					// Load...
   430  					xl = xlMetaV2{}
   431  					err := xl.Load(enc)
   432  					if err != nil {
   433  						b.Fatal(err)
   434  					}
   435  					// Update a random version.
   436  					fi.VersionID = ids[rng.Intn(size)]
   437  					// Delete...
   438  					_, err = xl.DeleteVersion(fi)
   439  					if err != nil {
   440  						b.Fatal(err)
   441  					}
   442  					// Save...
   443  					dump, err = xl.AppendTo(dump[:0])
   444  					if err != nil {
   445  						b.Fatal(err)
   446  					}
   447  				}
   448  			})
   449  			b.Run("AddVersion", func(b *testing.B) {
   450  				b.SetBytes(int64(size))
   451  				b.ResetTimer()
   452  				b.ReportAllocs()
   453  				for i := 0; i < b.N; i++ {
   454  					// Load...
   455  					xl = xlMetaV2{}
   456  					err := xl.Load(enc)
   457  					if err != nil {
   458  						b.Fatal(err)
   459  					}
   460  					// Update modtime for resorting...
   461  					fi.ModTime = fi.ModTime.Add(-time.Second)
    462  					// Add a new version with a fresh ID.
   463  					fi.VersionID = mustGetUUID()
   464  					// Add...
   465  					err = xl.AddVersion(fi)
   466  					if err != nil {
   467  						b.Fatal(err)
   468  					}
   469  					// Save...
   470  					dump, err = xl.AppendTo(dump[:0])
   471  					if err != nil {
   472  						b.Fatal(err)
   473  					}
   474  				}
   475  			})
   476  			b.Run("ToFileInfo", func(b *testing.B) {
   477  				b.SetBytes(int64(size))
   478  				b.ResetTimer()
   479  				b.ReportAllocs()
   480  				for i := 0; i < b.N; i++ {
   481  					// Load...
   482  					xl = xlMetaV2{}
   483  					err := xl.Load(enc)
   484  					if err != nil {
   485  						b.Fatal(err)
   486  					}
    487  					// Convert a random version to FileInfo...
   488  					_, err = xl.ToFileInfo("volume", "path", ids[rng.Intn(size)], false, true)
   489  					if err != nil {
   490  						b.Fatal(err)
   491  					}
   492  				}
   493  			})
   494  			b.Run("ListVersions", func(b *testing.B) {
   495  				b.SetBytes(int64(size))
   496  				b.ResetTimer()
   497  				b.ReportAllocs()
   498  				for i := 0; i < b.N; i++ {
   499  					// Load...
   500  					xl = xlMetaV2{}
   501  					err := xl.Load(enc)
   502  					if err != nil {
   503  						b.Fatal(err)
   504  					}
   505  					// List...
   506  					_, err = xl.ListVersions("volume", "path", true)
   507  					if err != nil {
   508  						b.Fatal(err)
   509  					}
   510  				}
   511  			})
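         			// The *New variants below operate on the raw serialized buffer via isIndexedMetaV2
         			// instead of performing a full Load of the metadata first.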
   512  			b.Run("ToFileInfoNew", func(b *testing.B) {
   513  				b.SetBytes(int64(size))
   514  				b.ResetTimer()
   515  				b.ReportAllocs()
   516  				for i := 0; i < b.N; i++ {
   517  					buf, _, _ := isIndexedMetaV2(enc)
   518  					if buf == nil {
   519  						b.Fatal("buf == nil")
   520  					}
   521  					_, err = buf.ToFileInfo("volume", "path", ids[rng.Intn(size)], true)
   522  					if err != nil {
   523  						b.Fatal(err)
   524  					}
   525  				}
   526  			})
   527  			b.Run("ListVersionsNew", func(b *testing.B) {
   528  				b.SetBytes(int64(size))
   529  				b.ResetTimer()
   530  				b.ReportAllocs()
   531  				for i := 0; i < b.N; i++ {
   532  					buf, _, _ := isIndexedMetaV2(enc)
   533  					if buf == nil {
   534  						b.Fatal("buf == nil")
   535  					}
   536  					_, err = buf.ListVersions("volume", "path", true)
   537  					if err != nil {
   538  						b.Fatal(err)
   539  					}
   540  				}
   541  			})
   542  		})
   543  	}
   544  }