storj.io/minio@v0.0.0-20230509071714-0cbc90f649b1/cmd/erasure-encode_test.go (about)

     1  /*
     2   * MinIO Cloud Storage, (C) 2016-2020 MinIO, Inc.
     3   *
     4   * Licensed under the Apache License, Version 2.0 (the "License");
     5   * you may not use this file except in compliance with the License.
     6   * You may obtain a copy of the License at
     7   *
     8   *     http://www.apache.org/licenses/LICENSE-2.0
     9   *
    10   * Unless required by applicable law or agreed to in writing, software
    11   * distributed under the License is distributed on an "AS IS" BASIS,
    12   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    13   * See the License for the specific language governing permissions and
    14   * limitations under the License.
    15   */
    16  
    17  package cmd
    18  
    19  import (
    20  	"bytes"
    21  	"context"
    22  	"crypto/rand"
    23  	"io"
    24  	"testing"
    25  
    26  	humanize "github.com/dustin/go-humanize"
    27  )
    28  
    29  type badDisk struct{ StorageAPI }
    30  
    31  func (a badDisk) String() string {
    32  	return "bad-disk"
    33  }
    34  
    35  func (a badDisk) AppendFile(ctx context.Context, volume string, path string, buf []byte) error {
    36  	return errFaultyDisk
    37  }
    38  
    39  func (a badDisk) ReadFileStream(ctx context.Context, volume, path string, offset, length int64) (io.ReadCloser, error) {
    40  	return nil, errFaultyDisk
    41  }
    42  
    43  func (a badDisk) UpdateBloomFilter(ctx context.Context, oldest, current uint64) (*bloomFilterResponse, error) {
    44  	return nil, errFaultyDisk
    45  }
    46  
    47  func (a badDisk) CreateFile(ctx context.Context, volume, path string, size int64, reader io.Reader) error {
    48  	return errFaultyDisk
    49  }
    50  
    51  func (badDisk) Hostname() string {
    52  	return ""
    53  }
    54  
// oneMiByte is the reference payload size (1 MiB) used by the test table below.
const oneMiByte = 1 * humanize.MiByte
    56  
// erasureEncodeTests drives TestErasureEncode. Each case describes an erasure
// set (dataBlocks data shards on onDisks total disks), a payload of `data`
// bytes encoded starting at `offset`, the bitrot algorithm to use, and the
// expected outcome: shouldFail for the first (healthy) encode pass, and
// shouldFailQuorum for the second pass in which offDisks disks are faulted.
var erasureEncodeTests = []struct {
	dataBlocks                   int
	onDisks, offDisks            int
	blocksize, data              int64
	offset                       int
	algorithm                    BitrotAlgorithm
	shouldFail, shouldFailQuorum bool
}{
	{dataBlocks: 2, onDisks: 4, offDisks: 0, blocksize: int64(blockSizeV2), data: oneMiByte, offset: 0, algorithm: BLAKE2b512, shouldFail: false, shouldFailQuorum: false},                             // 0
	{dataBlocks: 3, onDisks: 6, offDisks: 0, blocksize: int64(blockSizeV2), data: oneMiByte, offset: 1, algorithm: SHA256, shouldFail: false, shouldFailQuorum: false},                                 // 1
	{dataBlocks: 4, onDisks: 8, offDisks: 2, blocksize: int64(blockSizeV2), data: oneMiByte, offset: 2, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false},                 // 2
	{dataBlocks: 5, onDisks: 10, offDisks: 3, blocksize: int64(blockSizeV2), data: oneMiByte, offset: oneMiByte, algorithm: BLAKE2b512, shouldFail: false, shouldFailQuorum: false},                    // 3
	{dataBlocks: 6, onDisks: 12, offDisks: 4, blocksize: int64(blockSizeV2), data: oneMiByte, offset: oneMiByte, algorithm: BLAKE2b512, shouldFail: false, shouldFailQuorum: false},                    // 4
	{dataBlocks: 7, onDisks: 14, offDisks: 5, blocksize: int64(blockSizeV2), data: 0, offset: 0, shouldFail: false, algorithm: SHA256, shouldFailQuorum: false},                                        // 5
	{dataBlocks: 8, onDisks: 16, offDisks: 7, blocksize: int64(blockSizeV2), data: 0, offset: 0, shouldFail: false, algorithm: DefaultBitrotAlgorithm, shouldFailQuorum: false},                        // 6
	{dataBlocks: 2, onDisks: 4, offDisks: 2, blocksize: int64(blockSizeV2), data: oneMiByte, offset: 0, algorithm: BLAKE2b512, shouldFail: false, shouldFailQuorum: true},                              // 7
	{dataBlocks: 4, onDisks: 8, offDisks: 4, blocksize: int64(blockSizeV2), data: oneMiByte, offset: 0, algorithm: SHA256, shouldFail: false, shouldFailQuorum: true},                                  // 8
	{dataBlocks: 7, onDisks: 14, offDisks: 7, blocksize: int64(blockSizeV2), data: oneMiByte, offset: 0, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: true},                 // 9
	{dataBlocks: 8, onDisks: 16, offDisks: 8, blocksize: int64(blockSizeV2), data: oneMiByte, offset: 0, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: true},                 // 10
	{dataBlocks: 5, onDisks: 10, offDisks: 3, blocksize: int64(oneMiByte), data: oneMiByte, offset: 0, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false},                  // 11
	{dataBlocks: 3, onDisks: 6, offDisks: 1, blocksize: int64(blockSizeV2), data: oneMiByte, offset: oneMiByte / 2, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false},     // 12
	{dataBlocks: 2, onDisks: 4, offDisks: 0, blocksize: int64(oneMiByte / 2), data: oneMiByte, offset: oneMiByte/2 + 1, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 13
	{dataBlocks: 4, onDisks: 8, offDisks: 0, blocksize: int64(oneMiByte - 1), data: oneMiByte, offset: oneMiByte - 1, algorithm: BLAKE2b512, shouldFail: false, shouldFailQuorum: false},               // 14
	{dataBlocks: 8, onDisks: 12, offDisks: 2, blocksize: int64(blockSizeV2), data: oneMiByte, offset: 2, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false},                // 15
	{dataBlocks: 8, onDisks: 10, offDisks: 1, blocksize: int64(blockSizeV2), data: oneMiByte, offset: 0, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false},                // 16
	{dataBlocks: 10, onDisks: 14, offDisks: 0, blocksize: int64(blockSizeV2), data: oneMiByte, offset: 17, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false},              // 17
	{dataBlocks: 2, onDisks: 6, offDisks: 2, blocksize: int64(oneMiByte), data: oneMiByte, offset: oneMiByte / 2, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false},       // 18
	{dataBlocks: 10, onDisks: 16, offDisks: 8, blocksize: int64(blockSizeV2), data: oneMiByte, offset: 0, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: true},                // 19
}
    86  
// TestErasureEncode runs the table-driven cases in erasureEncodeTests. For
// each case it performs two encode passes: a first pass onto the healthy disk
// set (checked against shouldFail), then a second pass with the first
// offDisks writers faulted (checked against shouldFailQuorum). On success it
// also verifies the reported byte count matches the payload length.
func TestErasureEncode(t *testing.T) {
	for i, test := range erasureEncodeTests {
		setup, err := newErasureTestSetup(test.dataBlocks, test.onDisks-test.dataBlocks, test.blocksize)
		if err != nil {
			t.Fatalf("Test %d: failed to create test setup: %v", i, err)
		}
		disks := setup.disks
		erasure, err := NewErasure(context.Background(), test.dataBlocks, test.onDisks-test.dataBlocks, test.blocksize)
		if err != nil {
			setup.Remove()
			t.Fatalf("Test %d: failed to create ErasureStorage: %v", i, err)
		}
		buffer := make([]byte, test.blocksize, 2*test.blocksize)

		// Random payload so bitrot checksums exercise non-trivial data.
		data := make([]byte, test.data)
		if _, err = io.ReadFull(rand.Reader, data); err != nil {
			setup.Remove()
			t.Fatalf("Test %d: failed to generate random test data: %v", i, err)
		}
		// First pass: create a bitrot writer per online disk and encode.
		writers := make([]io.Writer, len(disks))
		for i, disk := range disks { // NOTE: this i shadows the outer test index.
			if disk == OfflineDisk {
				continue
			}
			writers[i] = newBitrotWriter(disk, "testbucket", "object", erasure.ShardFileSize(int64(len(data[test.offset:]))), test.algorithm, erasure.ShardSize(), false)
		}
		n, err := erasure.Encode(context.Background(), bytes.NewReader(data[test.offset:]), writers, buffer, erasure.dataBlocks+1)
		closeBitrotWriters(writers)
		if err != nil && !test.shouldFail {
			t.Errorf("Test %d: should pass but failed with: %v", i, err)
		}
		if err == nil && test.shouldFail {
			t.Errorf("Test %d: should fail but it passed", i)
		}
		// Disks that never got a writer are treated as offline for the
		// quorum pass below.
		for i, w := range writers {
			if w == nil {
				disks[i] = OfflineDisk
			}
		}
		if err == nil {
			if length := int64(len(data[test.offset:])); n != length {
				t.Errorf("Test %d: invalid number of bytes written: got: #%d want #%d", i, n, length)
			}
			// Second pass: re-encode to a new object, then fault the first
			// offDisks writers to verify quorum behavior.
			writers := make([]io.Writer, len(disks))
			for i, disk := range disks {
				if disk == nil {
					continue
				}
				writers[i] = newBitrotWriter(disk, "testbucket", "object2", erasure.ShardFileSize(int64(len(data[test.offset:]))), test.algorithm, erasure.ShardSize(), false)
			}
			// Fault injection depends on the writer implementation: whole
			// writers get a bad disk, streaming writers are closed with an
			// error up front.
			for j := range disks[:test.offDisks] {
				switch w := writers[j].(type) {
				case *wholeBitrotWriter:
					w.disk = badDisk{nil}
				case *streamingBitrotWriter:
					w.closeWithErr(errFaultyDisk)
				}
			}
			if test.offDisks > 0 {
				writers[0] = nil
			}
			n, err = erasure.Encode(context.Background(), bytes.NewReader(data[test.offset:]), writers, buffer, erasure.dataBlocks+1)
			closeBitrotWriters(writers)
			if err != nil && !test.shouldFailQuorum {
				t.Errorf("Test %d: should pass but failed with: %v", i, err)
			}
			if err == nil && test.shouldFailQuorum {
				t.Errorf("Test %d: should fail but it passed", i)
			}
			if err == nil {
				if length := int64(len(data[test.offset:])); n != length {
					t.Errorf("Test %d: invalid number of bytes written: got: #%d want #%d", i, n, length)
				}
			}
		}
		setup.Remove()
	}
}
   165  
   166  // Benchmarks
   167  
   168  func benchmarkErasureEncode(data, parity, dataDown, parityDown int, size int64, b *testing.B) {
   169  	setup, err := newErasureTestSetup(data, parity, blockSizeV2)
   170  	if err != nil {
   171  		b.Fatalf("failed to create test setup: %v", err)
   172  	}
   173  	defer setup.Remove()
   174  	erasure, err := NewErasure(context.Background(), data, parity, blockSizeV2)
   175  	if err != nil {
   176  		b.Fatalf("failed to create ErasureStorage: %v", err)
   177  	}
   178  	disks := setup.disks
   179  	buffer := make([]byte, blockSizeV2, 2*blockSizeV2)
   180  	content := make([]byte, size)
   181  
   182  	for i := 0; i < dataDown; i++ {
   183  		disks[i] = OfflineDisk
   184  	}
   185  	for i := data; i < data+parityDown; i++ {
   186  		disks[i] = OfflineDisk
   187  	}
   188  
   189  	b.ResetTimer()
   190  	b.SetBytes(size)
   191  	b.ReportAllocs()
   192  	for i := 0; i < b.N; i++ {
   193  		writers := make([]io.Writer, len(disks))
   194  		for i, disk := range disks {
   195  			if disk == OfflineDisk {
   196  				continue
   197  			}
   198  			disk.Delete(context.Background(), "testbucket", "object", false)
   199  			writers[i] = newBitrotWriter(disk, "testbucket", "object",
   200  				erasure.ShardFileSize(size), DefaultBitrotAlgorithm, erasure.ShardSize(), false)
   201  		}
   202  		_, err := erasure.Encode(context.Background(), bytes.NewReader(content), writers, buffer, erasure.dataBlocks+1)
   203  		closeBitrotWriters(writers)
   204  		if err != nil {
   205  			panic(err)
   206  		}
   207  	}
   208  }
   209  
   210  func BenchmarkErasureEncodeQuick(b *testing.B) {
   211  	const size = 12 * 1024 * 1024
   212  	b.Run(" 00|00 ", func(b *testing.B) { benchmarkErasureEncode(2, 2, 0, 0, size, b) })
   213  	b.Run(" 00|X0 ", func(b *testing.B) { benchmarkErasureEncode(2, 2, 0, 1, size, b) })
   214  	b.Run(" X0|00 ", func(b *testing.B) { benchmarkErasureEncode(2, 2, 1, 0, size, b) })
   215  }
   216  
   217  func BenchmarkErasureEncode_4_64KB(b *testing.B) {
   218  	const size = 64 * 1024
   219  	b.Run(" 00|00 ", func(b *testing.B) { benchmarkErasureEncode(2, 2, 0, 0, size, b) })
   220  	b.Run(" 00|X0 ", func(b *testing.B) { benchmarkErasureEncode(2, 2, 0, 1, size, b) })
   221  	b.Run(" X0|00 ", func(b *testing.B) { benchmarkErasureEncode(2, 2, 1, 0, size, b) })
   222  }
   223  
   224  func BenchmarkErasureEncode_8_20MB(b *testing.B) {
   225  	const size = 20 * 1024 * 1024
   226  	b.Run(" 0000|0000 ", func(b *testing.B) { benchmarkErasureEncode(4, 4, 0, 0, size, b) })
   227  	b.Run(" 0000|X000 ", func(b *testing.B) { benchmarkErasureEncode(4, 4, 0, 1, size, b) })
   228  	b.Run(" X000|0000 ", func(b *testing.B) { benchmarkErasureEncode(4, 4, 1, 0, size, b) })
   229  	b.Run(" 0000|XXX0 ", func(b *testing.B) { benchmarkErasureEncode(4, 4, 0, 3, size, b) })
   230  	b.Run(" XXX0|0000 ", func(b *testing.B) { benchmarkErasureEncode(4, 4, 3, 0, size, b) })
   231  }
   232  
   233  func BenchmarkErasureEncode_12_30MB(b *testing.B) {
   234  	const size = 30 * 1024 * 1024
   235  	b.Run(" 000000|000000 ", func(b *testing.B) { benchmarkErasureEncode(6, 6, 0, 0, size, b) })
   236  	b.Run(" 000000|X00000 ", func(b *testing.B) { benchmarkErasureEncode(6, 6, 0, 1, size, b) })
   237  	b.Run(" X00000|000000 ", func(b *testing.B) { benchmarkErasureEncode(6, 6, 1, 0, size, b) })
   238  	b.Run(" 000000|XXXXX0 ", func(b *testing.B) { benchmarkErasureEncode(6, 6, 0, 5, size, b) })
   239  	b.Run(" XXXXX0|000000 ", func(b *testing.B) { benchmarkErasureEncode(6, 6, 5, 0, size, b) })
   240  }
   241  
   242  func BenchmarkErasureEncode_16_40MB(b *testing.B) {
   243  	const size = 40 * 1024 * 1024
   244  	b.Run(" 00000000|00000000 ", func(b *testing.B) { benchmarkErasureEncode(8, 8, 0, 0, size, b) })
   245  	b.Run(" 00000000|X0000000 ", func(b *testing.B) { benchmarkErasureEncode(8, 8, 0, 1, size, b) })
   246  	b.Run(" X0000000|00000000 ", func(b *testing.B) { benchmarkErasureEncode(8, 8, 1, 0, size, b) })
   247  	b.Run(" 00000000|XXXXXXX0 ", func(b *testing.B) { benchmarkErasureEncode(8, 8, 0, 7, size, b) })
   248  	b.Run(" XXXXXXX0|00000000 ", func(b *testing.B) { benchmarkErasureEncode(8, 8, 7, 0, size, b) })
   249  }