storj.io/minio@v0.0.0-20230509071714-0cbc90f649b1/cmd/erasure-heal_test.go

/*
 * MinIO Cloud Storage, (C) 2016-2020 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
	"bytes"
	"context"
	"crypto/rand"
	"io"
	"os"
	"testing"
)

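// erasureHealTests drives TestErasureHeal. Each case fixes an erasure
// layout, takes some disks offline (these become the stale disks to be
// healed), injects read failures on some of the remaining online disks
// and, optionally, write failures on the stale disks themselves, then
// records whether Heal is expected to succeed.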
var erasureHealTests = []struct {
	dataBlocks, disks int

	// the number of offline disks is also the number of stale disks to
	// be reconstructed during healing in this test
	offDisks int

	// bad disks are online disks that return errors
	badDisks, badStaleDisks int

	blocksize, size int64
	algorithm       BitrotAlgorithm
	shouldFail      bool
}{
	{dataBlocks: 2, disks: 4, offDisks: 1, badDisks: 0, badStaleDisks: 0, blocksize: int64(blockSizeV2), size: oneMiByte, algorithm: SHA256, shouldFail: false},                   // 0
	{dataBlocks: 3, disks: 6, offDisks: 2, badDisks: 0, badStaleDisks: 0, blocksize: int64(blockSizeV2), size: oneMiByte, algorithm: BLAKE2b512, shouldFail: false},               // 1
	{dataBlocks: 4, disks: 8, offDisks: 2, badDisks: 1, badStaleDisks: 0, blocksize: int64(blockSizeV2), size: oneMiByte, algorithm: BLAKE2b512, shouldFail: false},               // 2
	{dataBlocks: 5, disks: 10, offDisks: 3, badDisks: 1, badStaleDisks: 0, blocksize: int64(blockSizeV2), size: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false},  // 3
	{dataBlocks: 6, disks: 12, offDisks: 2, badDisks: 3, badStaleDisks: 0, blocksize: int64(blockSizeV2), size: oneMiByte, algorithm: SHA256, shouldFail: false},                  // 4
	{dataBlocks: 7, disks: 14, offDisks: 4, badDisks: 1, badStaleDisks: 0, blocksize: int64(blockSizeV2), size: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false},  // 5
	{dataBlocks: 8, disks: 16, offDisks: 6, badDisks: 1, badStaleDisks: 1, blocksize: int64(blockSizeV2), size: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false},  // 6
	{dataBlocks: 7, disks: 14, offDisks: 2, badDisks: 3, badStaleDisks: 0, blocksize: int64(oneMiByte / 2), size: oneMiByte, algorithm: BLAKE2b512, shouldFail: false},            // 7
	{dataBlocks: 6, disks: 12, offDisks: 1, badDisks: 0, badStaleDisks: 1, blocksize: int64(oneMiByte - 1), size: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: true}, // 8
	{dataBlocks: 5, disks: 10, offDisks: 3, badDisks: 0, badStaleDisks: 3, blocksize: int64(oneMiByte / 2), size: oneMiByte, algorithm: SHA256, shouldFail: true},                 // 9
	{dataBlocks: 4, disks: 8, offDisks: 1, badDisks: 1, badStaleDisks: 0, blocksize: int64(blockSizeV2), size: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false},   // 10
	{dataBlocks: 2, disks: 4, offDisks: 1, badDisks: 0, badStaleDisks: 1, blocksize: int64(blockSizeV2), size: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: true},    // 11
	{dataBlocks: 6, disks: 12, offDisks: 8, badDisks: 3, badStaleDisks: 0, blocksize: int64(blockSizeV2), size: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: true},   // 12
	{dataBlocks: 7, disks: 14, offDisks: 3, badDisks: 4, badStaleDisks: 0, blocksize: int64(blockSizeV2), size: oneMiByte, algorithm: BLAKE2b512, shouldFail: false},              // 13
	{dataBlocks: 7, disks: 14, offDisks: 6, badDisks: 1, badStaleDisks: 0, blocksize: int64(blockSizeV2), size: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false},  // 14
	{dataBlocks: 8, disks: 16, offDisks: 4, badDisks: 5, badStaleDisks: 0, blocksize: int64(blockSizeV2), size: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: true},   // 15
	{dataBlocks: 2, disks: 4, offDisks: 1, badDisks: 0, badStaleDisks: 0, blocksize: int64(blockSizeV2), size: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false},   // 16
	{dataBlocks: 12, disks: 16, offDisks: 2, badDisks: 1, badStaleDisks: 0, blocksize: int64(blockSizeV2), size: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false}, // 17
	{dataBlocks: 6, disks: 8, offDisks: 1, badDisks: 0, badStaleDisks: 0, blocksize: int64(blockSizeV2), size: oneMiByte, algorithm: BLAKE2b512, shouldFail: false},               // 18
	{dataBlocks: 2, disks: 4, offDisks: 1, badDisks: 0, badStaleDisks: 0, blocksize: int64(blockSizeV2), size: oneMiByte * 64, algorithm: SHA256, shouldFail: false},              // 19
}

func TestErasureHeal(t *testing.T) {
	for i, test := range erasureHealTests {
		if test.offDisks < test.badStaleDisks {
			// test case sanity check
			t.Fatalf("Test %d: bad test case - the number of stale disks cannot be less than the number of bad stale disks", i)
		}

		// create some test data
		setup, err := newErasureTestSetup(test.dataBlocks, test.disks-test.dataBlocks, test.blocksize)
		if err != nil {
			t.Fatalf("Test %d: failed to set up Erasure environment: %v", i, err)
		}
		disks := setup.disks
		erasure, err := NewErasure(context.Background(), test.dataBlocks, test.disks-test.dataBlocks, test.blocksize)
		if err != nil {
			setup.Remove()
			t.Fatalf("Test %d: failed to create ErasureStorage: %v", i, err)
		}
		data := make([]byte, test.size)
		if _, err = io.ReadFull(rand.Reader, data); err != nil {
			setup.Remove()
			t.Fatalf("Test %d: failed to create random test data: %v", i, err)
		}
		buffer := make([]byte, test.blocksize, 2*test.blocksize)
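		// wrap each disk in a bitrot-protected writer so every shard written
		// during encoding carries a checksum that can be verified later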
		writers := make([]io.Writer, len(disks))
		for j, disk := range disks {
			writers[j] = newBitrotWriter(disk, "testbucket", "testobject",
				erasure.ShardFileSize(test.size), test.algorithm, erasure.ShardSize(), true)
		}
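		// erasure-encode the random data across all shards; the final
		// argument is the write quorum - at least dataBlocks+1 writers
		// must succeed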
		_, err = erasure.Encode(context.Background(), bytes.NewReader(data), writers, buffer, erasure.dataBlocks+1)
		closeBitrotWriters(writers)
		if err != nil {
			setup.Remove()
			t.Fatalf("Test %d: failed to create erasure test data: %v", i, err)
		}

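		// open bitrot-verifying readers over the shards just written, using
		// each writer's checksum as the expected shard checksum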
		readers := make([]io.ReaderAt, len(disks))
		for j, disk := range disks {
			shardFilesize := erasure.ShardFileSize(test.size)
			readers[j] = newBitrotReader(disk, nil, "testbucket", "testobject", shardFilesize, test.algorithm, bitrotWriterSum(writers[j]), erasure.ShardSize())
		}

		// set up stale disks for the test case: the first offDisks entries
		// have no readers (they are offline) and become the stale disks to
		// heal; the remaining disks stay online and are not healed
		staleDisks := make([]StorageAPI, len(disks))
		copy(staleDisks, disks)
		for j := 0; j < len(staleDisks); j++ {
			if j < test.offDisks {
				readers[j] = nil
			} else {
				staleDisks[j] = nil
			}
		}
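		// swap the underlying disk of the first badDisks online readers for
		// a badDisk so that reads from those disks fail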
		for j := 0; j < test.badDisks; j++ {
			switch r := readers[test.offDisks+j].(type) {
			case *streamingBitrotReader:
				r.disk = badDisk{nil}
			case *wholeBitrotReader:
				r.disk = badDisk{nil}
			}
		}
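		// make the first badStaleDisks stale disks fail on write, so that
		// healing onto them cannot succeed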
		for j := 0; j < test.badStaleDisks; j++ {
			staleDisks[j] = badDisk{nil}
		}

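		// remove any leftover shard file and attach a fresh bitrot writer to
		// each stale disk; Heal reconstructs the missing shards through them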
		staleWriters := make([]io.Writer, len(staleDisks))
		for j, disk := range staleDisks {
			if disk == nil {
				continue
			}
			os.Remove(pathJoin(disk.String(), "testbucket", "testobject"))
			staleWriters[j] = newBitrotWriter(disk, "testbucket", "testobject",
				erasure.ShardFileSize(test.size), test.algorithm, erasure.ShardSize(), true)
		}

		// test case setup is complete - now call Heal()
		err = erasure.Heal(context.Background(), readers, staleWriters, test.size)
		closeBitrotReaders(readers)
		closeBitrotWriters(staleWriters)
		if err != nil && !test.shouldFail {
			t.Errorf("Test %d: should pass but it failed with: %v", i, err)
		}
		if err == nil && test.shouldFail {
			t.Errorf("Test %d: should fail but it passed", i)
		}
		if err == nil {
			// verify that the checksums on the healed stale disks match
			// the checksums originally written
			for j := range staleWriters {
				if staleWriters[j] == nil {
					continue
				}
				if !bytes.Equal(bitrotWriterSum(staleWriters[j]), bitrotWriterSum(writers[j])) {
					t.Errorf("Test %d: heal returned different bitrot checksums for disk %d", i, j)
				}
			}
		}
		setup.Remove()
	}
}