storj.io/minio@v0.0.0-20230509071714-0cbc90f649b1/cmd/erasure-decode_test.go

/*
 * MinIO Cloud Storage, (C) 2016-2020 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
	"bytes"
	"context"
	"io"
	"math/rand"
	"testing"

	crand "crypto/rand"

	humanize "github.com/dustin/go-humanize"
)

func (a badDisk) ReadFile(ctx context.Context, volume string, path string, offset int64, buf []byte, verifier *BitrotVerifier) (n int64, err error) {
	return 0, errFaultyDisk
}

var erasureDecodeTests = []struct {
	dataBlocks                   int
	onDisks, offDisks            int
	blocksize, data              int64
	offset                       int64
	length                       int64
	algorithm                    BitrotAlgorithm
	shouldFail, shouldFailQuorum bool
}{
	{dataBlocks: 2, onDisks: 4, offDisks: 0, blocksize: int64(blockSizeV2), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: BLAKE2b512, shouldFail: false, shouldFailQuorum: false},                        // 0
	{dataBlocks: 3, onDisks: 6, offDisks: 0, blocksize: int64(blockSizeV2), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: SHA256, shouldFail: false, shouldFailQuorum: false},                            // 1
	{dataBlocks: 4, onDisks: 8, offDisks: 0, blocksize: int64(blockSizeV2), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false},            // 2
	{dataBlocks: 5, onDisks: 10, offDisks: 0, blocksize: int64(blockSizeV2), data: oneMiByte, offset: 1, length: oneMiByte - 1, algorithm: BLAKE2b512, shouldFail: false, shouldFailQuorum: false},                   // 3
	{dataBlocks: 6, onDisks: 12, offDisks: 0, blocksize: int64(oneMiByte), data: oneMiByte, offset: oneMiByte, length: 0, algorithm: BLAKE2b512, shouldFail: false, shouldFailQuorum: false},                         // 4
	{dataBlocks: 7, onDisks: 14, offDisks: 0, blocksize: int64(oneMiByte), data: oneMiByte, offset: 3, length: 1024, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false},                  // 5
	{dataBlocks: 8, onDisks: 16, offDisks: 0, blocksize: int64(oneMiByte), data: oneMiByte, offset: 4, length: 8 * 1024, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false},              // 6
	{dataBlocks: 7, onDisks: 14, offDisks: 7, blocksize: int64(blockSizeV2), data: oneMiByte, offset: oneMiByte, length: 1, algorithm: DefaultBitrotAlgorithm, shouldFail: true, shouldFailQuorum: false},            // 7
	{dataBlocks: 6, onDisks: 12, offDisks: 6, blocksize: int64(blockSizeV2), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false},           // 8
	{dataBlocks: 5, onDisks: 10, offDisks: 5, blocksize: int64(oneMiByte), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: BLAKE2b512, shouldFail: false, shouldFailQuorum: false},                         // 9
	{dataBlocks: 4, onDisks: 8, offDisks: 4, blocksize: int64(blockSizeV2), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: SHA256, shouldFail: false, shouldFailQuorum: false},                            // 10
	{dataBlocks: 3, onDisks: 6, offDisks: 3, blocksize: int64(oneMiByte), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false},              // 11
	{dataBlocks: 2, onDisks: 4, offDisks: 2, blocksize: int64(blockSizeV2), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false},            // 12
	{dataBlocks: 2, onDisks: 4, offDisks: 1, blocksize: int64(oneMiByte), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false},              // 13
	{dataBlocks: 3, onDisks: 6, offDisks: 2, blocksize: int64(oneMiByte), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false},              // 14
	{dataBlocks: 4, onDisks: 8, offDisks: 3, blocksize: int64(2 * oneMiByte), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false},          // 15
	{dataBlocks: 5, onDisks: 10, offDisks: 6, blocksize: int64(oneMiByte), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: true},              // 16
	{dataBlocks: 5, onDisks: 10, offDisks: 2, blocksize: int64(blockSizeV2), data: 2 * oneMiByte, offset: oneMiByte, length: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 17
	{dataBlocks: 5, onDisks: 10, offDisks: 1, blocksize: int64(blockSizeV2), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: BLAKE2b512, shouldFail: false, shouldFailQuorum: false},                       // 18
	{dataBlocks: 6, onDisks: 12, offDisks: 3, blocksize: int64(blockSizeV2), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: SHA256, shouldFail: false, shouldFailQuorum: false},                           // 19
	{dataBlocks: 6, onDisks: 12, offDisks: 7, blocksize: int64(blockSizeV2), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: true},            // 20
	{dataBlocks: 8, onDisks: 16, offDisks: 8, blocksize: int64(blockSizeV2), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false},           // 21
	{dataBlocks: 8, onDisks: 16, offDisks: 9, blocksize: int64(oneMiByte), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: true},              // 22
	{dataBlocks: 8, onDisks: 16, offDisks: 7, blocksize: int64(blockSizeV2), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false},           // 23
	{dataBlocks: 2, onDisks: 4, offDisks: 1, blocksize: int64(blockSizeV2), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false},            // 24
	{dataBlocks: 2, onDisks: 4, offDisks: 0, blocksize: int64(blockSizeV2), data: oneMiByte, offset: 0, length: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false},            // 25
	{dataBlocks: 2, onDisks: 4, offDisks: 0, blocksize: int64(blockSizeV2), data: int64(blockSizeV2) + 1, offset: 0, length: int64(blockSizeV2) + 1, algorithm: BLAKE2b512, shouldFail: false, shouldFailQuorum: false},    // 26
	{dataBlocks: 2, onDisks: 4, offDisks: 0, blocksize: int64(blockSizeV2), data: int64(2 * blockSizeV2), offset: 12, length: int64(blockSizeV2) + 17, algorithm: BLAKE2b512, shouldFail: false, shouldFailQuorum: false},  // 27
	{dataBlocks: 3, onDisks: 6, offDisks: 0, blocksize: int64(blockSizeV2), data: int64(2 * blockSizeV2), offset: 1023, length: int64(blockSizeV2) + 1024, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 28
	{dataBlocks: 4, onDisks: 8, offDisks: 0, blocksize: int64(blockSizeV2), data: int64(2 * blockSizeV2), offset: 11, length: int64(blockSizeV2) + 2*1024, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 29
	{dataBlocks: 6, onDisks: 12, offDisks: 0, blocksize: int64(blockSizeV2), data: int64(2 * blockSizeV2), offset: 512, length: int64(blockSizeV2) + 8*1024, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 30
	{dataBlocks: 8, onDisks: 16, offDisks: 0, blocksize: int64(blockSizeV2), data: int64(2 * blockSizeV2), offset: int64(blockSizeV2), length: int64(blockSizeV2) - 1, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 31
	{dataBlocks: 2, onDisks: 4, offDisks: 0, blocksize: int64(blockSizeV2), data: int64(oneMiByte), offset: -1, length: 3, algorithm: DefaultBitrotAlgorithm, shouldFail: true, shouldFailQuorum: false},             // 32
	{dataBlocks: 2, onDisks: 4, offDisks: 0, blocksize: int64(blockSizeV2), data: int64(oneMiByte), offset: 1024, length: -1, algorithm: DefaultBitrotAlgorithm, shouldFail: true, shouldFailQuorum: false},          // 33
	{dataBlocks: 4, onDisks: 6, offDisks: 0, blocksize: int64(blockSizeV2), data: int64(blockSizeV2), offset: 0, length: int64(blockSizeV2), algorithm: BLAKE2b512, shouldFail: false, shouldFailQuorum: false},      // 34
	{dataBlocks: 4, onDisks: 6, offDisks: 1, blocksize: int64(blockSizeV2), data: int64(2 * blockSizeV2), offset: 12, length: int64(blockSizeV2) + 17, algorithm: BLAKE2b512, shouldFail: false, shouldFailQuorum: false},   // 35
	{dataBlocks: 4, onDisks: 6, offDisks: 3, blocksize: int64(blockSizeV2), data: int64(2 * blockSizeV2), offset: 1023, length: int64(blockSizeV2) + 1024, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: true},  // 36
	{dataBlocks: 8, onDisks: 12, offDisks: 4, blocksize: int64(blockSizeV2), data: int64(2 * blockSizeV2), offset: 11, length: int64(blockSizeV2) + 2*1024, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 37
}

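// TestErasureDecode encodes random data for each entry in erasureDecodeTests,
// decodes the requested offset/length range back with all disks online, and
// then repeats the decode with the first offDisks readers backed by faulty
// disks to check that quorum failures occur exactly when shouldFailQuorum
// expects them.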
func TestErasureDecode(t *testing.T) {
	for i, test := range erasureDecodeTests {
		setup, err := newErasureTestSetup(test.dataBlocks, test.onDisks-test.dataBlocks, test.blocksize)
		if err != nil {
			t.Fatalf("Test %d: failed to create test setup: %v", i, err)
		}
		erasure, err := NewErasure(context.Background(), test.dataBlocks, test.onDisks-test.dataBlocks, test.blocksize)
		if err != nil {
			setup.Remove()
			t.Fatalf("Test %d: failed to create ErasureStorage: %v", i, err)
		}
		disks := setup.disks
		data := make([]byte, test.data)
		if _, err = io.ReadFull(crand.Reader, data); err != nil {
			setup.Remove()
			t.Fatalf("Test %d: failed to generate random test data: %v", i, err)
		}

		writeAlgorithm := test.algorithm
		if !test.algorithm.Available() {
			writeAlgorithm = DefaultBitrotAlgorithm
		}
		buffer := make([]byte, test.blocksize, 2*test.blocksize)
		writers := make([]io.Writer, len(disks))
		for i, disk := range disks {
			writers[i] = newBitrotWriter(disk, "testbucket", "object",
				erasure.ShardFileSize(test.data), writeAlgorithm, erasure.ShardSize(), false)
		}
		n, err := erasure.Encode(context.Background(), bytes.NewReader(data[:]), writers, buffer, erasure.dataBlocks+1)
		closeBitrotWriters(writers)
		if err != nil {
			setup.Remove()
			t.Fatalf("Test %d: failed to create erasure test file: %v", i, err)
		}
		if n != test.data {
			setup.Remove()
			t.Fatalf("Test %d: failed to create erasure test file", i)
		}
		for i, w := range writers {
			if w == nil {
				disks[i] = nil
			}
		}

		// Get the checksums of the current part.
		bitrotReaders := make([]io.ReaderAt, len(disks))
		for index, disk := range disks {
			if disk == OfflineDisk {
				continue
			}
			tillOffset := erasure.ShardFileOffset(test.offset, test.length, test.data)

			bitrotReaders[index] = newBitrotReader(disk, nil, "testbucket", "object", tillOffset, writeAlgorithm, bitrotWriterSum(writers[index]), erasure.ShardSize())
		}

		writer := bytes.NewBuffer(nil)
		_, err = erasure.Decode(context.Background(), writer, bitrotReaders, test.offset, test.length, test.data, nil)
		closeBitrotReaders(bitrotReaders)
		if err != nil && !test.shouldFail {
			t.Errorf("Test %d: should pass but failed with: %v", i, err)
		}
		if err == nil && test.shouldFail {
			t.Errorf("Test %d: should fail but it passed", i)
		}
		if err == nil {
			if content := writer.Bytes(); !bytes.Equal(content, data[test.offset:test.offset+test.length]) {
				t.Errorf("Test %d: read returns wrong file content.", i)
			}
		}

		for i, r := range bitrotReaders {
			if r == nil {
				disks[i] = OfflineDisk
			}
		}
		if err == nil && !test.shouldFail {
			bitrotReaders = make([]io.ReaderAt, len(disks))
			for index, disk := range disks {
				if disk == OfflineDisk {
					continue
				}
				tillOffset := erasure.ShardFileOffset(test.offset, test.length, test.data)
				bitrotReaders[index] = newBitrotReader(disk, nil, "testbucket", "object", tillOffset, writeAlgorithm, bitrotWriterSum(writers[index]), erasure.ShardSize())
			}
			for j := range disks[:test.offDisks] {
				if bitrotReaders[j] == nil {
					continue
				}
				switch r := bitrotReaders[j].(type) {
				case *wholeBitrotReader:
					r.disk = badDisk{nil}
				case *streamingBitrotReader:
					r.disk = badDisk{nil}
				}
			}
			if test.offDisks > 0 {
				bitrotReaders[0] = nil
			}
			writer.Reset()
			_, err = erasure.Decode(context.Background(), writer, bitrotReaders, test.offset, test.length, test.data, nil)
			closeBitrotReaders(bitrotReaders)
			if err != nil && !test.shouldFailQuorum {
				t.Errorf("Test %d: should pass but failed with: %v", i, err)
			}
			if err == nil && test.shouldFailQuorum {
				t.Errorf("Test %d: should fail but it passed", i)
			}
			if !test.shouldFailQuorum {
				if content := writer.Bytes(); !bytes.Equal(content, data[test.offset:test.offset+test.length]) {
					t.Errorf("Test %d: read returns wrong file content", i)
				}
			}
		}
		setup.Remove()
	}
}

// Test erasureDecode with random offset and lengths.
// This test is t.Skip()ed as it takes a long time to run, hence it should be run
// explicitly after commenting out t.Skip().
func TestErasureDecodeRandomOffsetLength(t *testing.T) {
	if testing.Short() {
		t.Skip()
	}
	// Initialize environment needed for the test.
	dataBlocks := 7
	parityBlocks := 7
	blockSize := int64(1 * humanize.MiByte)
	setup, err := newErasureTestSetup(dataBlocks, parityBlocks, blockSize)
	if err != nil {
		t.Error(err)
		return
	}
	defer setup.Remove()
	disks := setup.disks
	erasure, err := NewErasure(context.Background(), dataBlocks, parityBlocks, blockSize)
	if err != nil {
		t.Fatalf("failed to create ErasureStorage: %v", err)
	}
	// Prepare a slice of 5MiB with random data.
	data := make([]byte, 5*humanize.MiByte)
	length := int64(len(data))
	_, err = rand.Read(data)
	if err != nil {
		t.Fatal(err)
	}

	writers := make([]io.Writer, len(disks))
	for i, disk := range disks {
		if disk == nil {
			continue
		}
		writers[i] = newBitrotWriter(disk, "testbucket", "object",
			erasure.ShardFileSize(length), DefaultBitrotAlgorithm, erasure.ShardSize(), false)
	}

	// 10000 iterations with random offsets and lengths.
	iterations := 10000

	// Create a test file to read from.
	buffer := make([]byte, blockSize, 2*blockSize)
	n, err := erasure.Encode(context.Background(), bytes.NewReader(data), writers, buffer, erasure.dataBlocks+1)
	closeBitrotWriters(writers)
	if err != nil {
		t.Fatal(err)
	}
	if n != length {
		t.Errorf("erasureCreateFile returned %d, expected %d", n, length)
	}

	// To generate random offset/length.
	r := rand.New(rand.NewSource(UTCNow().UnixNano()))

	buf := &bytes.Buffer{}

	// Verify erasure.Decode() for random offsets and lengths.
	for i := 0; i < iterations; i++ {
		offset := r.Int63n(length)
		readLen := r.Int63n(length - offset)

		expected := data[offset : offset+readLen]

		// Get the checksums of the current part.
		bitrotReaders := make([]io.ReaderAt, len(disks))
		for index, disk := range disks {
			if disk == OfflineDisk {
				continue
			}
			tillOffset := erasure.ShardFileOffset(offset, readLen, length)
			bitrotReaders[index] = newStreamingBitrotReader(disk, nil, "testbucket", "object", tillOffset, DefaultBitrotAlgorithm, erasure.ShardSize())
		}
		_, err = erasure.Decode(context.Background(), buf, bitrotReaders, offset, readLen, length, nil)
		closeBitrotReaders(bitrotReaders)
		if err != nil {
			t.Fatal(err, offset, readLen)
		}
		got := buf.Bytes()
		if !bytes.Equal(expected, got) {
			t.Fatalf("read data is different from what was expected, offset=%d length=%d", offset, readLen)
		}
		buf.Reset()
	}
}

// Benchmarks

func benchmarkErasureDecode(data, parity, dataDown, parityDown int, size int64, b *testing.B) {
	setup, err := newErasureTestSetup(data, parity, blockSizeV2)
	if err != nil {
		b.Fatalf("failed to create test setup: %v", err)
	}
	defer setup.Remove()
	disks := setup.disks
	erasure, err := NewErasure(context.Background(), data, parity, blockSizeV2)
	if err != nil {
		b.Fatalf("failed to create ErasureStorage: %v", err)
	}

	writers := make([]io.Writer, len(disks))
	for i, disk := range disks {
		if disk == nil {
			continue
		}
		writers[i] = newBitrotWriter(disk, "testbucket", "object",
			erasure.ShardFileSize(size), DefaultBitrotAlgorithm, erasure.ShardSize(), false)
	}

	content := make([]byte, size)
	buffer := make([]byte, blockSizeV2, 2*blockSizeV2)
	_, err = erasure.Encode(context.Background(), bytes.NewReader(content), writers, buffer, erasure.dataBlocks+1)
	closeBitrotWriters(writers)
	if err != nil {
		b.Fatalf("failed to create erasure test file: %v", err)
	}

	for i := 0; i < dataDown; i++ {
		writers[i] = nil
	}
	for i := data; i < data+parityDown; i++ {
		writers[i] = nil
	}

	b.ResetTimer()
	b.SetBytes(size)
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		bitrotReaders := make([]io.ReaderAt, len(disks))
		for index, disk := range disks {
			if writers[index] == nil {
				continue
			}
			tillOffset := erasure.ShardFileOffset(0, size, size)
			bitrotReaders[index] = newStreamingBitrotReader(disk, nil, "testbucket", "object", tillOffset, DefaultBitrotAlgorithm, erasure.ShardSize())
		}
		if _, err = erasure.Decode(context.Background(), bytes.NewBuffer(content[:0]), bitrotReaders, 0, size, size, nil); err != nil {
			panic(err)
		}
		closeBitrotReaders(bitrotReaders)
	}
}

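// The sub-benchmark names below show the disk layout as <data disks>|<parity disks>:
// an X marks a disk that is taken offline before decoding (via dataDown/parityDown),
// while a 0 marks a healthy disk.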
func BenchmarkErasureDecodeQuick(b *testing.B) {
	const size = 12 * 1024 * 1024
	b.Run(" 00|00 ", func(b *testing.B) { benchmarkErasureDecode(2, 2, 0, 0, size, b) })
	b.Run(" 00|X0 ", func(b *testing.B) { benchmarkErasureDecode(2, 2, 0, 1, size, b) })
	b.Run(" X0|00 ", func(b *testing.B) { benchmarkErasureDecode(2, 2, 1, 0, size, b) })
	b.Run(" X0|X0 ", func(b *testing.B) { benchmarkErasureDecode(2, 2, 1, 1, size, b) })
}

func BenchmarkErasureDecode_4_64KB(b *testing.B) {
	const size = 64 * 1024
	b.Run(" 00|00 ", func(b *testing.B) { benchmarkErasureDecode(2, 2, 0, 0, size, b) })
	b.Run(" 00|X0 ", func(b *testing.B) { benchmarkErasureDecode(2, 2, 0, 1, size, b) })
	b.Run(" X0|00 ", func(b *testing.B) { benchmarkErasureDecode(2, 2, 1, 0, size, b) })
	b.Run(" X0|X0 ", func(b *testing.B) { benchmarkErasureDecode(2, 2, 1, 1, size, b) })
	b.Run(" 00|XX ", func(b *testing.B) { benchmarkErasureDecode(2, 2, 0, 2, size, b) })
	b.Run(" XX|00 ", func(b *testing.B) { benchmarkErasureDecode(2, 2, 2, 0, size, b) })
}

func BenchmarkErasureDecode_8_20MB(b *testing.B) {
	const size = 20 * 1024 * 1024
	b.Run(" 0000|0000 ", func(b *testing.B) { benchmarkErasureDecode(4, 4, 0, 0, size, b) })
	b.Run(" 0000|X000 ", func(b *testing.B) { benchmarkErasureDecode(4, 4, 0, 1, size, b) })
	b.Run(" X000|0000 ", func(b *testing.B) { benchmarkErasureDecode(4, 4, 1, 0, size, b) })
	b.Run(" X000|X000 ", func(b *testing.B) { benchmarkErasureDecode(4, 4, 1, 1, size, b) })
	b.Run(" 0000|XXXX ", func(b *testing.B) { benchmarkErasureDecode(4, 4, 0, 4, size, b) })
	b.Run(" XX00|XX00 ", func(b *testing.B) { benchmarkErasureDecode(4, 4, 2, 2, size, b) })
	b.Run(" XXXX|0000 ", func(b *testing.B) { benchmarkErasureDecode(4, 4, 4, 0, size, b) })
}

func BenchmarkErasureDecode_12_30MB(b *testing.B) {
	const size = 30 * 1024 * 1024
	b.Run(" 000000|000000 ", func(b *testing.B) { benchmarkErasureDecode(6, 6, 0, 0, size, b) })
	b.Run(" 000000|X00000 ", func(b *testing.B) { benchmarkErasureDecode(6, 6, 0, 1, size, b) })
	b.Run(" X00000|000000 ", func(b *testing.B) { benchmarkErasureDecode(6, 6, 1, 0, size, b) })
	b.Run(" X00000|X00000 ", func(b *testing.B) { benchmarkErasureDecode(6, 6, 1, 1, size, b) })
	b.Run(" 000000|XXXXXX ", func(b *testing.B) { benchmarkErasureDecode(6, 6, 0, 6, size, b) })
	b.Run(" XXX000|XXX000 ", func(b *testing.B) { benchmarkErasureDecode(6, 6, 3, 3, size, b) })
	b.Run(" XXXXXX|000000 ", func(b *testing.B) { benchmarkErasureDecode(6, 6, 6, 0, size, b) })
}

func BenchmarkErasureDecode_16_40MB(b *testing.B) {
	const size = 40 * 1024 * 1024
	b.Run(" 00000000|00000000 ", func(b *testing.B) { benchmarkErasureDecode(8, 8, 0, 0, size, b) })
	b.Run(" 00000000|X0000000 ", func(b *testing.B) { benchmarkErasureDecode(8, 8, 0, 1, size, b) })
	b.Run(" X0000000|00000000 ", func(b *testing.B) { benchmarkErasureDecode(8, 8, 1, 0, size, b) })
	b.Run(" X0000000|X0000000 ", func(b *testing.B) { benchmarkErasureDecode(8, 8, 1, 1, size, b) })
	b.Run(" 00000000|XXXXXXXX ", func(b *testing.B) { benchmarkErasureDecode(8, 8, 0, 8, size, b) })
	b.Run(" XXXX0000|XXXX0000 ", func(b *testing.B) { benchmarkErasureDecode(8, 8, 4, 4, size, b) })
	b.Run(" XXXXXXXX|00000000 ", func(b *testing.B) { benchmarkErasureDecode(8, 8, 8, 0, size, b) })
}