github.com/minio/minio@v0.0.0-20240328213742-3f72439b8a27/cmd/erasure-common.go

// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program.  If not, see <http://www.gnu.org/licenses/>.

package cmd

import (
	"context"
	"fmt"
	"io"
	"math/rand"
	"sync"
	"time"

	"github.com/minio/minio/internal/logger"
	"github.com/minio/pkg/v2/sync/errgroup"
)

func (er erasureObjects) getOnlineDisks() (newDisks []StorageAPI) {
	disks := er.getDisks()
	var wg sync.WaitGroup
	var mu sync.Mutex
	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	for _, i := range r.Perm(len(disks)) {
		i := i
		wg.Add(1)
		go func() {
			defer wg.Done()
			if disks[i] == nil {
				return
			}
			di, err := disks[i].DiskInfo(context.Background(), DiskInfoOptions{})
			if err != nil || di.Healing {
				// - Do not consume disks which are not reachable,
				//   unformatted or simply not accessible for some reason.
				//
				// - Do not consume disks which are being healed.
				//
				// - Future: skip busy disks.
				return
			}

			mu.Lock()
			newDisks = append(newDisks, disks[i])
			mu.Unlock()
		}()
	}
	wg.Wait()
	return newDisks
}
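
// Illustrative sketch (not part of the original file): getOnlineDisks returns
// the reachable, non-healing disks in randomized order, so a caller that only
// needs a few of them can simply truncate the slice. The method name
// pickOnlineDisks is a hypothetical helper added here for illustration only.
func (er erasureObjects) pickOnlineDisks(n int) []StorageAPI {
	disks := er.getOnlineDisks()
	if n > 0 && len(disks) > n {
		// The slice is already shuffled, so any prefix is a random subset.
		disks = disks[:n]
	}
	return disks
}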

func (er erasureObjects) getOnlineLocalDisks() (newDisks []StorageAPI) {
	disks := er.getOnlineDisks()

	// Based on a random shuffle, return the local disks in randomized order.
	r := rand.New(rand.NewSource(time.Now().UnixNano()))

	for _, i := range r.Perm(len(disks)) {
		if disks[i] != nil && disks[i].IsLocal() {
			newDisks = append(newDisks, disks[i])
		}
	}

	return newDisks
}

func (er erasureObjects) getLocalDisks() (newDisks []StorageAPI) {
	disks := er.getDisks()
	// Based on a random shuffle, return the local disks in randomized order.
	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	for _, i := range r.Perm(len(disks)) {
		if disks[i] != nil && disks[i].IsLocal() {
			newDisks = append(newDisks, disks[i])
		}
	}
	return newDisks
}
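
// Illustrative sketch (not part of the original file): the two helpers above
// differ only in whether unreachable or healing drives are filtered out, so
// comparing their lengths gives a rough count of local drives that are
// currently unavailable. offlineLocalDiskCount is a hypothetical helper used
// for illustration only.
func (er erasureObjects) offlineLocalDiskCount() int {
	online := len(er.getOnlineLocalDisks())
	total := len(er.getLocalDisks())
	if total < online {
		return 0
	}
	return total - online
}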

// readMultipleFiles reads raw data for all requested files from all disks.
func readMultipleFiles(ctx context.Context, disks []StorageAPI, req ReadMultipleReq, readQuorum int) ([]ReadMultipleResp, error) {
	resps := make([]chan ReadMultipleResp, len(disks))
	for i := range resps {
		resps[i] = make(chan ReadMultipleResp, len(req.Files))
	}
	g := errgroup.WithNErrs(len(disks))
	// Read files in parallel across disks.
	for index := range disks {
		index := index
		g.Go(func() (err error) {
			if disks[index] == nil {
				return errDiskNotFound
			}
			return disks[index].ReadMultiple(ctx, req, resps[index])
		}, index)
	}

	dataArray := make([]ReadMultipleResp, 0, len(req.Files))
	// Merge results. Responses from each disk should arrive in the order of req.Files.
	for _, wantFile := range req.Files {
		quorum := 0
		toAdd := ReadMultipleResp{
			Bucket: req.Bucket,
			Prefix: req.Prefix,
			File:   wantFile,
		}
		for i := range resps {
			if disks[i] == nil {
				continue
			}
			select {
			case <-ctx.Done():
			case gotFile, ok := <-resps[i]:
				if !ok {
					continue
				}
				if gotFile.Error != "" || !gotFile.Exists {
					continue
				}
				if gotFile.File != wantFile || gotFile.Bucket != req.Bucket || gotFile.Prefix != req.Prefix {
					continue
				}
				quorum++
				if toAdd.Modtime.After(gotFile.Modtime) || len(gotFile.Data) < len(toAdd.Data) {
					// Pick the latest, or the largest, to avoid possibly truncated entries.
					continue
				}
				toAdd = gotFile
			}
		}
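		// Fewer than readQuorum successful responses means this file's data
		// cannot be trusted; report a read quorum error instead of possibly
		// stale or truncated content.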
		if quorum < readQuorum {
			toAdd.Exists = false
			toAdd.Error = errErasureReadQuorum.Error()
			toAdd.Data = nil
		}
		dataArray = append(dataArray, toAdd)
	}

	ignoredErrs := []error{
		errFileNotFound,
		errVolumeNotFound,
		errFileVersionNotFound,
		io.ErrUnexpectedEOF, // sometimes we read without locks, ignore these errors
		io.EOF,              // sometimes we read without locks, ignore these errors
	}
	ignoredErrs = append(ignoredErrs, objectOpIgnoredErrs...)

	errs := g.Wait()
	for index, err := range errs {
		if err == nil {
			continue
		}
		if !IsErr(err, ignoredErrs...) {
			logger.LogOnceIf(ctx, fmt.Errorf("Drive %s, path (%s/%s) returned an error (%w)",
				disks[index], req.Bucket, req.Prefix, err),
				disks[index].String())
		}
	}

	// Return all the metadata.
	return dataArray, nil
}
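
// Illustrative sketch (not part of the original file): a hypothetical caller
// that uses readMultipleFiles to fetch a single well-known file from every
// disk in a set and returns its contents once read quorum is met. The name
// readOneFromAll and its parameters are assumptions for illustration only.
func readOneFromAll(ctx context.Context, disks []StorageAPI, bucket, prefix, file string, readQuorum int) ([]byte, error) {
	req := ReadMultipleReq{
		Bucket: bucket,
		Prefix: prefix,
		Files:  []string{file},
	}
	resps, err := readMultipleFiles(ctx, disks, req, readQuorum)
	if err != nil {
		return nil, err
	}
	for _, resp := range resps {
		// readMultipleFiles returns one entry per requested file; a usable
		// entry exists and carries no error.
		if resp.Exists && resp.Error == "" {
			return resp.Data, nil
		}
	}
	return nil, errFileNotFound
}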