storj.io/minio@v0.0.0-20230509071714-0cbc90f649b1/cmd/erasure-common.go

/*
 * MinIO Cloud Storage, (C) 2016-2020 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
	"context"
	"sync"

	"storj.io/minio/pkg/sync/errgroup"
)

// getLocalDisks returns only the disks from this erasure set that are local
// to the current node.
func (er erasureObjects) getLocalDisks() (localDisks []StorageAPI) {
	disks := er.getDisks()
	for _, disk := range disks {
		if disk != nil && disk.IsLocal() {
			localDisks = append(localDisks, disk)
		}
	}
	return localDisks
}

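// Usage sketch (illustrative, not part of the original file): callers that
// must touch only disks attached to this node, such as a local healing scan,
// can iterate the result of getLocalDisks. countLocalDisks is a hypothetical
// helper added here purely as an example.
func countLocalDisks(er erasureObjects) int {
	return len(er.getLocalDisks()) // disks physically attached to this server
}
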
// getLoadBalancedLocalDisks returns a randomized slice of the local disks
// that are online and not healing.
func (er erasureObjects) getLoadBalancedLocalDisks() (newDisks []StorageAPI) {
	disks := er.getDisks()
	// Shuffle the disks and keep the healthy local ones. hashOrder returns
	// 1-based indices, hence the disks[i-1] lookups.
	for _, i := range hashOrder(UTCNow().String(), len(disks)) {
		if disks[i-1] != nil && disks[i-1].IsLocal() {
			if disks[i-1].Healing() == nil && disks[i-1].IsOnline() {
				newDisks = append(newDisks, disks[i-1])
			}
		}
	}
	return newDisks
}

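// Illustrative sketch (hypothetical, not the package's real implementation):
// the contract this file assumes from hashOrder. hashOrder(key, n) is expected
// to return a deterministic permutation of the 1-based indices 1..n derived
// from the key, which is why the loops in this file index with disks[i-1].
// The body below only demonstrates the shape of the result.
func exampleHashOrderContract(n int) []int {
	perm := make([]int, n)
	for i := range perm {
		perm[i] = i + 1 // 1-based, as consumed by disks[i-1]; a real hashOrder shuffles these
	}
	return perm
}
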
// getOnlineDisks probes every disk in parallel and returns, in randomized
// order, the disks that respond to DiskInfo and are not healing.
func (er erasureObjects) getOnlineDisks() (newDisks []StorageAPI) {
	disks := er.getDisks()
	var wg sync.WaitGroup
	var mu sync.Mutex
	for _, i := range hashOrder(UTCNow().String(), len(disks)) {
		i := i
		wg.Add(1)
		go func() {
			defer wg.Done()
			if disks[i-1] == nil {
				return
			}
			di, err := disks[i-1].DiskInfo(context.Background())
			if err != nil || di.Healing {
				// - Do not consume disks which are not reachable,
				//   unformatted or simply not accessible for some reason.
				//
				// - Do not consume disks which are being healed.
				//
				// - Future: skip busy disks.
				return
			}

			mu.Lock()
			newDisks = append(newDisks, disks[i-1])
			mu.Unlock()
		}()
	}
	wg.Wait()
	return newDisks
}

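// Usage sketch (hypothetical): spreading reads across the disks that passed
// the DiskInfo probe above. pickReadDisks is an assumed helper, not part of
// the original file.
func pickReadDisks(er erasureObjects) []StorageAPI {
	online := er.getOnlineDisks()
	// Every entry already responded to DiskInfo and is not healing, so a
	// caller could round-robin reads across this slice.
	return online
}
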
// getLoadBalancedNDisks - fetches a load balanced (sufficiently randomized)
// disk slice with at most ndisks online disks. If ndisks is zero or negative,
// all disks are returned; likewise when ndisks exceeds the total disk count.
func (er erasureObjects) getLoadBalancedNDisks(ndisks int) (newDisks []StorageAPI) {
	disks := er.getLoadBalancedDisks(ndisks != -1)
	for _, disk := range disks {
		if disk == nil {
			continue
		}
		newDisks = append(newDisks, disk)
		ndisks--
		if ndisks == 0 {
			break
		}
	}
	return
}

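// Usage sketch (hypothetical): bounding fan-out with getLoadBalancedNDisks.
// Note the ndisks != -1 convention above: -1 requests the full shuffled set
// without the health and usage filtering, while a positive count returns at
// most that many filtered disks.
func exampleBoundedFanOut(er erasureObjects) {
	three := er.getLoadBalancedNDisks(3) // at most 3 healthy disks
	all := er.getLoadBalancedNDisks(-1)  // every disk, shuffled, unfiltered
	_, _ = three, all
}
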
// getLoadBalancedDisks - fetches a load balanced (sufficiently randomized)
// disk slice. When optimized, disks that are unreachable or healing are
// skipped, and only the disks sharing the highest usage are returned.
func (er erasureObjects) getLoadBalancedDisks(optimized bool) []StorageAPI {
	disks := er.getDisks()

	if !optimized {
		var newDisks []StorageAPI
		for _, i := range hashOrder(UTCNow().String(), len(disks)) {
			newDisks = append(newDisks, disks[i-1])
		}
		return newDisks
	}

	var wg sync.WaitGroup
	var mu sync.Mutex
	var newDisks = map[uint64][]StorageAPI{}
	// Based on the random shuffling, return back randomized disks.
	for _, i := range hashOrder(UTCNow().String(), len(disks)) {
		i := i
		wg.Add(1)
		go func() {
			defer wg.Done()
			if disks[i-1] == nil {
				return
			}
			di, err := disks[i-1].DiskInfo(context.Background())
			if err != nil || di.Healing {
				// - Do not consume disks which are not reachable,
				//   unformatted or simply not accessible for some reason.
				//
				// - Do not consume disks which are being healed.
				//
				// - Future: skip busy disks.
				return
			}

			mu.Lock()
			// Group disks by usage, at MiB resolution.
			newDisks[di.Used/1024/1024] = append(newDisks[di.Used/1024/1024], disks[i-1])
			mu.Unlock()
		}()
	}
	wg.Wait()

	var max uint64
	for k := range newDisks {
		if k > max {
			max = k
		}
	}

	// Return the disks that share the maximum disk usage.
	return newDisks[max]
}

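// Worked sketch (illustrative, with made-up numbers): how the MiB bucketing
// above groups disks. Four disks reporting Used bytes of 5<<20, 5<<20+100,
// 7<<20 and 7<<20+512 integer-divide to the buckets {5: [d1 d2], 7: [d3 d4]},
// and the maximum key (7) selects [d3 d4].
func exampleUsageBucketing() map[uint64][]string {
	used := map[string]uint64{
		"d1": 5 << 20, "d2": 5<<20 + 100,
		"d3": 7 << 20, "d4": 7<<20 + 512,
	}
	buckets := map[uint64][]string{}
	for name, u := range used {
		buckets[u/1024/1024] = append(buckets[u/1024/1024], name)
	}
	return buckets
}
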
// parentDirIsObject reports whether any parent of the given path already
// exists as an object. For example, for object "a/b/c/d", the caller must
// ensure that none of
// - "a/b/c"
// - "a/b"
// - "a"
// exist as objects in the namespace.
func (er erasureObjects) parentDirIsObject(ctx context.Context, bucket, parent string) bool {
	storageDisks := er.getDisks()

	g := errgroup.WithNErrs(len(storageDisks))

	for index := range storageDisks {
		index := index
		g.Go(func() error {
			if storageDisks[index] == nil {
				return errDiskNotFound
			}
			// Check if 'parent' is an object on this disk, else continue
			// checking the next disk.
			return storageDisks[index].CheckFile(ctx, bucket, parent)
		}, index)
	}

	// NOTE: Observe we are not trying to read `xl.meta` and figure out the
	// actual quorum intentionally, but rely on the default case scenario.
	// Actual quorum verification will happen at the top layer by using
	// getObjectInfo() and will be ignored if necessary.
	readQuorum := getReadQuorum(len(storageDisks))

	return reduceReadQuorumErrs(ctx, g.Wait(), objectOpIgnoredErrs, readQuorum) == nil
}
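
// Usage sketch (hypothetical): a write path guarding against creating an
// object beneath a path component that is already an object. The bucket and
// object names are placeholders; errFileParentIsFile is assumed to be the
// storage-layer error used for this condition.
func exampleParentGuard(ctx context.Context, er erasureObjects) error {
	if er.parentDirIsObject(ctx, "mybucket", "a/b/c") {
		// "a/b/c" (or one of its parents) exists as an object, so writing
		// "a/b/c/d" must be rejected.
		return errFileParentIsFile
	}
	return nil
}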