storj.io/minio@v0.0.0-20230509071714-0cbc90f649b1/cmd/erasure-bucket.go

/*
 * MinIO Cloud Storage, (C) 2016-2020 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
	"context"
	"errors"

	"github.com/minio/minio-go/v7/pkg/s3utils"

	"storj.io/minio/cmd/logger"
	"storj.io/minio/pkg/sync/errgroup"
)

// list all errors that can be ignored in a bucket operation.
var bucketOpIgnoredErrs = append(baseIgnoredErrs, errDiskAccessDenied, errUnformattedDisk)

// list all errors that can be ignored in a bucket metadata operation.
var bucketMetadataOpIgnoredErrs = append(bucketOpIgnoredErrs, errVolumeNotFound)

/// Bucket operations

// MakeBucketWithLocation - make a bucket.
func (er erasureObjects) MakeBucketWithLocation(ctx context.Context, bucket string, opts BucketOptions) error {
	// Verify if bucket is valid.
	if err := s3utils.CheckValidBucketNameStrict(bucket); err != nil {
		return BucketNameInvalid{Bucket: bucket}
	}

	storageDisks := er.getDisks()

	g := errgroup.WithNErrs(len(storageDisks))

	// Make a volume entry on all underlying storage disks.
	for index := range storageDisks {
		index := index
		g.Go(func() error {
			if storageDisks[index] != nil {
				if err := storageDisks[index].MakeVol(ctx, bucket); err != nil {
					if !errors.Is(err, errVolumeExists) {
						logger.LogIf(ctx, err)
					}
					return err
				}
				return nil
			}
			return errDiskNotFound
		}, index)
	}

	writeQuorum := getWriteQuorum(len(storageDisks))
	err := reduceWriteQuorumErrs(ctx, g.Wait(), bucketOpIgnoredErrs, writeQuorum)
	return toObjectErr(err, bucket)
}

func undoDeleteBucket(storageDisks []StorageAPI, bucket string) {
	g := errgroup.WithNErrs(len(storageDisks))
	// Undo a previous delete bucket by re-creating the bucket entry on all underlying storage disks.
	for index := range storageDisks {
		if storageDisks[index] == nil {
			continue
		}
		index := index
		g.Go(func() error {
			_ = storageDisks[index].MakeVol(context.Background(), bucket)
			return nil
		}, index)
	}

	// Wait for all make vol to finish.
	g.Wait()
}

// getBucketInfo - returns the BucketInfo from one of the load balanced disks.
func (er erasureObjects) getBucketInfo(ctx context.Context, bucketName string) (bucketInfo BucketInfo, err error) {
	storageDisks := er.getDisks()

	g := errgroup.WithNErrs(len(storageDisks))
	var bucketsInfo = make([]BucketInfo, len(storageDisks))
	// Stat the bucket on all underlying storage disks.
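	// Any single successful StatVol response is sufficient; the remaining
	// errors are reduced against read quorum below.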
	for index := range storageDisks {
		index := index
		g.Go(func() error {
			if storageDisks[index] == nil {
				return errDiskNotFound
			}
			volInfo, err := storageDisks[index].StatVol(ctx, bucketName)
			if err != nil {
				return err
			}
			bucketsInfo[index] = BucketInfo(volInfo)
			return nil
		}, index)
	}

	errs := g.Wait()

	for i, err := range errs {
		if err == nil {
			return bucketsInfo[i], nil
		}
	}

	// If all our errors were ignored, then we try to
	// reduce to one error based on read quorum.
	// `nil` is deliberately passed for ignoredErrs
	// because these errors were already ignored.
	readQuorum := getReadQuorum(len(storageDisks))
	return BucketInfo{}, reduceReadQuorumErrs(ctx, errs, nil, readQuorum)
}

// GetBucketInfo - returns BucketInfo for a bucket.
func (er erasureObjects) GetBucketInfo(ctx context.Context, bucket string) (bi BucketInfo, e error) {
	bucketInfo, err := er.getBucketInfo(ctx, bucket)
	if err != nil {
		return bi, toObjectErr(err, bucket)
	}
	return bucketInfo, nil
}

// Dangling buckets should be handled appropriately. In the following situation
// the quorum error reduces to `nil`, yet some disks returned `errVolumeNotEmpty`
// for the bucket delete; this can only happen if there are dangling objects in
// the bucket. In that case we simply attempt a full delete of the bucket,
// including the dangling objects. All of this happens under a lock, and there
// is no way a user can create buckets or sneak objects into the namespace, so
// it is safe to do.
func deleteDanglingBucket(ctx context.Context, storageDisks []StorageAPI, dErrs []error, bucket string) {
	for index, err := range dErrs {
		if err == errVolumeNotEmpty {
			// Attempt to delete bucket again.
			if derr := storageDisks[index].DeleteVol(ctx, bucket, false); derr == errVolumeNotEmpty {
				_ = storageDisks[index].Delete(ctx, bucket, "", true)

				_ = storageDisks[index].DeleteVol(ctx, bucket, false)

				// Cleanup all the previously incomplete multiparts.
				_ = storageDisks[index].Delete(ctx, minioMetaMultipartBucket, bucket, true)
			}
		}
	}
}

// DeleteBucket - deletes a bucket.
func (er erasureObjects) DeleteBucket(ctx context.Context, bucket string, forceDelete bool) error {
	// Collect if all disks report volume not found.
	defer ObjectPathUpdated(bucket + slashSeparator)
	storageDisks := er.getDisks()

	g := errgroup.WithNErrs(len(storageDisks))

	for index := range storageDisks {
		index := index
		g.Go(func() error {
			if storageDisks[index] != nil {
				if err := storageDisks[index].DeleteVol(ctx, bucket, forceDelete); err != nil {
					return err
				}
				if err := storageDisks[index].Delete(ctx, minioMetaMultipartBucket, bucket, true); err != errFileNotFound {
					return err
				}
				return nil
			}
			return errDiskNotFound
		}, index)
	}

	// Wait for all the delete vols to finish.
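	// With forceDelete, any single disk failure restores the bucket and aborts;
	// otherwise the collected errors are reduced against write quorum.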
	dErrs := g.Wait()

	if forceDelete {
		for _, err := range dErrs {
			if err != nil {
				undoDeleteBucket(storageDisks, bucket)
				return toObjectErr(err, bucket)
			}
		}

		return nil
	}

	writeQuorum := getWriteQuorum(len(storageDisks))
	err := reduceWriteQuorumErrs(ctx, dErrs, bucketOpIgnoredErrs, writeQuorum)
	if err == errErasureWriteQuorum {
		undoDeleteBucket(storageDisks, bucket)
	}
	if err != nil {
		return toObjectErr(err, bucket)
	}

	// If the quorum error reduces to nil, the bucket has been deleted properly
	// on enough servers to meet quorum; we should now look for volumeNotEmpty
	// errors and delete those buckets as well.
	//
	// Let this call succeed even if the client cancels the context; this
	// ensures that we don't leave any stale content behind.
	deleteDanglingBucket(context.Background(), storageDisks, dErrs, bucket)

	return nil
}

// IsNotificationSupported returns whether bucket notification is applicable for this layer.
func (er erasureObjects) IsNotificationSupported() bool {
	return true
}

// IsListenSupported returns whether listen bucket notification is applicable for this layer.
func (er erasureObjects) IsListenSupported() bool {
	return true
}

// IsEncryptionSupported returns whether server side encryption is implemented for this layer.
func (er erasureObjects) IsEncryptionSupported() bool {
	return true
}

// IsCompressionSupported returns whether compression is applicable for this layer.
func (er erasureObjects) IsCompressionSupported() bool {
	return true
}

// IsTaggingSupported indicates whether erasureObjects implements tagging support.
func (er erasureObjects) IsTaggingSupported() bool {
	return true
}
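// Illustrative sketch (not part of the upstream file): one way the bucket
// operations above could be exercised end to end. The erasureObjects value is
// assumed to be fully initialized elsewhere; the bucket name, the hypothetical
// function name, and the zero-value BucketOptions are illustrative assumptions,
// and error handling is kept minimal.
func exampleBucketLifecycle(ctx context.Context, er erasureObjects) error {
	// Create the bucket on all underlying disks, subject to write quorum.
	if err := er.MakeBucketWithLocation(ctx, "example-bucket", BucketOptions{}); err != nil {
		return err
	}

	// Stat the bucket; the first responsive disk provides the BucketInfo.
	if _, err := er.GetBucketInfo(ctx, "example-bucket"); err != nil {
		return err
	}

	// Delete the bucket without forceDelete, so a non-empty bucket is rejected.
	return er.DeleteBucket(ctx, "example-bucket", false)
}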