github.com/masterhung0112/hk_server/v5@v5.0.0-20220302090640-ec71aef15e1c/shared/filestore/s3store.go

// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.

package filestore

import (
	"context"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"strings"
	"time"

	s3 "github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
	"github.com/minio/minio-go/v7/pkg/encrypt"
	"github.com/pkg/errors"

	"github.com/masterhung0112/hk_server/v5/shared/mlog"
)

// S3FileBackend contains all necessary information to communicate with
// an AWS S3 compatible API backend.
type S3FileBackend struct {
	endpoint   string
	accessKey  string
	secretKey  string
	secure     bool
	signV2     bool
	region     string
	bucket     string
	pathPrefix string
	encrypt    bool
	trace      bool
	client     *s3.Client
}

type S3FileBackendAuthError struct {
	DetailedError string
}

// S3FileBackendNoBucketError is returned when testing a connection and no S3 bucket is found.
type S3FileBackendNoBucketError struct{}

const (
	// This is not exported by minio. See: https://github.com/minio/minio-go/issues/1339
	bucketNotFound = "NoSuchBucket"
)

var (
	imageExtensions = map[string]bool{".jpg": true, ".jpeg": true, ".gif": true, ".bmp": true, ".png": true, ".tiff": true, ".tif": true}
	imageMimeTypes  = map[string]string{".jpg": "image/jpeg", ".jpeg": "image/jpeg", ".gif": "image/gif", ".bmp": "image/bmp", ".png": "image/png", ".tiff": "image/tiff", ".tif": "image/tif"}
)

func isFileExtImage(ext string) bool {
	ext = strings.ToLower(ext)
	return imageExtensions[ext]
}

func getImageMimeType(ext string) string {
	ext = strings.ToLower(ext)
	if imageMimeTypes[ext] == "" {
		return "image"
	}
	return imageMimeTypes[ext]
}

func (s *S3FileBackendAuthError) Error() string {
	return s.DetailedError
}

func (s *S3FileBackendNoBucketError) Error() string {
	return "no such bucket"
}

// NewS3FileBackend returns an instance of an S3FileBackend.
func NewS3FileBackend(settings FileBackendSettings) (*S3FileBackend, error) {
	backend := &S3FileBackend{
		endpoint:   settings.AmazonS3Endpoint,
		accessKey:  settings.AmazonS3AccessKeyId,
		secretKey:  settings.AmazonS3SecretAccessKey,
		secure:     settings.AmazonS3SSL,
		signV2:     settings.AmazonS3SignV2,
		region:     settings.AmazonS3Region,
		bucket:     settings.AmazonS3Bucket,
		pathPrefix: settings.AmazonS3PathPrefix,
		encrypt:    settings.AmazonS3SSE,
		trace:      settings.AmazonS3Trace,
	}
	cli, err := backend.s3New()
	if err != nil {
		return nil, err
	}
	backend.client = cli
	return backend, nil
}
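// The function below is a usage sketch and not part of the original file: it
// shows how a backend might be constructed from FileBackendSettings and then
// verified before use. The endpoint, credentials, region, and bucket values
// are placeholders for illustration only.
func exampleNewBackend() (*S3FileBackend, error) {
	backend, err := NewS3FileBackend(FileBackendSettings{
		AmazonS3Endpoint:        "s3.example.com",     // placeholder endpoint
		AmazonS3AccessKeyId:     "EXAMPLE_ACCESS_KEY", // placeholder credentials
		AmazonS3SecretAccessKey: "EXAMPLE_SECRET_KEY",
		AmazonS3SSL:             true,
		AmazonS3Region:          "us-east-1",
		AmazonS3Bucket:          "example-bucket",
	})
	if err != nil {
		return nil, err
	}
	// TestConnection confirms the bucket (or configured path prefix) is
	// reachable with the supplied credentials before the backend is used.
	if err := backend.TestConnection(); err != nil {
		return nil, err
	}
	return backend, nil
}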
// Similar to s3.New() but allows initialization of a signature v2 or signature v4 client.
// If signV2 is false, the function always returns a signature v4 client.
//
// Additionally, this function takes a user-defined region which, if set,
// disables automatic region lookup.
func (b *S3FileBackend) s3New() (*s3.Client, error) {
	var creds *credentials.Credentials

	isCloud := os.Getenv("MM_CLOUD_FILESTORE_BIFROST") != ""
	if isCloud {
		creds = credentials.New(customProvider{isSignV2: b.signV2})
	} else if b.accessKey == "" && b.secretKey == "" {
		creds = credentials.NewIAM("")
	} else if b.signV2 {
		creds = credentials.NewStatic(b.accessKey, b.secretKey, "", credentials.SignatureV2)
	} else {
		creds = credentials.NewStatic(b.accessKey, b.secretKey, "", credentials.SignatureV4)
	}

	opts := s3.Options{
		Creds:  creds,
		Secure: b.secure,
		Region: b.region,
	}

	// If this is a cloud installation, we override the default transport.
	if isCloud {
		tr, err := s3.DefaultTransport(b.secure)
		if err != nil {
			return nil, err
		}
		scheme := "http"
		if b.secure {
			scheme = "https"
		}
		opts.Transport = &customTransport{
			base:   tr,
			host:   b.endpoint,
			scheme: scheme,
		}
	}

	s3Clnt, err := s3.New(b.endpoint, &opts)
	if err != nil {
		return nil, err
	}

	if b.trace {
		s3Clnt.TraceOn(os.Stdout)
	}

	return s3Clnt, nil
}

func (b *S3FileBackend) TestConnection() error {
	exists := true
	var err error
	// If a path prefix is present, we attempt to test the bucket by listing objects under the path
	// and just checking the first response. This is because the BucketExists call is only at a bucket level
	// and sometimes the user might only be allowed access to the specified path prefix.
	if b.pathPrefix != "" {
		obj := <-b.client.ListObjects(context.Background(), b.bucket, s3.ListObjectsOptions{Prefix: b.pathPrefix})
		if obj.Err != nil {
			typedErr := s3.ToErrorResponse(obj.Err)
			if typedErr.Code != bucketNotFound {
				return &S3FileBackendAuthError{DetailedError: "unable to list objects in the S3 bucket"}
			}
			exists = false
		}
	} else {
		exists, err = b.client.BucketExists(context.Background(), b.bucket)
		if err != nil {
			return &S3FileBackendAuthError{DetailedError: "unable to check if the S3 bucket exists"}
		}
	}

	if !exists {
		return &S3FileBackendNoBucketError{}
	}
	mlog.Debug("Connection to S3 or minio is good. Bucket exists.")
	return nil
}
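// The function below is a sketch and not part of the original file: it
// illustrates how the error types above are meant to be consumed. A missing
// bucket reported by TestConnection can be created with MakeBucket, while any
// other error (for example *S3FileBackendAuthError) is surfaced to the caller.
func (b *S3FileBackend) exampleEnsureBucket() error {
	err := b.TestConnection()
	if err == nil {
		return nil
	}
	// The credentials work but the bucket does not exist yet: create it.
	if _, ok := err.(*S3FileBackendNoBucketError); ok {
		return b.MakeBucket()
	}
	return err
}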
func (b *S3FileBackend) MakeBucket() error {
	err := b.client.MakeBucket(context.Background(), b.bucket, s3.MakeBucketOptions{Region: b.region})
	if err != nil {
		return errors.Wrap(err, "unable to create the s3 bucket")
	}
	return nil
}

// Caller must close the first return value.
func (b *S3FileBackend) Reader(path string) (ReadCloseSeeker, error) {
	path = filepath.ToSlash(filepath.Join(b.pathPrefix, path))
	minioObject, err := b.client.GetObject(context.Background(), b.bucket, path, s3.GetObjectOptions{})
	if err != nil {
		return nil, errors.Wrapf(err, "unable to open file %s", path)
	}

	return minioObject, nil
}

func (b *S3FileBackend) ReadFile(path string) ([]byte, error) {
	path = filepath.ToSlash(filepath.Join(b.pathPrefix, path))
	minioObject, err := b.client.GetObject(context.Background(), b.bucket, path, s3.GetObjectOptions{})
	if err != nil {
		return nil, errors.Wrapf(err, "unable to open file %s", path)
	}

	defer minioObject.Close()
	f, err := ioutil.ReadAll(minioObject)
	if err != nil {
		return nil, errors.Wrapf(err, "unable to read file %s", path)
	}
	return f, nil
}

func (b *S3FileBackend) FileExists(path string) (bool, error) {
	path = filepath.ToSlash(filepath.Join(b.pathPrefix, path))

	_, err := b.client.StatObject(context.Background(), b.bucket, path, s3.StatObjectOptions{})
	if err == nil {
		return true, nil
	}

	var s3Err s3.ErrorResponse
	if errors.As(err, &s3Err); s3Err.Code == "NoSuchKey" {
		return false, nil
	}

	return false, errors.Wrapf(err, "unable to check if file %s exists", path)
}

func (b *S3FileBackend) FileSize(path string) (int64, error) {
	path = filepath.ToSlash(filepath.Join(b.pathPrefix, path))

	info, err := b.client.StatObject(context.Background(), b.bucket, path, s3.StatObjectOptions{})
	if err != nil {
		return 0, errors.Wrapf(err, "unable to get file size for %s", path)
	}

	return info.Size, nil
}

func (b *S3FileBackend) FileModTime(path string) (time.Time, error) {
	path = filepath.ToSlash(filepath.Join(b.pathPrefix, path))

	info, err := b.client.StatObject(context.Background(), b.bucket, path, s3.StatObjectOptions{})
	if err != nil {
		return time.Time{}, errors.Wrapf(err, "unable to get modification time for file %s", path)
	}

	return info.LastModified, nil
}

func (b *S3FileBackend) CopyFile(oldPath, newPath string) error {
	oldPath = filepath.ToSlash(filepath.Join(b.pathPrefix, oldPath))
	newPath = filepath.ToSlash(filepath.Join(b.pathPrefix, newPath))
	srcOpts := s3.CopySrcOptions{
		Bucket:     b.bucket,
		Object:     oldPath,
		Encryption: encrypt.NewSSE(),
	}
	dstOpts := s3.CopyDestOptions{
		Bucket:     b.bucket,
		Object:     newPath,
		Encryption: encrypt.NewSSE(),
	}
	if _, err := b.client.CopyObject(context.Background(), dstOpts, srcOpts); err != nil {
		return errors.Wrapf(err, "unable to copy file from %s to %s", oldPath, newPath)
	}
	return nil
}
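// The function below is a sketch and not part of the original file: it shows
// the contract of Reader, whose return value must be closed by the caller and
// can be streamed to any io.Writer without buffering the whole object in memory.
func (b *S3FileBackend) exampleStreamFile(path string, dst io.Writer) error {
	r, err := b.Reader(path)
	if err != nil {
		return err
	}
	defer r.Close()

	if _, err := io.Copy(dst, r); err != nil {
		return errors.Wrapf(err, "unable to stream file %s", path)
	}
	return nil
}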
func (b *S3FileBackend) MoveFile(oldPath, newPath string) error {
	oldPath = filepath.ToSlash(filepath.Join(b.pathPrefix, oldPath))
	newPath = filepath.ToSlash(filepath.Join(b.pathPrefix, newPath))
	srcOpts := s3.CopySrcOptions{
		Bucket:     b.bucket,
		Object:     oldPath,
		Encryption: encrypt.NewSSE(),
	}
	dstOpts := s3.CopyDestOptions{
		Bucket:     b.bucket,
		Object:     newPath,
		Encryption: encrypt.NewSSE(),
	}

	if _, err := b.client.CopyObject(context.Background(), dstOpts, srcOpts); err != nil {
		return errors.Wrapf(err, "unable to copy the file %s to the new destination", newPath)
	}

	if err := b.client.RemoveObject(context.Background(), b.bucket, oldPath, s3.RemoveObjectOptions{}); err != nil {
		return errors.Wrapf(err, "unable to remove the old file %s", oldPath)
	}

	return nil
}

func (b *S3FileBackend) WriteFile(fr io.Reader, path string) (int64, error) {
	var contentType string
	path = filepath.ToSlash(filepath.Join(b.pathPrefix, path))
	if ext := filepath.Ext(path); isFileExtImage(ext) {
		contentType = getImageMimeType(ext)
	} else {
		contentType = "binary/octet-stream"
	}

	options := s3PutOptions(b.encrypt, contentType)
	info, err := b.client.PutObject(context.Background(), b.bucket, path, fr, -1, options)
	if err != nil {
		return info.Size, errors.Wrapf(err, "unable to write the data in the file %s", path)
	}

	return info.Size, nil
}

func (b *S3FileBackend) AppendFile(fr io.Reader, path string) (int64, error) {
	fp := filepath.ToSlash(filepath.Join(b.pathPrefix, path))
	if _, err := b.client.StatObject(context.Background(), b.bucket, fp, s3.StatObjectOptions{}); err != nil {
		return 0, errors.Wrapf(err, "unable to find the file %s to append the data", path)
	}

	var contentType string
	if ext := filepath.Ext(fp); isFileExtImage(ext) {
		contentType = getImageMimeType(ext)
	} else {
		contentType = "binary/octet-stream"
	}

	options := s3PutOptions(b.encrypt, contentType)
	sse := options.ServerSideEncryption
	partName := fp + ".part"
	info, err := b.client.PutObject(context.Background(), b.bucket, partName, fr, -1, options)
	defer b.client.RemoveObject(context.Background(), b.bucket, partName, s3.RemoveObjectOptions{})
	if info.Size > 0 {
		src1Opts := s3.CopySrcOptions{
			Bucket: b.bucket,
			Object: fp,
		}
		src2Opts := s3.CopySrcOptions{
			Bucket: b.bucket,
			Object: partName,
		}
		dstOpts := s3.CopyDestOptions{
			Bucket:     b.bucket,
			Object:     fp,
			Encryption: sse,
		}
		// Concatenate the existing object and the freshly uploaded part back
		// into the original key, preserving the server-side encryption setting.
		_, err = b.client.ComposeObject(context.Background(), dstOpts, src1Opts, src2Opts)
		if err != nil {
			return 0, errors.Wrapf(err, "unable to append the data in the file %s", path)
		}
		return info.Size, nil
	}

	return 0, errors.Wrapf(err, "unable to append the data in the file %s", path)
}

func (b *S3FileBackend) RemoveFile(path string) error {
	path = filepath.ToSlash(filepath.Join(b.pathPrefix, path))
	if err := b.client.RemoveObject(context.Background(), b.bucket, path, s3.RemoveObjectOptions{}); err != nil {
		return errors.Wrapf(err, "unable to remove the file %s", path)
	}

	return nil
}

// getPathsFromObjectInfos forwards object infos from in to out until in is
// closed; the second value of the channel receive is false once in is closed.
func getPathsFromObjectInfos(in <-chan s3.ObjectInfo) <-chan s3.ObjectInfo {
	out := make(chan s3.ObjectInfo, 1)

	go func() {
		defer close(out)

		for {
			info, ok := <-in

			if !ok {
				break
			}

			out <- info
		}
	}()

	return out
}
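// The function below is a sketch and not part of the original file: it shows
// the ordering WriteFile and AppendFile imply, since AppendFile stats the
// object first and fails if it does not exist yet. The path and contents are
// placeholders.
func (b *S3FileBackend) exampleWriteThenAppend() error {
	const path = "example/notes.txt" // placeholder path

	if _, err := b.WriteFile(strings.NewReader("first chunk\n"), path); err != nil {
		return err
	}
	// The append only succeeds because the WriteFile above created the object.
	if _, err := b.AppendFile(strings.NewReader("second chunk\n"), path); err != nil {
		return err
	}
	return nil
}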
func (b *S3FileBackend) ListDirectory(path string) ([]string, error) {
	path = filepath.ToSlash(filepath.Join(b.pathPrefix, path))
	if !strings.HasSuffix(path, "/") && path != "" {
		// s3Clnt returns only the path itself when "/" is not present,
		// so we append "/" to make it consistent across all filestores.
		path = path + "/"
	}

	opts := s3.ListObjectsOptions{
		Prefix: path,
	}
	var paths []string
	for object := range b.client.ListObjects(context.Background(), b.bucket, opts) {
		if object.Err != nil {
			return nil, errors.Wrapf(object.Err, "unable to list the directory %s", path)
		}
		// We strip the path prefix that gets applied,
		// so that it remains transparent to the application.
		object.Key = strings.TrimPrefix(object.Key, b.pathPrefix)
		trimmed := strings.Trim(object.Key, "/")
		if trimmed != "" {
			paths = append(paths, trimmed)
		}
	}

	return paths, nil
}

func (b *S3FileBackend) RemoveDirectory(path string) error {
	opts := s3.ListObjectsOptions{
		Prefix:    filepath.ToSlash(filepath.Join(b.pathPrefix, path)),
		Recursive: true,
	}
	list := b.client.ListObjects(context.Background(), b.bucket, opts)
	objectsCh := b.client.RemoveObjects(context.Background(), b.bucket, getPathsFromObjectInfos(list), s3.RemoveObjectsOptions{})
	for err := range objectsCh {
		if err.Err != nil {
			return errors.Wrapf(err.Err, "unable to remove the directory %s", path)
		}
	}

	return nil
}

func s3PutOptions(encrypted bool, contentType string) s3.PutObjectOptions {
	options := s3.PutObjectOptions{}
	if encrypted {
		options.ServerSideEncryption = encrypt.NewSSE()
	}
	options.ContentType = contentType
	// We set the part size to the minimum allowed value of 5MB
	// to avoid an excessive allocation in the minio.PutObject implementation.
	options.PartSize = 1024 * 1024 * 5

	return options
}
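// The function below is a sketch and not part of the original file: it
// combines ListDirectory and RemoveDirectory to delete a subtree only when it
// actually contains objects. The directory name is a placeholder.
func (b *S3FileBackend) exampleCleanupDirectory() error {
	const dir = "example/tmp" // placeholder directory

	paths, err := b.ListDirectory(dir)
	if err != nil {
		return err
	}
	if len(paths) == 0 {
		// Nothing under the prefix, so there is nothing to remove.
		return nil
	}
	return b.RemoveDirectory(dir)
}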