github.com/matrixorigin/matrixone@v0.7.0/pkg/fileservice/s3_fs.go (about) 1 // Copyright 2022 Matrix Origin 2 // 3 // Licensed under the Apache License, Version 2.0 (the "License"); 4 // you may not use this file except in compliance with the License. 5 // You may obtain a copy of the License at 6 // 7 // http://www.apache.org/licenses/LICENSE-2.0 8 // 9 // Unless required by applicable law or agreed to in writing, software 10 // distributed under the License is distributed on an "AS IS" BASIS, 11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 // See the License for the specific language governing permissions and 13 // limitations under the License. 14 15 package fileservice 16 17 import ( 18 "bytes" 19 "context" 20 "errors" 21 "fmt" 22 "io" 23 "math" 24 stdhttp "net/http" 25 "net/url" 26 pathpkg "path" 27 "sort" 28 "strings" 29 "time" 30 31 "github.com/matrixorigin/matrixone/pkg/logutil" 32 "github.com/matrixorigin/matrixone/pkg/util/trace" 33 "go.uber.org/zap" 34 35 "github.com/aws/aws-sdk-go-v2/aws" 36 "github.com/aws/aws-sdk-go-v2/aws/transport/http" 37 "github.com/aws/aws-sdk-go-v2/config" 38 "github.com/aws/aws-sdk-go-v2/credentials" 39 "github.com/aws/aws-sdk-go-v2/credentials/stscreds" 40 "github.com/aws/aws-sdk-go-v2/service/s3" 41 "github.com/aws/aws-sdk-go-v2/service/s3/types" 42 "github.com/aws/aws-sdk-go-v2/service/sts" 43 "github.com/matrixorigin/matrixone/pkg/common/moerr" 44 ) 45 46 // S3FS is a FileService implementation backed by S3 47 type S3FS struct { 48 name string 49 client *s3.Client 50 bucket string 51 keyPrefix string 52 53 memCache *MemCache 54 diskCache *DiskCache 55 } 56 57 // key mapping scheme: 58 // <KeyPrefix>/<file path> -> file content 59 60 var _ FileService = new(S3FS) 61 62 func NewS3FS( 63 sharedConfigProfile string, 64 name string, 65 endpoint string, 66 bucket string, 67 keyPrefix string, 68 memCacheCapacity int64, 69 diskCacheCapacity int64, 70 diskCachePath string, 71 ) (*S3FS, error) { 72 73 fs, err := newS3FS([]string{ 74 "shared-config-profile=" + sharedConfigProfile, 75 "name=" + name, 76 "endpoint=" + endpoint, 77 "bucket=" + bucket, 78 "prefix=" + keyPrefix, 79 }) 80 if err != nil { 81 return nil, err 82 } 83 84 if err := fs.initCaches( 85 memCacheCapacity, 86 diskCacheCapacity, 87 diskCachePath, 88 ); err != nil { 89 return nil, err 90 } 91 92 return fs, nil 93 } 94 95 // NewS3FSOnMinio creates S3FS on minio server 96 // this is needed because the URL scheme of minio server does not compatible with AWS' 97 func NewS3FSOnMinio( 98 sharedConfigProfile string, 99 name string, 100 endpoint string, 101 bucket string, 102 keyPrefix string, 103 memCacheCapacity int64, 104 diskCacheCapacity int64, 105 diskCachePath string, 106 ) (*S3FS, error) { 107 108 fs, err := newS3FS([]string{ 109 "shared-config-profile=" + sharedConfigProfile, 110 "name=" + name, 111 "endpoint=" + endpoint, 112 "bucket=" + bucket, 113 "prefix=" + keyPrefix, 114 "is-minio=true", 115 }) 116 if err != nil { 117 return nil, err 118 } 119 120 if err := fs.initCaches( 121 memCacheCapacity, 122 diskCacheCapacity, 123 diskCachePath, 124 ); err != nil { 125 return nil, err 126 } 127 128 return fs, nil 129 } 130 131 func (s *S3FS) initCaches( 132 memCacheCapacity int64, 133 diskCacheCapacity int64, 134 diskCachePath string, 135 ) error { 136 137 // memory cache 138 if memCacheCapacity == 0 { 139 memCacheCapacity = 512 << 20 140 } 141 if memCacheCapacity > 0 { 142 s.memCache = NewMemCache(memCacheCapacity) 143 logutil.Info("fileservice: mem cache initialized", zap.Any("fs-name", s.name), zap.Any("capacity", memCacheCapacity)) 144 } 145 146 // disk cache 147 if diskCacheCapacity == 0 { 148 diskCacheCapacity = 8 << 30 149 } 150 if diskCacheCapacity > 0 && diskCachePath != "" { 151 var err error 152 s.diskCache, err = NewDiskCache(diskCachePath, diskCacheCapacity) 153 if err != nil { 154 return err 155 } 156 logutil.Info("fileservice: disk cache initialized", zap.Any("fs-name", s.name), zap.Any("capacity", memCacheCapacity)) 157 } 158 159 return nil 160 } 161 162 func (s *S3FS) Name() string { 163 return s.name 164 } 165 166 func (s *S3FS) List(ctx context.Context, dirPath string) (entries []DirEntry, err error) { 167 select { 168 case <-ctx.Done(): 169 return nil, ctx.Err() 170 default: 171 } 172 173 ctx, span := trace.Start(ctx, "S3FS.List") 174 defer span.End() 175 if ctx == nil { 176 ctx = context.Background() 177 } 178 179 path, err := ParsePathAtService(dirPath, s.name) 180 if err != nil { 181 return nil, err 182 } 183 prefix := s.pathToKey(path.File) 184 if prefix != "" { 185 prefix += "/" 186 } 187 var cont *string 188 189 for { 190 output, err := s.client.ListObjectsV2( 191 ctx, 192 &s3.ListObjectsV2Input{ 193 Bucket: ptrTo(s.bucket), 194 Delimiter: ptrTo("/"), 195 Prefix: ptrTo(prefix), 196 ContinuationToken: cont, 197 }, 198 ) 199 if err != nil { 200 return nil, err 201 } 202 203 for _, obj := range output.Contents { 204 filePath := s.keyToPath(*obj.Key) 205 filePath = strings.TrimRight(filePath, "/") 206 _, name := pathpkg.Split(filePath) 207 entries = append(entries, DirEntry{ 208 Name: name, 209 IsDir: false, 210 Size: obj.Size, 211 }) 212 } 213 214 for _, prefix := range output.CommonPrefixes { 215 filePath := s.keyToPath(*prefix.Prefix) 216 filePath = strings.TrimRight(filePath, "/") 217 _, name := pathpkg.Split(filePath) 218 entries = append(entries, DirEntry{ 219 Name: name, 220 IsDir: true, 221 }) 222 } 223 224 if output.ContinuationToken == nil || 225 *output.ContinuationToken == "" { 226 break 227 } 228 cont = output.ContinuationToken 229 } 230 231 return 232 } 233 234 func (s *S3FS) StatFile(ctx context.Context, filePath string) (*DirEntry, error) { 235 select { 236 case <-ctx.Done(): 237 return nil, ctx.Err() 238 default: 239 } 240 241 ctx, span := trace.Start(ctx, "S3FS.StatFile") 242 defer span.End() 243 if ctx == nil { 244 ctx = context.Background() 245 } 246 247 path, err := ParsePathAtService(filePath, s.name) 248 if err != nil { 249 return nil, err 250 } 251 key := s.pathToKey(path.File) 252 253 output, err := s.client.HeadObject( 254 ctx, 255 &s3.HeadObjectInput{ 256 Bucket: ptrTo(s.bucket), 257 Key: ptrTo(key), 258 }, 259 ) 260 if err != nil { 261 var httpError *http.ResponseError 262 if errors.As(err, &httpError) { 263 if httpError.Response.StatusCode == 404 { 264 return nil, moerr.NewFileNotFound(ctx, filePath) 265 } 266 } 267 if err != nil { 268 return nil, err 269 } 270 } 271 272 return &DirEntry{ 273 Name: pathpkg.Base(filePath), 274 IsDir: false, 275 Size: output.ContentLength, 276 }, nil 277 } 278 279 func (s *S3FS) Write(ctx context.Context, vector IOVector) error { 280 select { 281 case <-ctx.Done(): 282 return ctx.Err() 283 default: 284 } 285 286 ctx, span := trace.Start(ctx, "S3FS.Write") 287 defer span.End() 288 289 // check existence 290 path, err := ParsePathAtService(vector.FilePath, s.name) 291 if err != nil { 292 return err 293 } 294 key := s.pathToKey(path.File) 295 output, err := s.client.HeadObject( 296 ctx, 297 &s3.HeadObjectInput{ 298 Bucket: ptrTo(s.bucket), 299 Key: ptrTo(key), 300 }, 301 ) 302 if err != nil { 303 var httpError *http.ResponseError 304 if errors.As(err, &httpError) { 305 if httpError.Response.StatusCode == 404 { 306 // key not exists, ok 307 err = nil 308 } 309 } 310 if err != nil { 311 return err 312 } 313 } 314 if output != nil { 315 // key existed 316 return moerr.NewFileAlreadyExistsNoCtx(path.File) 317 } 318 319 return s.write(ctx, vector) 320 } 321 322 func (s *S3FS) write(ctx context.Context, vector IOVector) error { 323 ctx, span := trace.Start(ctx, "S3FS.write") 324 defer span.End() 325 path, err := ParsePathAtService(vector.FilePath, s.name) 326 if err != nil { 327 return err 328 } 329 key := s.pathToKey(path.File) 330 331 // sort 332 sort.Slice(vector.Entries, func(i, j int) bool { 333 return vector.Entries[i].Offset < vector.Entries[j].Offset 334 }) 335 336 // size 337 var size int64 338 if len(vector.Entries) > 0 { 339 last := vector.Entries[len(vector.Entries)-1] 340 size = int64(last.Offset + last.Size) 341 } 342 343 // put 344 content, err := io.ReadAll(newIOEntriesReader(ctx, vector.Entries)) 345 if err != nil { 346 return err 347 } 348 var expire *time.Time 349 if !vector.ExpireAt.IsZero() { 350 expire = &vector.ExpireAt 351 } 352 _, err = s.client.PutObject( 353 ctx, 354 &s3.PutObjectInput{ 355 Bucket: ptrTo(s.bucket), 356 Key: ptrTo(key), 357 Body: bytes.NewReader(content), 358 ContentLength: size, 359 Expires: expire, 360 }, 361 ) 362 if err != nil { 363 return err 364 } 365 366 return nil 367 } 368 369 func (s *S3FS) Read(ctx context.Context, vector *IOVector) (err error) { 370 select { 371 case <-ctx.Done(): 372 return ctx.Err() 373 default: 374 } 375 376 ctx, span := trace.Start(ctx, "S3FS.Read") 377 defer span.End() 378 379 if len(vector.Entries) == 0 { 380 return moerr.NewEmptyVectorNoCtx() 381 } 382 383 if s.memCache != nil { 384 if err := s.memCache.Read(ctx, vector); err != nil { 385 return err 386 } 387 defer func() { 388 if err != nil { 389 return 390 } 391 err = s.memCache.Update(ctx, vector) 392 }() 393 } 394 395 if s.diskCache != nil { 396 if err := s.diskCache.Read(ctx, vector); err != nil { 397 return err 398 } 399 defer func() { 400 if err != nil { 401 return 402 } 403 err = s.diskCache.Update(ctx, vector) 404 }() 405 } 406 407 if err := s.read(ctx, vector); err != nil { 408 return err 409 } 410 411 return nil 412 } 413 414 func (s *S3FS) read(ctx context.Context, vector *IOVector) error { 415 if vector.allDone() { 416 return nil 417 } 418 419 ctx, span := trace.Start(ctx, "S3FS.read") 420 defer span.End() 421 path, err := ParsePathAtService(vector.FilePath, s.name) 422 if err != nil { 423 return err 424 } 425 key := s.pathToKey(path.File) 426 427 // calculate object read range 428 min := int64(math.MaxInt) 429 max := int64(0) 430 readToEnd := false 431 for _, entry := range vector.Entries { 432 if entry.done { 433 continue 434 } 435 if entry.Offset < min { 436 min = entry.Offset 437 } 438 if entry.Size < 0 { 439 entry.Size = 0 440 readToEnd = true 441 } 442 if end := entry.Offset + entry.Size; end > max { 443 max = end 444 } 445 } 446 447 // a function to get an io.ReadCloser 448 getReader := func(ctx context.Context, readToEnd bool, min int64, max int64) (io.ReadCloser, error) { 449 ctx, spanR := trace.Start(ctx, "S3FS.read.getReader") 450 defer spanR.End() 451 if readToEnd { 452 rang := fmt.Sprintf("bytes=%d-", min) 453 output, err := s.client.GetObject( 454 ctx, 455 &s3.GetObjectInput{ 456 Bucket: ptrTo(s.bucket), 457 Key: ptrTo(key), 458 Range: ptrTo(rang), 459 }, 460 ) 461 err = s.mapError(err, key) 462 if err != nil { 463 return nil, err 464 } 465 return output.Body, nil 466 } 467 468 rang := fmt.Sprintf("bytes=%d-%d", min, max) 469 output, err := s.client.GetObject( 470 ctx, 471 &s3.GetObjectInput{ 472 Bucket: ptrTo(s.bucket), 473 Key: ptrTo(key), 474 Range: ptrTo(rang), 475 }, 476 ) 477 err = s.mapError(err, key) 478 if err != nil { 479 return nil, err 480 } 481 return &readCloser{ 482 r: io.LimitReader(output.Body, int64(max-min)), 483 closeFunc: output.Body.Close, 484 }, nil 485 } 486 487 // a function to get data lazily 488 var contentBytes []byte 489 var contentErr error 490 var getContentDone bool 491 getContent := func(ctx context.Context) (bs []byte, err error) { 492 ctx, spanC := trace.Start(ctx, "S3FS.read.getContent") 493 defer spanC.End() 494 if getContentDone { 495 return contentBytes, contentErr 496 } 497 defer func() { 498 contentBytes = bs 499 contentErr = err 500 getContentDone = true 501 }() 502 503 reader, err := getReader(ctx, readToEnd, min, max) 504 if err != nil { 505 return nil, err 506 } 507 defer reader.Close() 508 bs, err = io.ReadAll(reader) 509 err = s.mapError(err, key) 510 if err != nil { 511 return nil, err 512 } 513 514 return 515 } 516 517 for i, entry := range vector.Entries { 518 if entry.done { 519 continue 520 } 521 522 start := entry.Offset - min 523 524 if entry.Size == 0 { 525 return moerr.NewEmptyRangeNoCtx(path.File) 526 } 527 528 // a function to get entry data lazily 529 getData := func(ctx context.Context) ([]byte, error) { 530 ctx, spanD := trace.Start(ctx, "S3FS.reader.getData") 531 defer spanD.End() 532 if entry.Size < 0 { 533 // read to end 534 content, err := getContent(ctx) 535 if err != nil { 536 return nil, err 537 } 538 if start >= int64(len(content)) { 539 return nil, moerr.NewEmptyRangeNoCtx(path.File) 540 } 541 return content[start:], nil 542 } 543 content, err := getContent(ctx) 544 if err != nil { 545 return nil, err 546 } 547 end := start + entry.Size 548 if end > int64(len(content)) { 549 return nil, moerr.NewUnexpectedEOFNoCtx(path.File) 550 } 551 if start == end { 552 return nil, moerr.NewEmptyRangeNoCtx(path.File) 553 } 554 return content[start:end], nil 555 } 556 557 setData := true 558 559 if w := vector.Entries[i].WriterForRead; w != nil { 560 setData = false 561 if getContentDone { 562 // data is ready 563 data, err := getData(ctx) 564 if err != nil { 565 return err 566 } 567 _, err = w.Write(data) 568 if err != nil { 569 return err 570 } 571 572 } else { 573 // get a reader and copy 574 reader, err := getReader(ctx, entry.Size < 0, entry.Offset, entry.Offset+entry.Size) 575 if err != nil { 576 return err 577 } 578 defer reader.Close() 579 _, err = io.Copy(w, reader) 580 err = s.mapError(err, key) 581 if err != nil { 582 return err 583 } 584 } 585 } 586 587 if ptr := vector.Entries[i].ReadCloserForRead; ptr != nil { 588 setData = false 589 if getContentDone { 590 // data is ready 591 data, err := getData(ctx) 592 if err != nil { 593 return err 594 } 595 *ptr = io.NopCloser(bytes.NewReader(data)) 596 597 } else { 598 // get a new reader 599 reader, err := getReader(ctx, entry.Size < 0, entry.Offset, entry.Offset+entry.Size) 600 if err != nil { 601 return err 602 } 603 *ptr = &readCloser{ 604 r: reader, 605 closeFunc: reader.Close, 606 } 607 } 608 } 609 610 // set Data field 611 if setData { 612 data, err := getData(ctx) 613 if err != nil { 614 return err 615 } 616 if int64(len(entry.Data)) < entry.Size || entry.Size < 0 { 617 entry.Data = data 618 if entry.Size < 0 { 619 entry.Size = int64(len(data)) 620 } 621 } else { 622 copy(entry.Data, data) 623 } 624 } 625 626 // set Object field 627 if err := entry.setObjectFromData(); err != nil { 628 return err 629 } 630 631 vector.Entries[i] = entry 632 } 633 634 return nil 635 } 636 637 func (s *S3FS) Delete(ctx context.Context, filePaths ...string) error { 638 select { 639 case <-ctx.Done(): 640 return ctx.Err() 641 default: 642 } 643 644 ctx, span := trace.Start(ctx, "S3FS.Delete") 645 defer span.End() 646 647 if len(filePaths) == 0 { 648 return nil 649 } 650 if len(filePaths) == 1 { 651 return s.deleteSingle(ctx, filePaths[0]) 652 } 653 654 objs := make([]types.ObjectIdentifier, 0, 1000) 655 for _, filePath := range filePaths { 656 path, err := ParsePathAtService(filePath, s.name) 657 if err != nil { 658 return err 659 } 660 objs = append(objs, types.ObjectIdentifier{Key: ptrTo(s.pathToKey(path.File))}) 661 if len(objs) == 1000 { 662 if err := s.deleteMultiObj(ctx, objs); err != nil { 663 return err 664 } 665 objs = objs[:0] 666 } 667 } 668 if err := s.deleteMultiObj(ctx, objs); err != nil { 669 return err 670 } 671 return nil 672 } 673 674 func (s *S3FS) deleteMultiObj(ctx context.Context, objs []types.ObjectIdentifier) error { 675 ctx, span := trace.Start(ctx, "S3FS.deleteMultiObj") 676 defer span.End() 677 output, err := s.client.DeleteObjects(ctx, &s3.DeleteObjectsInput{ 678 Bucket: ptrTo(s.bucket), 679 Delete: &types.Delete{ 680 Objects: objs, 681 // In quiet mode the response includes only keys where the delete action encountered an error. 682 Quiet: true, 683 }, 684 }) 685 // delete api failed 686 if err != nil { 687 return err 688 } 689 // delete api success, but with delete file failed. 690 message := strings.Builder{} 691 if len(output.Errors) > 0 { 692 for _, Error := range output.Errors { 693 if *Error.Code == (*types.NoSuchKey)(nil).ErrorCode() { 694 continue 695 } 696 message.WriteString(fmt.Sprintf("%s: %s, %s;", *Error.Key, *Error.Code, *Error.Message)) 697 } 698 } 699 if message.Len() > 0 { 700 return moerr.NewInternalErrorNoCtx("S3 Delete failed: %s", message.String()) 701 } 702 return nil 703 } 704 705 func (s *S3FS) deleteSingle(ctx context.Context, filePath string) error { 706 ctx, span := trace.Start(ctx, "S3FS.deleteSingle") 707 defer span.End() 708 path, err := ParsePathAtService(filePath, s.name) 709 if err != nil { 710 return err 711 } 712 _, err = s.client.DeleteObject( 713 ctx, 714 &s3.DeleteObjectInput{ 715 Bucket: ptrTo(s.bucket), 716 Key: ptrTo(s.pathToKey(path.File)), 717 }, 718 ) 719 if err != nil { 720 return err 721 } 722 723 return nil 724 } 725 726 func (s *S3FS) pathToKey(filePath string) string { 727 return pathpkg.Join(s.keyPrefix, filePath) 728 } 729 730 func (s *S3FS) keyToPath(key string) string { 731 path := strings.TrimPrefix(key, s.keyPrefix) 732 path = strings.TrimLeft(path, "/") 733 return path 734 } 735 736 func (s *S3FS) mapError(err error, path string) error { 737 if err == nil { 738 return nil 739 } 740 var httpError *http.ResponseError 741 if errors.As(err, &httpError) { 742 if httpError.Response.StatusCode == 404 { 743 return moerr.NewFileNotFoundNoCtx(path) 744 } 745 } 746 return err 747 } 748 749 var _ ETLFileService = new(S3FS) 750 751 func (*S3FS) ETLCompatible() {} 752 753 var _ CachingFileService = new(S3FS) 754 755 func (s *S3FS) FlushCache() { 756 if s.memCache != nil { 757 s.memCache.Flush() 758 } 759 } 760 761 func (s *S3FS) CacheStats() *CacheStats { 762 if s.memCache != nil { 763 return s.memCache.CacheStats() 764 } 765 return nil 766 } 767 768 func newS3FS(arguments []string) (*S3FS, error) { 769 if len(arguments) == 0 { 770 return nil, moerr.NewInvalidInputNoCtx("invalid S3 arguments") 771 } 772 773 var endpoint, region, bucket, apiKey, apiSecret, prefix, roleARN, externalID, name, sharedConfigProfile, isMinio string 774 for _, pair := range arguments { 775 key, value, ok := strings.Cut(pair, "=") 776 if !ok { 777 return nil, moerr.NewInvalidInputNoCtx("invalid S3 argument: %s", pair) 778 } 779 switch key { 780 case "endpoint": 781 endpoint = value 782 case "region": 783 region = value 784 case "bucket": 785 bucket = value 786 case "key": 787 apiKey = value 788 case "secret": 789 apiSecret = value 790 case "prefix": 791 prefix = value 792 case "role-arn": 793 roleARN = value 794 case "external-id": 795 externalID = value 796 case "name": 797 name = value 798 case "shared-config-profile": 799 sharedConfigProfile = value 800 case "is-minio": 801 isMinio = value 802 default: 803 return nil, moerr.NewInvalidInputNoCtx("invalid S3 argument: %s", pair) 804 } 805 } 806 807 if endpoint != "" { 808 u, err := url.Parse(endpoint) 809 if err != nil { 810 return nil, err 811 } 812 if u.Scheme == "" { 813 u.Scheme = "https" 814 } 815 endpoint = u.String() 816 } 817 818 ctx, cancel := context.WithTimeout(context.Background(), time.Minute) 819 defer cancel() 820 821 if region == "" { 822 // try to get region from bucket 823 resp, err := stdhttp.Head("https://" + bucket + ".s3.amazonaws.com") 824 if err == nil { 825 if value := resp.Header.Get("x-amz-bucket-region"); value != "" { 826 region = value 827 } 828 } 829 } 830 831 var credentialProvider aws.CredentialsProvider 832 833 loadConfigOptions := []func(*config.LoadOptions) error{ 834 config.WithLogger(logutil.GetS3Logger()), 835 config.WithClientLogMode( 836 aws.LogSigning | 837 aws.LogRetries | 838 aws.LogRequest | 839 aws.LogResponse | 840 aws.LogDeprecatedUsage | 841 aws.LogRequestEventMessage | 842 aws.LogResponseEventMessage, 843 ), 844 } 845 if sharedConfigProfile != "" { 846 loadConfigOptions = append(loadConfigOptions, 847 config.WithSharedConfigProfile(sharedConfigProfile), 848 ) 849 } 850 851 if apiKey != "" && apiSecret != "" { 852 // static 853 credentialProvider = credentials.NewStaticCredentialsProvider(apiKey, apiSecret, "") 854 } 855 856 if roleARN != "" { 857 // role arn 858 awsConfig, err := config.LoadDefaultConfig(ctx, loadConfigOptions...) 859 if err != nil { 860 return nil, err 861 } 862 863 stsSvc := sts.NewFromConfig(awsConfig, func(options *sts.Options) { 864 if region == "" { 865 options.Region = "ap-northeast-1" 866 } else { 867 options.Region = region 868 } 869 }) 870 credentialProvider = stscreds.NewAssumeRoleProvider( 871 stsSvc, 872 roleARN, 873 func(opts *stscreds.AssumeRoleOptions) { 874 if externalID != "" { 875 opts.ExternalID = &externalID 876 } 877 }, 878 ) 879 // validate 880 _, err = credentialProvider.Retrieve(ctx) 881 if err != nil { 882 return nil, err 883 } 884 } 885 886 if credentialProvider != nil { 887 credentialProvider = aws.NewCredentialsCache(credentialProvider) 888 } 889 890 if credentialProvider != nil { 891 loadConfigOptions = append(loadConfigOptions, 892 config.WithCredentialsProvider( 893 credentialProvider, 894 ), 895 ) 896 } 897 config, err := config.LoadDefaultConfig(ctx, loadConfigOptions...) 898 if err != nil { 899 return nil, err 900 } 901 902 s3Options := []func(*s3.Options){} 903 904 if credentialProvider != nil { 905 s3Options = append(s3Options, 906 func(opt *s3.Options) { 907 opt.Credentials = credentialProvider 908 }, 909 ) 910 } 911 912 if endpoint != "" { 913 if isMinio != "" { 914 // for minio 915 s3Options = append(s3Options, 916 s3.WithEndpointResolver( 917 s3.EndpointResolverFunc( 918 func( 919 region string, 920 _ s3.EndpointResolverOptions, 921 ) ( 922 ep aws.Endpoint, 923 err error, 924 ) { 925 ep.URL = endpoint 926 ep.Source = aws.EndpointSourceCustom 927 ep.HostnameImmutable = true 928 ep.SigningRegion = region 929 return 930 }, 931 ), 932 ), 933 ) 934 } else { 935 s3Options = append(s3Options, 936 s3.WithEndpointResolver( 937 s3.EndpointResolverFromURL(endpoint), 938 ), 939 ) 940 } 941 } 942 943 if region != "" { 944 s3Options = append(s3Options, 945 func(opt *s3.Options) { 946 opt.Region = region 947 }, 948 ) 949 } 950 951 client := s3.NewFromConfig( 952 config, 953 s3Options..., 954 ) 955 956 _, err = client.HeadBucket(ctx, &s3.HeadBucketInput{ 957 Bucket: ptrTo(bucket), 958 }) 959 if err != nil { 960 return nil, moerr.NewInternalErrorNoCtx("bad s3 config: %v", err) 961 } 962 963 fs := &S3FS{ 964 name: name, 965 client: client, 966 bucket: bucket, 967 keyPrefix: prefix, 968 } 969 970 return fs, nil 971 972 }