github.com/aldelo/common@v1.5.1/wrapper/s3/s3.go

package s3

/*
 * Copyright 2020-2023 Aldelo, LP
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// =================================================================================================================
// AWS CREDENTIAL:
//		use $> aws configure (to set aws access key and secret to target machine)
//		Store AWS Access ID and Secret Key into Default Profile Using '$ aws configure' cli
//
// To Install & Setup AWS CLI on Host:
//		1) https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2-linux.html
//			On Ubuntu, if host does not have zip and unzip:
//				$> sudo apt install zip
//				$> sudo apt install unzip
//			On Ubuntu, to install AWS CLI v2:
//				$> curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip"
//				$> unzip awscliv2.zip
//				$> sudo ./aws/install
//		2) $> aws configure set region awsRegionName --profile default
//		3) $> aws configure
//			follow prompts to enter Access ID and Secret Key
//
// AWS Region Name Reference:
//		us-west-2, us-east-1, ap-northeast-1, etc
//		See: https://docs.aws.amazon.com/general/latest/gr/rande.html
// =================================================================================================================

import (
	"bytes"
	"context"
	"errors"
	util "github.com/aldelo/common"
	awshttp2 "github.com/aldelo/common/wrapper/aws"
	"github.com/aldelo/common/wrapper/aws/awsregion"
	"github.com/aldelo/common/wrapper/xray"
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
	"github.com/aws/aws-sdk-go/service/s3/s3manager"
	awsxray "github.com/aws/aws-xray-sdk-go/xray"
	"net/http"
	"os"
	"time"
)

// ================================================================================================================
// STRUCTS
// ================================================================================================================

// S3 struct encapsulates the AWS S3 access functionality
type S3 struct {
	// define the AWS region that s3 is located at
	AwsRegion awsregion.AWSRegion

	// custom http2 client options
	HttpOptions *awshttp2.HttpClientSettings

	// bucket name
	BucketName string

	// store aws session object
	sess *session.Session

	// store s3 object to share across session
	s3Obj *s3.S3

	// store uploader object to share across session
	uploader *s3manager.Uploader

	// store downloader object to share across session
	downloader *s3manager.Downloader

	_parentSegment *xray.XRayParentSegment
}
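
// exampleNewS3 is an illustrative sketch, not part of the original wrapper: it shows
// how the exported fields above are expected to be populated before Connect is called.
// The awsregion constant name and the bucket name below are placeholder assumptions;
// substitute whichever region constant your build of
// github.com/aldelo/common/wrapper/aws/awsregion actually exposes.
func exampleNewS3() *S3 {
	return &S3{
		AwsRegion:  awsregion.AWS_us_east_1, // assumed constant name for the us-east-1 region
		BucketName: "example-bucket",        // placeholder bucket name
		// HttpOptions may be left nil; connectInternal supplies default http2 client settings when nil
	}
}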

// ================================================================================================================
// STRUCTS FUNCTIONS
// ================================================================================================================

// ----------------------------------------------------------------------------------------------------------------
// utility functions
// ----------------------------------------------------------------------------------------------------------------

// Connect will establish a connection to the s3 service
func (s *S3) Connect(parentSegment ...*xray.XRayParentSegment) (err error) {
	if xray.XRayServiceOn() {
		if len(parentSegment) > 0 {
			s._parentSegment = parentSegment[0]
		}

		seg := xray.NewSegment("S3-Connect", s._parentSegment)
		defer seg.Close()
		defer func() {
			_ = seg.Seg.AddMetadata("S3-AWS-Region", s.AwsRegion)
			_ = seg.Seg.AddMetadata("S3-Bucket-Name", s.BucketName)

			if err != nil {
				_ = seg.Seg.AddError(err)
			}
		}()

		err = s.connectInternal()

		if err == nil {
			awsxray.AWS(s.s3Obj.Client)
		}

		return err
	} else {
		return s.connectInternal()
	}
}

// connectInternal establishes the aws session and creates the cached s3, uploader, and downloader objects
func (s *S3) connectInternal() error {
	// clean up prior session reference
	s.sess = nil

	if !s.AwsRegion.Valid() || s.AwsRegion == awsregion.UNKNOWN {
		return errors.New("Connect To S3 Failed: (AWS Session Error) " + "Region is Required")
	}

	// create custom http2 client if needed
	var httpCli *http.Client
	var httpErr error

	if s.HttpOptions == nil {
		s.HttpOptions = new(awshttp2.HttpClientSettings)
	}

	// use custom http2 client
	h2 := &awshttp2.AwsHttp2Client{
		Options: s.HttpOptions,
	}

	if httpCli, httpErr = h2.NewHttp2Client(); httpErr != nil {
		return errors.New("Connect to S3 Failed: (AWS Session Error) " + "Create Custom Http2 Client Errored = " + httpErr.Error())
	}

	// establish aws session connection and keep session object in struct
	if sess, err := session.NewSession(
		&aws.Config{
			Region:     aws.String(s.AwsRegion.Key()),
			HTTPClient: httpCli,
		}); err != nil {
		// aws session error
		return errors.New("Connect To S3 Failed: (AWS Session Error) " + err.Error())
	} else {
		// aws session obtained
		s.sess = sess

		// create cached objects for shared use
		s.s3Obj = s3.New(s.sess)

		if s.s3Obj == nil {
			return errors.New("Connect To S3 Object Failed: (New S3 Connection) " + "Connection Object Nil")
		}

		s.uploader = s3manager.NewUploader(s.sess)

		if s.uploader == nil {
			return errors.New("Connect To S3Manager Uploader Failed: (New S3Manager Uploader Connection) " + "Connection Object Nil")
		}

		s.downloader = s3manager.NewDownloader(s.sess)

		if s.downloader == nil {
			return errors.New("Connect To S3Manager Downloader Failed: (New S3Manager Downloader Connection) " + "Connection Object Nil")
		}

		// session stored to struct
		return nil
	}
}

// Disconnect will clear the cached aws session and s3 service objects
func (s *S3) Disconnect() {
	s.s3Obj = nil
	s.uploader = nil
	s.downloader = nil
	s.sess = nil
}

// UpdateParentSegment updates this struct's xray parent segment; if there is no parent segment, pass nil
func (s *S3) UpdateParentSegment(parentSegment *xray.XRayParentSegment) {
	s._parentSegment = parentSegment
}
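
// exampleConnectAndDisconnect is an illustrative sketch, not part of the original
// wrapper: it shows the expected call order around Connect and Disconnect using only
// the methods defined in this file. The caller is assumed to have populated the S3
// struct fields beforehand (see exampleNewS3 above).
func exampleConnectAndDisconnect(svc *S3) error {
	// Connect establishes the aws session plus the cached s3, uploader, and
	// downloader objects; it must succeed before any upload/download/delete call
	if err := svc.Connect(); err != nil {
		return err
	}

	// Disconnect clears the cached session and service objects when done
	defer svc.Disconnect()

	// ... perform UploadFile / Upload / DownloadFile / Download / Delete calls here ...

	return nil
}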

// UploadFile will upload the specified file to S3 in the bucket name defined within the S3 struct
//
// Parameters:
//
//	timeOutDuration = nil if no timeout is to be applied; otherwise the timeout duration (typically in seconds) enforced via context
//	sourceFilePath = fully qualified source file path and name to upload
//	targetKey = the object key name itself, without any '/'-separated folder parts
//	targetFolder = if the upload destination is nested under one or more 'folder' levels, specify the folder names from left (outermost) to right (innermost)
//
// Return Values:
//
//	location = the location on the s3 bucket where the upload was persisted
//	err = error encountered while attempting to upload
func (s *S3) UploadFile(timeOutDuration *time.Duration, sourceFilePath string, targetKey string, targetFolder ...string) (location string, err error) {
	segCtx := context.Background()
	segCtxSet := false

	seg := xray.NewSegmentNullable("S3-UploadFile", s._parentSegment)

	if seg != nil {
		segCtx = seg.Ctx
		segCtxSet = true

		defer seg.Close()
		defer func() {
			_ = seg.Seg.AddMetadata("S3-UploadFile-SourceFilePath", sourceFilePath)
			_ = seg.Seg.AddMetadata("S3-UploadFile-TargetKey", targetKey)
			_ = seg.Seg.AddMetadata("S3-UploadFile-TargetFolder", targetFolder)
			_ = seg.Seg.AddMetadata("S3-UploadFile-Result-Location", location)

			if err != nil {
				_ = seg.Seg.AddError(err)
			}
		}()
	}

	// validate
	if s.uploader == nil {
		err = errors.New("S3 UploadFile Failed: " + "Uploader is Required")
		return "", err
	}

	if util.LenTrim(s.BucketName) <= 0 {
		err = errors.New("S3 UploadFile Failed: " + "Bucket Name is Required")
		return "", err
	}

	if util.LenTrim(sourceFilePath) <= 0 {
		err = errors.New("S3 UploadFile Failed: " + "Source File Path is Required")
		return "", err
	}

	if util.LenTrim(targetKey) <= 0 {
		err = errors.New("S3 UploadFile Failed: " + "Target Key is Required")
		return "", err
	}

	if !util.FileExists(sourceFilePath) {
		err = errors.New("S3 UploadFile Failed: " + "Source File Does Not Exist at Path")
		return "", err
	}

	// define key
	key := targetKey

	if len(targetFolder) > 0 {
		preKey := ""

		for _, v := range targetFolder {
			preKey += v + "/"
		}

		key = preKey + key
	}

	// open file to prepare for upload
	if f, err := os.Open(sourceFilePath); err != nil {
		// open file failed
		err = errors.New("S3 UploadFile Failed: (Open Source File) " + err.Error())
		return "", err
	} else {
		// open file successful
		defer f.Close()

		// upload content to s3 bucket as an object with the key being custom
		var output *s3manager.UploadOutput

		if timeOutDuration != nil {
			ctx, cancel := context.WithTimeout(segCtx, *timeOutDuration)
			defer cancel()

			output, err = s.uploader.UploadWithContext(ctx, &s3manager.UploadInput{
				Bucket: aws.String(s.BucketName),
				Key:    aws.String(key),
				Body:   f,
			})
		} else {
			if segCtxSet {
				output, err = s.uploader.UploadWithContext(segCtx,
					&s3manager.UploadInput{
						Bucket: aws.String(s.BucketName),
						Key:    aws.String(key),
						Body:   f,
					})
			} else {
				output, err = s.uploader.Upload(&s3manager.UploadInput{
					Bucket: aws.String(s.BucketName),
					Key:    aws.String(key),
					Body:   f,
				})
			}
		}

		// evaluate result
		if err != nil {
			// upload error
			err = errors.New("S3 UploadFile Failed: (Upload Source File) " + err.Error())
			return "", err
		} else {
			// upload success
			location = output.Location
			return location, nil
		}
	}
}
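
// exampleUploadFile is an illustrative sketch, not part of the original wrapper: it
// uploads a local file so that its final key becomes "reports/2023/summary.pdf" in
// the bucket configured on the connected S3 struct. The file path, key, and folder
// names are placeholder assumptions.
func exampleUploadFile(svc *S3) (string, error) {
	// optional timeout applied via context inside UploadFile
	timeout := 30 * time.Second

	// folder parts are prefixed left to right, each followed by '/'
	location, err := svc.UploadFile(&timeout, "/tmp/summary.pdf", "summary.pdf", "reports", "2023")
	if err != nil {
		return "", err
	}

	// location is the s3 url reported by the uploader
	return location, nil
}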

// Upload will upload the specified bytes to S3 in the bucket name defined within the S3 struct
//
// Parameters:
//
//	timeOutDuration = nil if no timeout is to be applied; otherwise the timeout duration (typically in seconds) enforced via context
//	data = slice of bytes to upload to s3
//	targetKey = the object key name itself, without any '/'-separated folder parts
//	targetFolder = if the upload destination is nested under one or more 'folder' levels, specify the folder names from left (outermost) to right (innermost)
//
// Return Values:
//
//	location = the location on the s3 bucket where the upload was persisted
//	err = error encountered while attempting to upload
func (s *S3) Upload(timeOutDuration *time.Duration, data []byte, targetKey string, targetFolder ...string) (location string, err error) {
	segCtx := context.Background()
	segCtxSet := false

	seg := xray.NewSegmentNullable("S3-Upload", s._parentSegment)

	if seg != nil {
		segCtx = seg.Ctx
		segCtxSet = true

		defer seg.Close()
		defer func() {
			_ = seg.Seg.AddMetadata("S3-Upload-TargetKey", targetKey)
			_ = seg.Seg.AddMetadata("S3-Upload-TargetFolder", targetFolder)
			_ = seg.Seg.AddMetadata("S3-Upload-Result-Location", location)

			if err != nil {
				_ = seg.Seg.AddError(err)
			}
		}()
	}

	// validate
	if s.uploader == nil {
		err = errors.New("S3 Upload Failed: " + "Uploader is Required")
		return "", err
	}

	if util.LenTrim(s.BucketName) <= 0 {
		err = errors.New("S3 Upload Failed: " + "Bucket Name is Required")
		return "", err
	}

	if data == nil {
		err = errors.New("S3 Upload Failed: " + "Data To Upload is Required (Slice=Nil)")
		return "", err
	}

	if len(data) <= 0 {
		err = errors.New("S3 Upload Failed: " + "Data To Upload is Required (Len=0)")
		return "", err
	}

	if util.LenTrim(targetKey) <= 0 {
		err = errors.New("S3 Upload Failed: " + "Target Key is Required")
		return "", err
	}

	// generate io.Reader from byte slice
	r := bytes.NewReader(data)

	// define key
	key := targetKey

	if len(targetFolder) > 0 {
		preKey := ""

		for _, v := range targetFolder {
			preKey += v + "/"
		}

		key = preKey + key
	}

	// upload content to s3 bucket as an object with the key being custom
	var output *s3manager.UploadOutput

	if timeOutDuration != nil {
		ctx, cancel := context.WithTimeout(segCtx, *timeOutDuration)
		defer cancel()

		output, err = s.uploader.UploadWithContext(ctx, &s3manager.UploadInput{
			Bucket: aws.String(s.BucketName),
			Key:    aws.String(key),
			Body:   r,
		})
	} else {
		if segCtxSet {
			output, err = s.uploader.UploadWithContext(segCtx,
				&s3manager.UploadInput{
					Bucket: aws.String(s.BucketName),
					Key:    aws.String(key),
					Body:   r,
				})
		} else {
			output, err = s.uploader.Upload(&s3manager.UploadInput{
				Bucket: aws.String(s.BucketName),
				Key:    aws.String(key),
				Body:   r,
			})
		}
	}

	// evaluate result
	if err != nil {
		// upload error
		err = errors.New("S3 Upload Failed: (Upload Bytes) " + err.Error())
		return "", err
	} else {
		// upload success
		location = output.Location
		return location, nil
	}
}
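
// exampleUploadBytes is an illustrative sketch, not part of the original wrapper: it
// uploads an in-memory byte slice under a single key with no folder prefix and no
// timeout. The key name and payload are placeholder assumptions.
func exampleUploadBytes(svc *S3) (string, error) {
	payload := []byte(`{"status":"ok"}`)

	// nil timeOutDuration means no context timeout is applied
	return svc.Upload(nil, payload, "status.json")
}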

// DownloadFile will download an object from the S3 bucket by key and persist it into a file on disk
//
// Parameters:
//
//	timeOutDuration = nil if no timeout is to be applied; otherwise the timeout duration (typically in seconds) enforced via context
//	writeToFilePath = file path that will save the file containing the s3 object content
//	targetKey = the object key name itself, without any '/'-separated folder parts
//	targetFolder = if the download source is nested under one or more 'folder' levels, specify the folder names from left (outermost) to right (innermost)
//
// Return Values:
//
//	location = local disk file path where the downloaded content is stored
//	notFound = true if the key was not found in the s3 bucket
//	err = error encountered while attempting to download
func (s *S3) DownloadFile(timeOutDuration *time.Duration, writeToFilePath string, targetKey string, targetFolder ...string) (location string, notFound bool, err error) {
	segCtx := context.Background()
	segCtxSet := false

	seg := xray.NewSegmentNullable("S3-DownloadFile", s._parentSegment)

	if seg != nil {
		segCtx = seg.Ctx
		segCtxSet = true

		defer seg.Close()
		defer func() {
			_ = seg.Seg.AddMetadata("S3-DownloadFile-TargetKey", targetKey)
			_ = seg.Seg.AddMetadata("S3-DownloadFile-TargetFolder", targetFolder)
			_ = seg.Seg.AddMetadata("S3-DownloadFile-WriteToFilePath", writeToFilePath)
			_ = seg.Seg.AddMetadata("S3-DownloadFile-Result-NotFound", notFound)
			_ = seg.Seg.AddMetadata("S3-DownloadFile-Result-Location", location)

			if err != nil {
				_ = seg.Seg.AddError(err)
			}
		}()
	}

	// validate
	if s.downloader == nil {
		err = errors.New("S3 DownloadFile Failed: " + "Downloader is Required")
		return "", false, err
	}

	if util.LenTrim(s.BucketName) <= 0 {
		err = errors.New("S3 DownloadFile Failed: " + "Bucket Name is Required")
		return "", false, err
	}

	if util.LenTrim(writeToFilePath) <= 0 {
		err = errors.New("S3 DownloadFile Failed: " + "Write To File Path is Required")
		return "", false, err
	}

	if util.LenTrim(targetKey) <= 0 {
		err = errors.New("S3 DownloadFile Failed: " + "Target Key is Required")
		return "", false, err
	}

	// create the local file that will receive the s3 object content
	f, e := os.Create(writeToFilePath)

	if e != nil {
		err = errors.New("S3 DownloadFile Failed: " + "Create File for Write To File Path Failed: " + e.Error())
		return "", false, err
	}

	defer f.Close()

	// define key
	key := targetKey

	if len(targetFolder) > 0 {
		preKey := ""

		for _, v := range targetFolder {
			preKey += v + "/"
		}

		key = preKey + key
	}

	// download content from s3 bucket as an object with the key being custom
	var bytesCount int64

	if timeOutDuration != nil {
		ctx, cancel := context.WithTimeout(segCtx, *timeOutDuration)
		defer cancel()

		bytesCount, err = s.downloader.DownloadWithContext(ctx, f, &s3.GetObjectInput{
			Bucket: aws.String(s.BucketName),
			Key:    aws.String(key),
		})
	} else {
		if segCtxSet {
			bytesCount, err = s.downloader.DownloadWithContext(segCtx, f, &s3.GetObjectInput{
				Bucket: aws.String(s.BucketName),
				Key:    aws.String(key),
			})
		} else {
			bytesCount, err = s.downloader.Download(f, &s3.GetObjectInput{
				Bucket: aws.String(s.BucketName),
				Key:    aws.String(key),
			})
		}
	}

	// evaluate result
	if err != nil {
		// download error
		err = errors.New("S3 DownloadFile Failed: (Download File) " + err.Error())
		return "", false, err
	} else {
		// download successful
		if bytesCount <= 0 {
			// not found
			notFound = true
			return "", notFound, nil
		} else {
			// found
			location = writeToFilePath
			return location, false, nil
		}
	}
}
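
// exampleDownloadFile is an illustrative sketch, not part of the original wrapper: it
// downloads "reports/2023/summary.pdf" into a local file and distinguishes the
// not-found case from other failures. The paths, key, and folder names are
// placeholder assumptions.
func exampleDownloadFile(svc *S3) (string, error) {
	timeout := 60 * time.Second

	location, notFound, err := svc.DownloadFile(&timeout, "/tmp/summary-copy.pdf", "summary.pdf", "reports", "2023")
	if err != nil {
		return "", err
	}

	if notFound {
		// zero bytes were written, so the key does not exist in the bucket
		return "", errors.New("object not found in s3 bucket")
	}

	// location echoes the local write-to file path on success
	return location, nil
}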

// Download will download an object from the S3 bucket by key and return it as a byte slice
//
// Parameters:
//
//	timeOutDuration = nil if no timeout is to be applied; otherwise the timeout duration (typically in seconds) enforced via context
//	targetKey = the object key name itself, without any '/'-separated folder parts
//	targetFolder = if the download source is nested under one or more 'folder' levels, specify the folder names from left (outermost) to right (innermost)
//
// Return Values:
//
//	data = byte slice of the object downloaded from the s3 bucket by key
//	notFound = true if the key was not found in the s3 bucket
//	err = error encountered while attempting to download
func (s *S3) Download(timeOutDuration *time.Duration, targetKey string, targetFolder ...string) (data []byte, notFound bool, err error) {
	segCtx := context.Background()
	segCtxSet := false

	seg := xray.NewSegmentNullable("S3-Download", s._parentSegment)

	if seg != nil {
		segCtx = seg.Ctx
		segCtxSet = true

		defer seg.Close()
		defer func() {
			_ = seg.Seg.AddMetadata("S3-Download-TargetKey", targetKey)
			_ = seg.Seg.AddMetadata("S3-Download-TargetFolder", targetFolder)
			_ = seg.Seg.AddMetadata("S3-Download-Result-NotFound", notFound)

			if err != nil {
				_ = seg.Seg.AddError(err)
			}
		}()
	}

	// validate
	if s.downloader == nil {
		err = errors.New("S3 Download Failed: " + "Downloader is Required")
		return nil, false, err
	}

	if util.LenTrim(s.BucketName) <= 0 {
		err = errors.New("S3 Download Failed: " + "Bucket Name is Required")
		return nil, false, err
	}

	if util.LenTrim(targetKey) <= 0 {
		err = errors.New("S3 Download Failed: " + "Target Key is Required")
		return nil, false, err
	}

	// create write buffer to store s3 download content
	buf := aws.NewWriteAtBuffer([]byte{})

	// define key
	key := targetKey

	if len(targetFolder) > 0 {
		preKey := ""

		for _, v := range targetFolder {
			preKey += v + "/"
		}

		key = preKey + key
	}

	// download content from s3 bucket as an object with the key being custom
	var bytesCount int64

	if timeOutDuration != nil {
		ctx, cancel := context.WithTimeout(segCtx, *timeOutDuration)
		defer cancel()

		bytesCount, err = s.downloader.DownloadWithContext(ctx, buf, &s3.GetObjectInput{
			Bucket: aws.String(s.BucketName),
			Key:    aws.String(key),
		})
	} else {
		if segCtxSet {
			bytesCount, err = s.downloader.DownloadWithContext(segCtx, buf, &s3.GetObjectInput{
				Bucket: aws.String(s.BucketName),
				Key:    aws.String(key),
			})
		} else {
			bytesCount, err = s.downloader.Download(buf, &s3.GetObjectInput{
				Bucket: aws.String(s.BucketName),
				Key:    aws.String(key),
			})
		}
	}

	// evaluate result
	if err != nil {
		// download error
		err = errors.New("S3 Download Failed: (Download Bytes) " + err.Error())
		return nil, false, err
	} else {
		// download successful
		if bytesCount <= 0 {
			// not found
			notFound = true
			return nil, notFound, nil
		} else {
			// found
			data = buf.Bytes()
			return data, false, nil
		}
	}
}
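
// exampleDownloadBytes is an illustrative sketch, not part of the original wrapper:
// it reads an object into memory and returns its bytes, treating not-found as a nil
// slice without error. The key name is a placeholder assumption.
func exampleDownloadBytes(svc *S3) ([]byte, error) {
	data, notFound, err := svc.Download(nil, "status.json")
	if err != nil {
		return nil, err
	}

	if notFound {
		// key does not exist in the bucket; caller decides how to handle this case
		return nil, nil
	}

	return data, nil
}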

// Delete will delete an object from the S3 bucket by key
//
// Parameters:
//
//	timeOutDuration = nil if no timeout is to be applied; otherwise the timeout duration (typically in seconds) enforced via context
//	targetKey = the object key name itself, without any '/'-separated folder parts
//	targetFolder = if the delete target is nested under one or more 'folder' levels, specify the folder names from left (outermost) to right (innermost)
//
// Return Values:
//
//	deleteSuccess = true if the delete completed successfully; false if the delete failed to perform (check err)
//	err = error encountered while attempting to delete
func (s *S3) Delete(timeOutDuration *time.Duration, targetKey string, targetFolder ...string) (deleteSuccess bool, err error) {
	segCtx := context.Background()
	segCtxSet := false

	seg := xray.NewSegmentNullable("S3-Delete", s._parentSegment)

	if seg != nil {
		segCtx = seg.Ctx
		segCtxSet = true

		defer seg.Close()
		defer func() {
			_ = seg.Seg.AddMetadata("S3-Delete-TargetKey", targetKey)
			_ = seg.Seg.AddMetadata("S3-Delete-TargetFolder", targetFolder)
			_ = seg.Seg.AddMetadata("S3-Delete-Result-Success", deleteSuccess)

			if err != nil {
				_ = seg.Seg.AddError(err)
			}
		}()
	}

	// validate
	if s.s3Obj == nil {
		err = errors.New("S3 Delete Failed: " + "S3 Object is Required")
		return false, err
	}

	if util.LenTrim(s.BucketName) <= 0 {
		err = errors.New("S3 Delete Failed: " + "Bucket Name is Required")
		return false, err
	}

	if util.LenTrim(targetKey) <= 0 {
		err = errors.New("S3 Delete Failed: " + "Target Key is Required")
		return false, err
	}

	// define key
	key := targetKey

	if len(targetFolder) > 0 {
		preKey := ""

		for _, v := range targetFolder {
			preKey += v + "/"
		}

		key = preKey + key
	}

	// delete object from s3 bucket
	if timeOutDuration != nil {
		ctx, cancel := context.WithTimeout(segCtx, *timeOutDuration)
		defer cancel()

		_, err = s.s3Obj.DeleteObjectWithContext(ctx, &s3.DeleteObjectInput{
			Bucket: aws.String(s.BucketName),
			Key:    aws.String(key),
		})
	} else {
		if segCtxSet {
			_, err = s.s3Obj.DeleteObjectWithContext(segCtx, &s3.DeleteObjectInput{
				Bucket: aws.String(s.BucketName),
				Key:    aws.String(key),
			})
		} else {
			_, err = s.s3Obj.DeleteObject(&s3.DeleteObjectInput{
				Bucket: aws.String(s.BucketName),
				Key:    aws.String(key),
			})
		}
	}

	// evaluate result
	if err != nil {
		// delete error
		err = errors.New("S3 Delete Failed: (Delete Object) " + err.Error())
		return false, err
	} else {
		// delete successful
		deleteSuccess = true
		return deleteSuccess, nil
	}
}
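
// exampleDelete is an illustrative sketch, not part of the original wrapper: it
// deletes "reports/2023/summary.pdf" from the configured bucket with a short timeout.
// The key and folder names are placeholder assumptions.
func exampleDelete(svc *S3) error {
	timeout := 15 * time.Second

	deleted, err := svc.Delete(&timeout, "summary.pdf", "reports", "2023")
	if err != nil {
		return err
	}

	if !deleted {
		return errors.New("s3 delete did not report success")
	}

	return nil
}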