package aws

import (
	"bytes"
	"encoding/json"
	"fmt"
	"log"
	"net/url"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/s3"
	"github.com/hashicorp/errwrap"
	"github.com/hashicorp/terraform/helper/hashcode"
	"github.com/hashicorp/terraform/helper/resource"
	"github.com/hashicorp/terraform/helper/schema"
)

// resourceAwsS3Bucket returns the schema and CRUD handlers for the
// aws_s3_bucket resource.
func resourceAwsS3Bucket() *schema.Resource {
	return &schema.Resource{
		Create: resourceAwsS3BucketCreate,
		Read:   resourceAwsS3BucketRead,
		Update: resourceAwsS3BucketUpdate,
		Delete: resourceAwsS3BucketDelete,
		Importer: &schema.ResourceImporter{
			State: resourceAwsS3BucketImportState,
		},

		Schema: map[string]*schema.Schema{
			// Bucket name; changing it forces a new bucket to be created.
			"bucket": {
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},

			"arn": {
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
			},

			// Canned ACL applied to the bucket.
			"acl": {
				Type:     schema.TypeString,
				Default:  "private",
				Optional: true,
			},

			// Bucket policy JSON; semantically equivalent policies do not diff.
			"policy": {
				Type:             schema.TypeString,
				Optional:         true,
				ValidateFunc:     validateJsonString,
				DiffSuppressFunc: suppressEquivalentAwsPolicyDiffs,
			},

			"cors_rule": {
				Type:     schema.TypeList,
				Optional: true,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"allowed_headers": {
							Type:     schema.TypeList,
							Optional: true,
							Elem:     &schema.Schema{Type: schema.TypeString},
						},
						"allowed_methods": {
							Type:     schema.TypeList,
							Required: true,
							Elem:     &schema.Schema{Type: schema.TypeString},
						},
						"allowed_origins": {
							Type:     schema.TypeList,
							Required: true,
							Elem:     &schema.Schema{Type: schema.TypeString},
						},
						"expose_headers": {
							Type:     schema.TypeList,
							Optional: true,
							Elem:     &schema.Schema{Type: schema.TypeString},
						},
						"max_age_seconds": {
							Type:     schema.TypeInt,
							Optional: true,
						},
					},
				},
			},

			"website": {
				Type:     schema.TypeList,
				Optional: true,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"index_document": {
							Type:     schema.TypeString,
							Optional: true,
						},

						"error_document": {
							Type:     schema.TypeString,
							Optional: true,
						},

						// Mutually exclusive with the per-document settings above.
						"redirect_all_requests_to": {
							Type: schema.TypeString,
							ConflictsWith: []string{
								"website.0.index_document",
								"website.0.error_document",
								"website.0.routing_rules",
							},
							Optional: true,
						},

						"routing_rules": {
							Type:         schema.TypeString,
							Optional:     true,
							ValidateFunc: validateJsonString,
							// Normalize the JSON so formatting-only changes do not diff.
							StateFunc: func(v interface{}) string {
								json, _ := normalizeJsonString(v)
								return json
							},
						},
					},
				},
			},

			"hosted_zone_id": {
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
			},

			"region": {
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
			},
			"website_endpoint": {
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
			},
			"website_domain": {
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
			},

			"versioning": {
				Type:     schema.TypeList,
				Optional: true,
				Computed: true,
				MaxItems: 1,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"enabled": {
							Type:     schema.TypeBool,
							Optional: true,
							Default:  false,
						},
						"mfa_delete": {
							Type:     schema.TypeBool,
							Optional: true,
							Default:  false,
						},
					},
				},
			},

			"logging": {
				Type:     schema.TypeSet,
				Optional: true,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"target_bucket": {
							Type:     schema.TypeString,
							Required: true,
						},
						"target_prefix": {
							Type:     schema.TypeString,
							Optional: true,
						},
					},
				},
				// Hash over target bucket + prefix so a change in either is detected.
				Set: func(v interface{}) int {
					var buf bytes.Buffer
					m := v.(map[string]interface{})
					buf.WriteString(fmt.Sprintf("%s-", m["target_bucket"]))
					buf.WriteString(fmt.Sprintf("%s-", m["target_prefix"]))
					return hashcode.String(buf.String())
				},
			},

			"lifecycle_rule": {
				Type:     schema.TypeList,
				Optional: true,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"id": {
							Type:         schema.TypeString,
							Optional:     true,
							Computed:     true,
							ValidateFunc: validateS3BucketLifecycleRuleId,
						},
						"prefix": {
							Type:     schema.TypeString,
							Required: true,
						},
						"enabled": {
							Type:     schema.TypeBool,
							Required: true,
						},
						"abort_incomplete_multipart_upload_days": {
							Type:     schema.TypeInt,
							Optional: true,
						},
						"expiration": {
							Type:     schema.TypeSet,
							Optional: true,
							Set:      expirationHash,
							Elem: &schema.Resource{
								Schema: map[string]*schema.Schema{
									"date": {
										Type:         schema.TypeString,
										Optional:     true,
										ValidateFunc: validateS3BucketLifecycleTimestamp,
									},
									"days": {
										Type:     schema.TypeInt,
										Optional: true,
									},
									"expired_object_delete_marker": {
										Type:     schema.TypeBool,
										Optional: true,
									},
								},
							},
						},
						"noncurrent_version_expiration": {
							Type:     schema.TypeSet,
							Optional: true,
							Set:      expirationHash,
							Elem: &schema.Resource{
								Schema: map[string]*schema.Schema{
									"days": {
										Type:     schema.TypeInt,
										Optional: true,
									},
								},
							},
						},
						"transition": {
							Type:     schema.TypeSet,
							Optional: true,
							Set:      transitionHash,
							Elem: &schema.Resource{
								Schema: map[string]*schema.Schema{
									"date": {
										Type:         schema.TypeString,
										Optional:     true,
										ValidateFunc: validateS3BucketLifecycleTimestamp,
									},
									"days": {
										Type:     schema.TypeInt,
										Optional: true,
									},
									"storage_class": {
										Type:         schema.TypeString,
										Required:     true,
										ValidateFunc: validateS3BucketLifecycleStorageClass,
									},
								},
							},
						},
						"noncurrent_version_transition": {
							Type:     schema.TypeSet,
							Optional: true,
							Set:      transitionHash,
							Elem: &schema.Resource{
								Schema: map[string]*schema.Schema{
									"days": {
										Type:     schema.TypeInt,
										Optional: true,
									},
									"storage_class": {
										Type:         schema.TypeString,
										Required:     true,
										ValidateFunc: validateS3BucketLifecycleStorageClass,
									},
								},
							},
						},
					},
				},
			},

			// When true, all object versions and delete markers are removed
			// before the bucket itself is deleted on destroy.
			"force_destroy": {
				Type:     schema.TypeBool,
				Optional: true,
				Default:  false,
			},

			"acceleration_status": {
				Type:         schema.TypeString,
				Optional:     true,
				Computed:     true,
				ValidateFunc: validateS3BucketAccelerationStatus,
			},

			"request_payer": {
				Type:         schema.TypeString,
				Optional:     true,
				Computed:     true,
				ValidateFunc: validateS3BucketRequestPayerType,
			},

			"replication_configuration": {
				Type:     schema.TypeList,
				Optional: true,
				MaxItems: 1,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"role": {
							Type:     schema.TypeString,
							Required: true,
						},
						"rules": {
							Type:     schema.TypeSet,
							Required: true,
							Set:      rulesHash,
							Elem: &schema.Resource{
								Schema: map[string]*schema.Schema{
									"id": {
										Type:         schema.TypeString,
										Optional:     true,
										ValidateFunc: validateS3BucketReplicationRuleId,
									},
									"destination": {
										Type:     schema.TypeSet,
										MaxItems: 1,
										MinItems: 1,
										Required: true,
										Set:      destinationHash,
										Elem: &schema.Resource{
											Schema: map[string]*schema.Schema{
												"bucket": {
													Type:         schema.TypeString,
													Required:     true,
													ValidateFunc: validateArn,
												},
												"storage_class": {
													Type:         schema.TypeString,
													Optional:     true,
													ValidateFunc: validateS3BucketReplicationDestinationStorageClass,
												},
											},
										},
									},
									"prefix": {
										Type:         schema.TypeString,
										Required:     true,
										ValidateFunc: validateS3BucketReplicationRulePrefix,
									},
									"status": {
										Type:         schema.TypeString,
										Required:     true,
										ValidateFunc: validateS3BucketReplicationRuleStatus,
									},
								},
							},
						},
					},
				},
			},

			"tags": tagsSchema(),
		},
	}
}

// resourceAwsS3BucketCreate creates the bucket (retrying transient
// "OperationAborted" errors for up to 5 minutes) and then delegates to
// resourceAwsS3BucketUpdate to apply all other configured attributes.
func resourceAwsS3BucketCreate(d *schema.ResourceData, meta interface{}) error {
	s3conn := meta.(*AWSClient).s3conn

	// Get the bucket and acl
	bucket := d.Get("bucket").(string)
	acl := d.Get("acl").(string)

	log.Printf("[DEBUG] S3 bucket create: %s, ACL: %s", bucket, acl)

	req := &s3.CreateBucketInput{
		Bucket: aws.String(bucket),
		ACL:    aws.String(acl),
	}

	// Explicit "region" in config wins; otherwise use the provider's region.
	var awsRegion string
	if region, ok := d.GetOk("region"); ok {
		awsRegion = region.(string)
	} else {
		awsRegion = meta.(*AWSClient).region
	}
	log.Printf("[DEBUG] S3 bucket create: %s, using region: %s", bucket, awsRegion)

	// Special case us-east-1 region and do not set the LocationConstraint.
	// See "Request Elements: http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUT.html
	if awsRegion != "us-east-1" {
		req.CreateBucketConfiguration = &s3.CreateBucketConfiguration{
			LocationConstraint: aws.String(awsRegion),
		}
	}

	err := resource.Retry(5*time.Minute, func() *resource.RetryError {
		log.Printf("[DEBUG] Trying to create new S3 bucket: %q", bucket)
		_, err := s3conn.CreateBucket(req)
		if awsErr, ok := err.(awserr.Error); ok {
			// "OperationAborted" is returned while a previous delete of the
			// same bucket name is still settling; retry it.
			if awsErr.Code() == "OperationAborted" {
				log.Printf("[WARN] Got an error while trying to create S3 bucket %s: %s", bucket, err)
				return resource.RetryableError(
					fmt.Errorf("[WARN] Error creating S3 bucket %s, retrying: %s",
						bucket, err))
			}
		}
		if err != nil {
			return resource.NonRetryableError(err)
		}

		return nil
	})

	if err != nil {
		return fmt.Errorf("Error creating S3 bucket: %s", err)
	}

	// Assign the bucket name as the resource ID
	d.SetId(bucket)

	return resourceAwsS3BucketUpdate(d, meta)
}

// resourceAwsS3BucketUpdate pushes each changed attribute to the S3 API and
// then re-reads the bucket state.
func resourceAwsS3BucketUpdate(d *schema.ResourceData, meta interface{}) error {
	s3conn := meta.(*AWSClient).s3conn
	// Tags are always synced; the remaining attributes only when changed.
	if err := setTagsS3(s3conn, d); err != nil {
		return err
	}

	if d.HasChange("policy") {
		if err := resourceAwsS3BucketPolicyUpdate(s3conn, d); err != nil {
			return err
		}
	}

	if d.HasChange("cors_rule") {
		if err := resourceAwsS3BucketCorsUpdate(s3conn, d); err != nil {
			return err
		}
	}

	if d.HasChange("website") {
		if err := resourceAwsS3BucketWebsiteUpdate(s3conn, d); err != nil {
			return err
		}
	}

	if d.HasChange("versioning") {
		if err := resourceAwsS3BucketVersioningUpdate(s3conn, d); err != nil {
			return err
		}
	}
	if d.HasChange("acl") {
		if err := resourceAwsS3BucketAclUpdate(s3conn, d); err != nil {
			return err
		}
	}

	if d.HasChange("logging") {
		if err := resourceAwsS3BucketLoggingUpdate(s3conn, d); err != nil {
			return err
		}
	}

	if d.HasChange("lifecycle_rule") {
		if err := resourceAwsS3BucketLifecycleUpdate(s3conn, d); err != nil {
			return err
		}
	}

	if d.HasChange("acceleration_status") {
		if err := resourceAwsS3BucketAccelerationUpdate(s3conn, d); err != nil {
			return err
		}
	}

	if d.HasChange("request_payer") {
		if err := resourceAwsS3BucketRequestPayerUpdate(s3conn, d); err != nil {
			return err
		}
	}

	if d.HasChange("replication_configuration") {
		if err := resourceAwsS3BucketReplicationConfigurationUpdate(s3conn, d); err != nil {
			return err
		}
	}

	return resourceAwsS3BucketRead(d, meta)
}

// resourceAwsS3BucketRead refreshes every bucket attribute from the S3 API
// into state, clearing the resource ID if the bucket no longer exists.
func resourceAwsS3BucketRead(d *schema.ResourceData, meta interface{}) error {
	s3conn := meta.(*AWSClient).s3conn

	var err error
	// HeadBucket is the cheapest existence check.
	_, err = s3conn.HeadBucket(&s3.HeadBucketInput{
		Bucket: aws.String(d.Id()),
	})
	if err != nil {
		if awsError, ok := err.(awserr.RequestFailure); ok && awsError.StatusCode() == 404 {
			log.Printf("[WARN] S3 Bucket (%s) not found, error code (404)", d.Id())
d.SetId("") 518 return nil 519 } else { 520 // some of the AWS SDK's errors can be empty strings, so let's add 521 // some additional context. 522 return fmt.Errorf("error reading S3 bucket \"%s\": %s", d.Id(), err) 523 } 524 } 525 526 // In the import case, we won't have this 527 if _, ok := d.GetOk("bucket"); !ok { 528 d.Set("bucket", d.Id()) 529 } 530 531 // Read the policy 532 if _, ok := d.GetOk("policy"); ok { 533 pol, err := s3conn.GetBucketPolicy(&s3.GetBucketPolicyInput{ 534 Bucket: aws.String(d.Id()), 535 }) 536 log.Printf("[DEBUG] S3 bucket: %s, read policy: %v", d.Id(), pol) 537 if err != nil { 538 if err := d.Set("policy", ""); err != nil { 539 return err 540 } 541 } else { 542 if v := pol.Policy; v == nil { 543 if err := d.Set("policy", ""); err != nil { 544 return err 545 } 546 } else { 547 policy, err := normalizeJsonString(*v) 548 if err != nil { 549 return errwrap.Wrapf("policy contains an invalid JSON: {{err}}", err) 550 } 551 d.Set("policy", policy) 552 } 553 } 554 } 555 556 // Read the CORS 557 cors, err := s3conn.GetBucketCors(&s3.GetBucketCorsInput{ 558 Bucket: aws.String(d.Id()), 559 }) 560 if err != nil { 561 // An S3 Bucket might not have CORS configuration set. 562 if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() != "NoSuchCORSConfiguration" { 563 return err 564 } 565 log.Printf("[WARN] S3 bucket: %s, no CORS configuration could be found.", d.Id()) 566 } 567 log.Printf("[DEBUG] S3 bucket: %s, read CORS: %v", d.Id(), cors) 568 if cors.CORSRules != nil { 569 rules := make([]map[string]interface{}, 0, len(cors.CORSRules)) 570 for _, ruleObject := range cors.CORSRules { 571 rule := make(map[string]interface{}) 572 rule["allowed_headers"] = flattenStringList(ruleObject.AllowedHeaders) 573 rule["allowed_methods"] = flattenStringList(ruleObject.AllowedMethods) 574 rule["allowed_origins"] = flattenStringList(ruleObject.AllowedOrigins) 575 // Both the "ExposeHeaders" and "MaxAgeSeconds" might not be set. 
576 if ruleObject.AllowedOrigins != nil { 577 rule["expose_headers"] = flattenStringList(ruleObject.ExposeHeaders) 578 } 579 if ruleObject.MaxAgeSeconds != nil { 580 rule["max_age_seconds"] = int(*ruleObject.MaxAgeSeconds) 581 } 582 rules = append(rules, rule) 583 } 584 if err := d.Set("cors_rule", rules); err != nil { 585 return err 586 } 587 } 588 589 // Read the website configuration 590 ws, err := s3conn.GetBucketWebsite(&s3.GetBucketWebsiteInput{ 591 Bucket: aws.String(d.Id()), 592 }) 593 var websites []map[string]interface{} 594 if err == nil { 595 w := make(map[string]interface{}) 596 597 if v := ws.IndexDocument; v != nil { 598 w["index_document"] = *v.Suffix 599 } 600 601 if v := ws.ErrorDocument; v != nil { 602 w["error_document"] = *v.Key 603 } 604 605 if v := ws.RedirectAllRequestsTo; v != nil { 606 if v.Protocol == nil { 607 w["redirect_all_requests_to"] = *v.HostName 608 } else { 609 var host string 610 var path string 611 parsedHostName, err := url.Parse(*v.HostName) 612 if err == nil { 613 host = parsedHostName.Host 614 path = parsedHostName.Path 615 } else { 616 host = *v.HostName 617 path = "" 618 } 619 620 w["redirect_all_requests_to"] = (&url.URL{ 621 Host: host, 622 Path: path, 623 Scheme: *v.Protocol, 624 }).String() 625 } 626 } 627 628 if v := ws.RoutingRules; v != nil { 629 rr, err := normalizeRoutingRules(v) 630 if err != nil { 631 return fmt.Errorf("Error while marshaling routing rules: %s", err) 632 } 633 w["routing_rules"] = rr 634 } 635 636 websites = append(websites, w) 637 } 638 if err := d.Set("website", websites); err != nil { 639 return err 640 } 641 642 // Read the versioning configuration 643 versioning, err := s3conn.GetBucketVersioning(&s3.GetBucketVersioningInput{ 644 Bucket: aws.String(d.Id()), 645 }) 646 if err != nil { 647 return err 648 } 649 log.Printf("[DEBUG] S3 Bucket: %s, versioning: %v", d.Id(), versioning) 650 if versioning != nil { 651 vcl := make([]map[string]interface{}, 0, 1) 652 vc := 
make(map[string]interface{}) 653 if versioning.Status != nil && *versioning.Status == s3.BucketVersioningStatusEnabled { 654 vc["enabled"] = true 655 } else { 656 vc["enabled"] = false 657 } 658 659 if versioning.MFADelete != nil && *versioning.MFADelete == s3.MFADeleteEnabled { 660 vc["mfa_delete"] = true 661 } else { 662 vc["mfa_delete"] = false 663 } 664 vcl = append(vcl, vc) 665 if err := d.Set("versioning", vcl); err != nil { 666 return err 667 } 668 } 669 670 // Read the acceleration status 671 accelerate, err := s3conn.GetBucketAccelerateConfiguration(&s3.GetBucketAccelerateConfigurationInput{ 672 Bucket: aws.String(d.Id()), 673 }) 674 if err != nil { 675 // Amazon S3 Transfer Acceleration might not be supported in the 676 // given region, for example, China (Beijing) and the Government 677 // Cloud does not support this feature at the moment. 678 if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() != "UnsupportedArgument" { 679 return err 680 } 681 682 var awsRegion string 683 if region, ok := d.GetOk("region"); ok { 684 awsRegion = region.(string) 685 } else { 686 awsRegion = meta.(*AWSClient).region 687 } 688 689 log.Printf("[WARN] S3 bucket: %s, the S3 Transfer Acceleration is not supported in the region: %s", d.Id(), awsRegion) 690 } else { 691 log.Printf("[DEBUG] S3 bucket: %s, read Acceleration: %v", d.Id(), accelerate) 692 d.Set("acceleration_status", accelerate.Status) 693 } 694 695 // Read the request payer configuration. 
696 payer, err := s3conn.GetBucketRequestPayment(&s3.GetBucketRequestPaymentInput{ 697 Bucket: aws.String(d.Id()), 698 }) 699 if err != nil { 700 return err 701 } 702 log.Printf("[DEBUG] S3 Bucket: %s, read request payer: %v", d.Id(), payer) 703 if payer.Payer != nil { 704 if err := d.Set("request_payer", *payer.Payer); err != nil { 705 return err 706 } 707 } 708 709 // Read the logging configuration 710 logging, err := s3conn.GetBucketLogging(&s3.GetBucketLoggingInput{ 711 Bucket: aws.String(d.Id()), 712 }) 713 if err != nil { 714 return err 715 } 716 717 log.Printf("[DEBUG] S3 Bucket: %s, logging: %v", d.Id(), logging) 718 if v := logging.LoggingEnabled; v != nil { 719 lcl := make([]map[string]interface{}, 0, 1) 720 lc := make(map[string]interface{}) 721 if *v.TargetBucket != "" { 722 lc["target_bucket"] = *v.TargetBucket 723 } 724 if *v.TargetPrefix != "" { 725 lc["target_prefix"] = *v.TargetPrefix 726 } 727 lcl = append(lcl, lc) 728 if err := d.Set("logging", lcl); err != nil { 729 return err 730 } 731 } 732 733 // Read the lifecycle configuration 734 lifecycle, err := s3conn.GetBucketLifecycleConfiguration(&s3.GetBucketLifecycleConfigurationInput{ 735 Bucket: aws.String(d.Id()), 736 }) 737 if err != nil { 738 if awsError, ok := err.(awserr.RequestFailure); ok && awsError.StatusCode() != 404 { 739 return err 740 } 741 } 742 log.Printf("[DEBUG] S3 Bucket: %s, lifecycle: %v", d.Id(), lifecycle) 743 if len(lifecycle.Rules) > 0 { 744 rules := make([]map[string]interface{}, 0, len(lifecycle.Rules)) 745 746 for _, lifecycleRule := range lifecycle.Rules { 747 rule := make(map[string]interface{}) 748 749 // ID 750 if lifecycleRule.ID != nil && *lifecycleRule.ID != "" { 751 rule["id"] = *lifecycleRule.ID 752 } 753 // Prefix 754 if lifecycleRule.Prefix != nil && *lifecycleRule.Prefix != "" { 755 rule["prefix"] = *lifecycleRule.Prefix 756 } 757 // Enabled 758 if lifecycleRule.Status != nil { 759 if *lifecycleRule.Status == s3.ExpirationStatusEnabled { 760 rule["enabled"] 
= true 761 } else { 762 rule["enabled"] = false 763 } 764 } 765 766 // AbortIncompleteMultipartUploadDays 767 if lifecycleRule.AbortIncompleteMultipartUpload != nil { 768 if lifecycleRule.AbortIncompleteMultipartUpload.DaysAfterInitiation != nil { 769 rule["abort_incomplete_multipart_upload_days"] = int(*lifecycleRule.AbortIncompleteMultipartUpload.DaysAfterInitiation) 770 } 771 } 772 773 // expiration 774 if lifecycleRule.Expiration != nil { 775 e := make(map[string]interface{}) 776 if lifecycleRule.Expiration.Date != nil { 777 e["date"] = (*lifecycleRule.Expiration.Date).Format("2006-01-02") 778 } 779 if lifecycleRule.Expiration.Days != nil { 780 e["days"] = int(*lifecycleRule.Expiration.Days) 781 } 782 if lifecycleRule.Expiration.ExpiredObjectDeleteMarker != nil { 783 e["expired_object_delete_marker"] = *lifecycleRule.Expiration.ExpiredObjectDeleteMarker 784 } 785 rule["expiration"] = schema.NewSet(expirationHash, []interface{}{e}) 786 } 787 // noncurrent_version_expiration 788 if lifecycleRule.NoncurrentVersionExpiration != nil { 789 e := make(map[string]interface{}) 790 if lifecycleRule.NoncurrentVersionExpiration.NoncurrentDays != nil { 791 e["days"] = int(*lifecycleRule.NoncurrentVersionExpiration.NoncurrentDays) 792 } 793 rule["noncurrent_version_expiration"] = schema.NewSet(expirationHash, []interface{}{e}) 794 } 795 //// transition 796 if len(lifecycleRule.Transitions) > 0 { 797 transitions := make([]interface{}, 0, len(lifecycleRule.Transitions)) 798 for _, v := range lifecycleRule.Transitions { 799 t := make(map[string]interface{}) 800 if v.Date != nil { 801 t["date"] = (*v.Date).Format("2006-01-02") 802 } 803 if v.Days != nil { 804 t["days"] = int(*v.Days) 805 } 806 if v.StorageClass != nil { 807 t["storage_class"] = *v.StorageClass 808 } 809 transitions = append(transitions, t) 810 } 811 rule["transition"] = schema.NewSet(transitionHash, transitions) 812 } 813 // noncurrent_version_transition 814 if len(lifecycleRule.NoncurrentVersionTransitions) > 0 
{ 815 transitions := make([]interface{}, 0, len(lifecycleRule.NoncurrentVersionTransitions)) 816 for _, v := range lifecycleRule.NoncurrentVersionTransitions { 817 t := make(map[string]interface{}) 818 if v.NoncurrentDays != nil { 819 t["days"] = int(*v.NoncurrentDays) 820 } 821 if v.StorageClass != nil { 822 t["storage_class"] = *v.StorageClass 823 } 824 transitions = append(transitions, t) 825 } 826 rule["noncurrent_version_transition"] = schema.NewSet(transitionHash, transitions) 827 } 828 829 rules = append(rules, rule) 830 } 831 832 if err := d.Set("lifecycle_rule", rules); err != nil { 833 return err 834 } 835 } 836 837 // Read the bucket replication configuration 838 replication, err := s3conn.GetBucketReplication(&s3.GetBucketReplicationInput{ 839 Bucket: aws.String(d.Id()), 840 }) 841 if err != nil { 842 if awsError, ok := err.(awserr.RequestFailure); ok && awsError.StatusCode() != 404 { 843 return err 844 } 845 } 846 847 log.Printf("[DEBUG] S3 Bucket: %s, read replication configuration: %v", d.Id(), replication) 848 if r := replication.ReplicationConfiguration; r != nil { 849 if err := d.Set("replication_configuration", flattenAwsS3BucketReplicationConfiguration(replication.ReplicationConfiguration)); err != nil { 850 log.Printf("[DEBUG] Error setting replication configuration: %s", err) 851 return err 852 } 853 } 854 855 // Add the region as an attribute 856 location, err := s3conn.GetBucketLocation( 857 &s3.GetBucketLocationInput{ 858 Bucket: aws.String(d.Id()), 859 }, 860 ) 861 if err != nil { 862 return err 863 } 864 var region string 865 if location.LocationConstraint != nil { 866 region = *location.LocationConstraint 867 } 868 region = normalizeRegion(region) 869 if err := d.Set("region", region); err != nil { 870 return err 871 } 872 873 // Add the hosted zone ID for this bucket's region as an attribute 874 hostedZoneID := HostedZoneIDForRegion(region) 875 if err := d.Set("hosted_zone_id", hostedZoneID); err != nil { 876 return err 877 } 878 879 // 
Add website_endpoint as an attribute 880 websiteEndpoint, err := websiteEndpoint(s3conn, d) 881 if err != nil { 882 return err 883 } 884 if websiteEndpoint != nil { 885 if err := d.Set("website_endpoint", websiteEndpoint.Endpoint); err != nil { 886 return err 887 } 888 if err := d.Set("website_domain", websiteEndpoint.Domain); err != nil { 889 return err 890 } 891 } 892 893 tagSet, err := getTagSetS3(s3conn, d.Id()) 894 if err != nil { 895 return err 896 } 897 898 if err := d.Set("tags", tagsToMapS3(tagSet)); err != nil { 899 return err 900 } 901 902 d.Set("arn", fmt.Sprint("arn:aws:s3:::", d.Id())) 903 904 return nil 905 } 906 907 func resourceAwsS3BucketDelete(d *schema.ResourceData, meta interface{}) error { 908 s3conn := meta.(*AWSClient).s3conn 909 910 log.Printf("[DEBUG] S3 Delete Bucket: %s", d.Id()) 911 _, err := s3conn.DeleteBucket(&s3.DeleteBucketInput{ 912 Bucket: aws.String(d.Id()), 913 }) 914 if err != nil { 915 ec2err, ok := err.(awserr.Error) 916 if ok && ec2err.Code() == "BucketNotEmpty" { 917 if d.Get("force_destroy").(bool) { 918 // bucket may have things delete them 919 log.Printf("[DEBUG] S3 Bucket attempting to forceDestroy %+v", err) 920 921 bucket := d.Get("bucket").(string) 922 resp, err := s3conn.ListObjectVersions( 923 &s3.ListObjectVersionsInput{ 924 Bucket: aws.String(bucket), 925 }, 926 ) 927 928 if err != nil { 929 return fmt.Errorf("Error S3 Bucket list Object Versions err: %s", err) 930 } 931 932 objectsToDelete := make([]*s3.ObjectIdentifier, 0) 933 934 if len(resp.DeleteMarkers) != 0 { 935 936 for _, v := range resp.DeleteMarkers { 937 objectsToDelete = append(objectsToDelete, &s3.ObjectIdentifier{ 938 Key: v.Key, 939 VersionId: v.VersionId, 940 }) 941 } 942 } 943 944 if len(resp.Versions) != 0 { 945 for _, v := range resp.Versions { 946 objectsToDelete = append(objectsToDelete, &s3.ObjectIdentifier{ 947 Key: v.Key, 948 VersionId: v.VersionId, 949 }) 950 } 951 } 952 953 params := &s3.DeleteObjectsInput{ 954 Bucket: 
					aws.String(bucket),
					Delete: &s3.Delete{
						Objects: objectsToDelete,
					},
				}

				_, err = s3conn.DeleteObjects(params)

				if err != nil {
					return fmt.Errorf("Error S3 Bucket force_destroy error deleting: %s", err)
				}

				// this line recurses until all objects are deleted or an error is returned
				return resourceAwsS3BucketDelete(d, meta)
			}
		}
		return fmt.Errorf("Error deleting S3 Bucket: %s", err)
	}
	return nil
}

// resourceAwsS3BucketPolicyUpdate puts the configured bucket policy (retrying
// "MalformedPolicy" for up to a minute while referenced principals propagate),
// or deletes the policy when the attribute is empty.
func resourceAwsS3BucketPolicyUpdate(s3conn *s3.S3, d *schema.ResourceData) error {
	bucket := d.Get("bucket").(string)
	policy := d.Get("policy").(string)

	if policy != "" {
		log.Printf("[DEBUG] S3 bucket: %s, put policy: %s", bucket, policy)

		params := &s3.PutBucketPolicyInput{
			Bucket: aws.String(bucket),
			Policy: aws.String(policy),
		}

		err := resource.Retry(1*time.Minute, func() *resource.RetryError {
			if _, err := s3conn.PutBucketPolicy(params); err != nil {
				if awserr, ok := err.(awserr.Error); ok {
					if awserr.Code() == "MalformedPolicy" {
						return resource.RetryableError(awserr)
					}
				}
				return resource.NonRetryableError(err)
			}
			return nil
		})

		if err != nil {
			return fmt.Errorf("Error putting S3 policy: %s", err)
		}
	} else {
		log.Printf("[DEBUG] S3 bucket: %s, delete policy: %s", bucket, policy)
		_, err := s3conn.DeleteBucketPolicy(&s3.DeleteBucketPolicyInput{
			Bucket: aws.String(bucket),
		})

		if err != nil {
			return fmt.Errorf("Error deleting S3 policy: %s", err)
		}
	}

	return nil
}

// resourceAwsS3BucketCorsUpdate replaces the bucket's CORS configuration with
// the configured rules, or deletes the configuration when no rules remain.
func resourceAwsS3BucketCorsUpdate(s3conn *s3.S3, d *schema.ResourceData) error {
	bucket := d.Get("bucket").(string)
	rawCors := d.Get("cors_rule").([]interface{})

	if len(rawCors) == 0 {
		// Delete CORS
		log.Printf("[DEBUG] S3 bucket: %s, delete CORS", bucket)
		_, err := s3conn.DeleteBucketCors(&s3.DeleteBucketCorsInput{
			Bucket: aws.String(bucket),
		})
		if err != nil {
			return fmt.Errorf("Error deleting S3 CORS: %s", err)
		}
	} else {
		// Put CORS
		rules := make([]*s3.CORSRule, 0, len(rawCors))
		for _, cors := range rawCors {
			corsMap := cors.(map[string]interface{})
			r := &s3.CORSRule{}
			// All keys except max_age_seconds hold string lists.
			for k, v := range corsMap {
				log.Printf("[DEBUG] S3 bucket: %s, put CORS: %#v, %#v", bucket, k, v)
				if k == "max_age_seconds" {
					r.MaxAgeSeconds = aws.Int64(int64(v.(int)))
				} else {
					vMap := make([]*string, len(v.([]interface{})))
					for i, vv := range v.([]interface{}) {
						str := vv.(string)
						vMap[i] = aws.String(str)
					}
					switch k {
					case "allowed_headers":
						r.AllowedHeaders = vMap
					case "allowed_methods":
						r.AllowedMethods = vMap
					case "allowed_origins":
						r.AllowedOrigins = vMap
					case "expose_headers":
						r.ExposeHeaders = vMap
					}
				}
			}
			rules = append(rules, r)
		}
		corsInput := &s3.PutBucketCorsInput{
			Bucket: aws.String(bucket),
			CORSConfiguration: &s3.CORSConfiguration{
				CORSRules: rules,
			},
		}
		log.Printf("[DEBUG] S3 bucket: %s, put CORS: %#v", bucket, corsInput)
		_, err := s3conn.PutBucketCors(corsInput)
		if err != nil {
			return fmt.Errorf("Error putting S3 CORS: %s", err)
		}
	}

	return nil
}

// resourceAwsS3BucketWebsiteUpdate dispatches to put or delete depending on
// whether a website block is configured.
func resourceAwsS3BucketWebsiteUpdate(s3conn *s3.S3, d *schema.ResourceData) error {
	ws := d.Get("website").([]interface{})

	if len(ws) == 1 {
		var w map[string]interface{}
		if ws[0] != nil {
			w = ws[0].(map[string]interface{})
		} else {
			w = make(map[string]interface{})
		}
		return resourceAwsS3BucketWebsitePut(s3conn, d, w)
	} else if len(ws) == 0 {
		return resourceAwsS3BucketWebsiteDelete(s3conn, d)
	} else {
		return fmt.Errorf("Cannot specify more than one website.")
	}
}

// resourceAwsS3BucketWebsitePut builds a WebsiteConfiguration from the
// supplied website map and puts it on the bucket.
func resourceAwsS3BucketWebsitePut(s3conn *s3.S3, d *schema.ResourceData, website map[string]interface{}) error {
	bucket := d.Get("bucket").(string)

	var indexDocument, errorDocument, redirectAllRequestsTo, routingRules string
	if v, ok := website["index_document"]; ok {
		indexDocument = v.(string)
	}
	if v, ok := website["error_document"]; ok {
		errorDocument = v.(string)
	}
	if v, ok := website["redirect_all_requests_to"]; ok {
		redirectAllRequestsTo = v.(string)
	}
	if v, ok := website["routing_rules"]; ok {
		routingRules = v.(string)
	}

	if indexDocument == "" && redirectAllRequestsTo == "" {
		return fmt.Errorf("Must specify either index_document or redirect_all_requests_to.")
	}

	websiteConfiguration := &s3.WebsiteConfiguration{}

	if indexDocument != "" {
		websiteConfiguration.IndexDocument = &s3.IndexDocument{Suffix: aws.String(indexDocument)}
	}

	if errorDocument != "" {
		websiteConfiguration.ErrorDocument = &s3.ErrorDocument{Key: aws.String(errorDocument)}
	}

	if redirectAllRequestsTo != "" {
		// If the value parses as a URL with a scheme, split it into
		// host+path and an explicit protocol; otherwise pass it verbatim.
		redirect, err := url.Parse(redirectAllRequestsTo)
		if err == nil && redirect.Scheme != "" {
			var redirectHostBuf bytes.Buffer
			redirectHostBuf.WriteString(redirect.Host)
			if redirect.Path != "" {
				redirectHostBuf.WriteString(redirect.Path)
			}
			websiteConfiguration.RedirectAllRequestsTo = &s3.RedirectAllRequestsTo{HostName: aws.String(redirectHostBuf.String()), Protocol: aws.String(redirect.Scheme)}
		} else {
			websiteConfiguration.RedirectAllRequestsTo = &s3.RedirectAllRequestsTo{HostName: aws.String(redirectAllRequestsTo)}
		}
	}

	if routingRules != "" {
		var unmarshaledRules []*s3.RoutingRule
		if err := json.Unmarshal([]byte(routingRules), &unmarshaledRules); err != nil {
			return err
		}
		websiteConfiguration.RoutingRules = unmarshaledRules
	}

	putInput := &s3.PutBucketWebsiteInput{
		Bucket:               aws.String(bucket),
		WebsiteConfiguration: websiteConfiguration,
	}

	log.Printf("[DEBUG] S3 put bucket website: %#v", putInput)

	_, err := s3conn.PutBucketWebsite(putInput)
	if err != nil {
		return fmt.Errorf("Error putting S3 website: %s", err)
	}

	return nil
}

// resourceAwsS3BucketWebsiteDelete removes the website configuration and
// clears the derived endpoint/domain attributes from state.
func resourceAwsS3BucketWebsiteDelete(s3conn *s3.S3, d *schema.ResourceData) error {
	bucket := d.Get("bucket").(string)
	deleteInput := &s3.DeleteBucketWebsiteInput{Bucket: aws.String(bucket)}

	log.Printf("[DEBUG] S3 delete bucket website: %#v", deleteInput)

	_, err := s3conn.DeleteBucketWebsite(deleteInput)
	if err != nil {
		return fmt.Errorf("Error deleting S3 website: %s", err)
	}

	d.Set("website_endpoint", "")
	d.Set("website_domain", "")

	return nil
}

// websiteEndpoint returns the static-website endpoint for the bucket, or nil
// when no website configuration is present in state.
func websiteEndpoint(s3conn *s3.S3, d *schema.ResourceData) (*S3Website, error) {
	// If the bucket doesn't have a website configuration, return an empty
	// endpoint
	if _, ok := d.GetOk("website"); !ok {
		return nil, nil
	}

	bucket := d.Get("bucket").(string)

	// Lookup the region for this bucket
	location, err := s3conn.GetBucketLocation(
		&s3.GetBucketLocationInput{
			Bucket: aws.String(bucket),
		},
	)
	if err != nil {
		return nil, err
	}
	var region string
	if location.LocationConstraint != nil {
		region = *location.LocationConstraint
	}

	return WebsiteEndpoint(bucket, region), nil
}

// WebsiteEndpoint returns the S3 website endpoint and domain for the given
// bucket and region.
func WebsiteEndpoint(bucket string, region string) *S3Website {
	domain := WebsiteDomainUrl(region)
	return &S3Website{Endpoint: fmt.Sprintf("%s.%s", bucket, domain), Domain: domain}
}

// WebsiteDomainUrl returns the regional S3 website domain name.
func WebsiteDomainUrl(region string) string {
	region = normalizeRegion(region)

	// New regions uses different syntax for website endpoints
	//
http://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteEndpoints.html 1214 if isOldRegion(region) { 1215 return fmt.Sprintf("s3-website-%s.amazonaws.com", region) 1216 } 1217 return fmt.Sprintf("s3-website.%s.amazonaws.com", region) 1218 } 1219 1220 func isOldRegion(region string) bool { 1221 oldRegions := []string{ 1222 "ap-northeast-1", 1223 "ap-southeast-1", 1224 "ap-southeast-2", 1225 "eu-west-1", 1226 "sa-east-1", 1227 "us-east-1", 1228 "us-gov-west-1", 1229 "us-west-1", 1230 "us-west-2", 1231 } 1232 for _, r := range oldRegions { 1233 if region == r { 1234 return true 1235 } 1236 } 1237 return false 1238 } 1239 1240 func resourceAwsS3BucketAclUpdate(s3conn *s3.S3, d *schema.ResourceData) error { 1241 acl := d.Get("acl").(string) 1242 bucket := d.Get("bucket").(string) 1243 1244 i := &s3.PutBucketAclInput{ 1245 Bucket: aws.String(bucket), 1246 ACL: aws.String(acl), 1247 } 1248 log.Printf("[DEBUG] S3 put bucket ACL: %#v", i) 1249 1250 _, err := s3conn.PutBucketAcl(i) 1251 if err != nil { 1252 return fmt.Errorf("Error putting S3 ACL: %s", err) 1253 } 1254 1255 return nil 1256 } 1257 1258 func resourceAwsS3BucketVersioningUpdate(s3conn *s3.S3, d *schema.ResourceData) error { 1259 v := d.Get("versioning").([]interface{}) 1260 bucket := d.Get("bucket").(string) 1261 vc := &s3.VersioningConfiguration{} 1262 1263 if len(v) > 0 { 1264 c := v[0].(map[string]interface{}) 1265 1266 if c["enabled"].(bool) { 1267 vc.Status = aws.String(s3.BucketVersioningStatusEnabled) 1268 } else { 1269 vc.Status = aws.String(s3.BucketVersioningStatusSuspended) 1270 } 1271 1272 if c["mfa_delete"].(bool) { 1273 vc.MFADelete = aws.String(s3.MFADeleteEnabled) 1274 } else { 1275 vc.MFADelete = aws.String(s3.MFADeleteDisabled) 1276 } 1277 1278 } else { 1279 vc.Status = aws.String(s3.BucketVersioningStatusSuspended) 1280 } 1281 1282 i := &s3.PutBucketVersioningInput{ 1283 Bucket: aws.String(bucket), 1284 VersioningConfiguration: vc, 1285 } 1286 log.Printf("[DEBUG] S3 put bucket versioning: %#v", 
i) 1287 1288 _, err := s3conn.PutBucketVersioning(i) 1289 if err != nil { 1290 return fmt.Errorf("Error putting S3 versioning: %s", err) 1291 } 1292 1293 return nil 1294 } 1295 1296 func resourceAwsS3BucketLoggingUpdate(s3conn *s3.S3, d *schema.ResourceData) error { 1297 logging := d.Get("logging").(*schema.Set).List() 1298 bucket := d.Get("bucket").(string) 1299 loggingStatus := &s3.BucketLoggingStatus{} 1300 1301 if len(logging) > 0 { 1302 c := logging[0].(map[string]interface{}) 1303 1304 loggingEnabled := &s3.LoggingEnabled{} 1305 if val, ok := c["target_bucket"]; ok { 1306 loggingEnabled.TargetBucket = aws.String(val.(string)) 1307 } 1308 if val, ok := c["target_prefix"]; ok { 1309 loggingEnabled.TargetPrefix = aws.String(val.(string)) 1310 } 1311 1312 loggingStatus.LoggingEnabled = loggingEnabled 1313 } 1314 1315 i := &s3.PutBucketLoggingInput{ 1316 Bucket: aws.String(bucket), 1317 BucketLoggingStatus: loggingStatus, 1318 } 1319 log.Printf("[DEBUG] S3 put bucket logging: %#v", i) 1320 1321 _, err := s3conn.PutBucketLogging(i) 1322 if err != nil { 1323 return fmt.Errorf("Error putting S3 logging: %s", err) 1324 } 1325 1326 return nil 1327 } 1328 1329 func resourceAwsS3BucketAccelerationUpdate(s3conn *s3.S3, d *schema.ResourceData) error { 1330 bucket := d.Get("bucket").(string) 1331 enableAcceleration := d.Get("acceleration_status").(string) 1332 1333 i := &s3.PutBucketAccelerateConfigurationInput{ 1334 Bucket: aws.String(bucket), 1335 AccelerateConfiguration: &s3.AccelerateConfiguration{ 1336 Status: aws.String(enableAcceleration), 1337 }, 1338 } 1339 log.Printf("[DEBUG] S3 put bucket acceleration: %#v", i) 1340 1341 _, err := s3conn.PutBucketAccelerateConfiguration(i) 1342 if err != nil { 1343 return fmt.Errorf("Error putting S3 acceleration: %s", err) 1344 } 1345 1346 return nil 1347 } 1348 1349 func resourceAwsS3BucketRequestPayerUpdate(s3conn *s3.S3, d *schema.ResourceData) error { 1350 bucket := d.Get("bucket").(string) 1351 payer := 
d.Get("request_payer").(string) 1352 1353 i := &s3.PutBucketRequestPaymentInput{ 1354 Bucket: aws.String(bucket), 1355 RequestPaymentConfiguration: &s3.RequestPaymentConfiguration{ 1356 Payer: aws.String(payer), 1357 }, 1358 } 1359 log.Printf("[DEBUG] S3 put bucket request payer: %#v", i) 1360 1361 _, err := s3conn.PutBucketRequestPayment(i) 1362 if err != nil { 1363 return fmt.Errorf("Error putting S3 request payer: %s", err) 1364 } 1365 1366 return nil 1367 } 1368 1369 func resourceAwsS3BucketReplicationConfigurationUpdate(s3conn *s3.S3, d *schema.ResourceData) error { 1370 bucket := d.Get("bucket").(string) 1371 replicationConfiguration := d.Get("replication_configuration").([]interface{}) 1372 1373 if len(replicationConfiguration) == 0 { 1374 i := &s3.DeleteBucketReplicationInput{ 1375 Bucket: aws.String(bucket), 1376 } 1377 1378 err := resource.Retry(1*time.Minute, func() *resource.RetryError { 1379 if _, err := s3conn.DeleteBucketReplication(i); err != nil { 1380 return resource.NonRetryableError(err) 1381 } 1382 return nil 1383 }) 1384 if err != nil { 1385 return fmt.Errorf("Error removing S3 bucket replication: %s", err) 1386 } 1387 return nil 1388 } 1389 1390 hasVersioning := false 1391 // Validate that bucket versioning is enabled 1392 if versioning, ok := d.GetOk("versioning"); ok { 1393 v := versioning.([]interface{}) 1394 1395 if v[0].(map[string]interface{})["enabled"].(bool) { 1396 hasVersioning = true 1397 } 1398 } 1399 1400 if !hasVersioning { 1401 return fmt.Errorf("versioning must be enabled to allow S3 bucket replication") 1402 } 1403 1404 c := replicationConfiguration[0].(map[string]interface{}) 1405 1406 rc := &s3.ReplicationConfiguration{} 1407 if val, ok := c["role"]; ok { 1408 rc.Role = aws.String(val.(string)) 1409 } 1410 1411 rcRules := c["rules"].(*schema.Set).List() 1412 rules := []*s3.ReplicationRule{} 1413 for _, v := range rcRules { 1414 rr := v.(map[string]interface{}) 1415 rcRule := &s3.ReplicationRule{ 1416 Prefix: 
aws.String(rr["prefix"].(string)), 1417 Status: aws.String(rr["status"].(string)), 1418 } 1419 1420 if rrid, ok := rr["id"]; ok { 1421 rcRule.ID = aws.String(rrid.(string)) 1422 } 1423 1424 ruleDestination := &s3.Destination{} 1425 if destination, ok := rr["destination"]; ok { 1426 dest := destination.(*schema.Set).List() 1427 1428 bd := dest[0].(map[string]interface{}) 1429 ruleDestination.Bucket = aws.String(bd["bucket"].(string)) 1430 1431 if storageClass, ok := bd["storage_class"]; ok { 1432 ruleDestination.StorageClass = aws.String(storageClass.(string)) 1433 } 1434 } 1435 rcRule.Destination = ruleDestination 1436 1437 rules = append(rules, rcRule) 1438 } 1439 1440 rc.Rules = rules 1441 i := &s3.PutBucketReplicationInput{ 1442 Bucket: aws.String(bucket), 1443 ReplicationConfiguration: rc, 1444 } 1445 log.Printf("[DEBUG] S3 put bucket replication configuration: %#v", i) 1446 1447 _, err := s3conn.PutBucketReplication(i) 1448 if err != nil { 1449 return fmt.Errorf("Error putting S3 replication configuration: %s", err) 1450 } 1451 1452 return nil 1453 } 1454 1455 func resourceAwsS3BucketLifecycleUpdate(s3conn *s3.S3, d *schema.ResourceData) error { 1456 bucket := d.Get("bucket").(string) 1457 1458 lifecycleRules := d.Get("lifecycle_rule").([]interface{}) 1459 1460 if len(lifecycleRules) == 0 { 1461 i := &s3.DeleteBucketLifecycleInput{ 1462 Bucket: aws.String(bucket), 1463 } 1464 1465 err := resource.Retry(1*time.Minute, func() *resource.RetryError { 1466 if _, err := s3conn.DeleteBucketLifecycle(i); err != nil { 1467 return resource.NonRetryableError(err) 1468 } 1469 return nil 1470 }) 1471 if err != nil { 1472 return fmt.Errorf("Error removing S3 lifecycle: %s", err) 1473 } 1474 return nil 1475 } 1476 1477 rules := make([]*s3.LifecycleRule, 0, len(lifecycleRules)) 1478 1479 for i, lifecycleRule := range lifecycleRules { 1480 r := lifecycleRule.(map[string]interface{}) 1481 1482 rule := &s3.LifecycleRule{ 1483 Prefix: aws.String(r["prefix"].(string)), 1484 } 1485 
1486 // ID 1487 if val, ok := r["id"].(string); ok && val != "" { 1488 rule.ID = aws.String(val) 1489 } else { 1490 rule.ID = aws.String(resource.PrefixedUniqueId("tf-s3-lifecycle-")) 1491 } 1492 1493 // Enabled 1494 if val, ok := r["enabled"].(bool); ok && val { 1495 rule.Status = aws.String(s3.ExpirationStatusEnabled) 1496 } else { 1497 rule.Status = aws.String(s3.ExpirationStatusDisabled) 1498 } 1499 1500 // AbortIncompleteMultipartUpload 1501 if val, ok := r["abort_incomplete_multipart_upload_days"].(int); ok && val > 0 { 1502 rule.AbortIncompleteMultipartUpload = &s3.AbortIncompleteMultipartUpload{ 1503 DaysAfterInitiation: aws.Int64(int64(val)), 1504 } 1505 } 1506 1507 // Expiration 1508 expiration := d.Get(fmt.Sprintf("lifecycle_rule.%d.expiration", i)).(*schema.Set).List() 1509 if len(expiration) > 0 { 1510 e := expiration[0].(map[string]interface{}) 1511 i := &s3.LifecycleExpiration{} 1512 1513 if val, ok := e["date"].(string); ok && val != "" { 1514 t, err := time.Parse(time.RFC3339, fmt.Sprintf("%sT00:00:00Z", val)) 1515 if err != nil { 1516 return fmt.Errorf("Error Parsing AWS S3 Bucket Lifecycle Expiration Date: %s", err.Error()) 1517 } 1518 i.Date = aws.Time(t) 1519 } else if val, ok := e["days"].(int); ok && val > 0 { 1520 i.Days = aws.Int64(int64(val)) 1521 } else if val, ok := e["expired_object_delete_marker"].(bool); ok { 1522 i.ExpiredObjectDeleteMarker = aws.Bool(val) 1523 } 1524 rule.Expiration = i 1525 } 1526 1527 // NoncurrentVersionExpiration 1528 nc_expiration := d.Get(fmt.Sprintf("lifecycle_rule.%d.noncurrent_version_expiration", i)).(*schema.Set).List() 1529 if len(nc_expiration) > 0 { 1530 e := nc_expiration[0].(map[string]interface{}) 1531 1532 if val, ok := e["days"].(int); ok && val > 0 { 1533 rule.NoncurrentVersionExpiration = &s3.NoncurrentVersionExpiration{ 1534 NoncurrentDays: aws.Int64(int64(val)), 1535 } 1536 } 1537 } 1538 1539 // Transitions 1540 transitions := d.Get(fmt.Sprintf("lifecycle_rule.%d.transition", 
i)).(*schema.Set).List() 1541 if len(transitions) > 0 { 1542 rule.Transitions = make([]*s3.Transition, 0, len(transitions)) 1543 for _, transition := range transitions { 1544 transition := transition.(map[string]interface{}) 1545 i := &s3.Transition{} 1546 if val, ok := transition["date"].(string); ok && val != "" { 1547 t, err := time.Parse(time.RFC3339, fmt.Sprintf("%sT00:00:00Z", val)) 1548 if err != nil { 1549 return fmt.Errorf("Error Parsing AWS S3 Bucket Lifecycle Expiration Date: %s", err.Error()) 1550 } 1551 i.Date = aws.Time(t) 1552 } else if val, ok := transition["days"].(int); ok && val > 0 { 1553 i.Days = aws.Int64(int64(val)) 1554 } 1555 if val, ok := transition["storage_class"].(string); ok && val != "" { 1556 i.StorageClass = aws.String(val) 1557 } 1558 1559 rule.Transitions = append(rule.Transitions, i) 1560 } 1561 } 1562 // NoncurrentVersionTransitions 1563 nc_transitions := d.Get(fmt.Sprintf("lifecycle_rule.%d.noncurrent_version_transition", i)).(*schema.Set).List() 1564 if len(nc_transitions) > 0 { 1565 rule.NoncurrentVersionTransitions = make([]*s3.NoncurrentVersionTransition, 0, len(nc_transitions)) 1566 for _, transition := range nc_transitions { 1567 transition := transition.(map[string]interface{}) 1568 i := &s3.NoncurrentVersionTransition{} 1569 if val, ok := transition["days"].(int); ok && val > 0 { 1570 i.NoncurrentDays = aws.Int64(int64(val)) 1571 } 1572 if val, ok := transition["storage_class"].(string); ok && val != "" { 1573 i.StorageClass = aws.String(val) 1574 } 1575 1576 rule.NoncurrentVersionTransitions = append(rule.NoncurrentVersionTransitions, i) 1577 } 1578 } 1579 1580 rules = append(rules, rule) 1581 } 1582 1583 i := &s3.PutBucketLifecycleConfigurationInput{ 1584 Bucket: aws.String(bucket), 1585 LifecycleConfiguration: &s3.BucketLifecycleConfiguration{ 1586 Rules: rules, 1587 }, 1588 } 1589 1590 err := resource.Retry(1*time.Minute, func() *resource.RetryError { 1591 if _, err := s3conn.PutBucketLifecycleConfiguration(i); err 
!= nil { 1592 return resource.NonRetryableError(err) 1593 } 1594 return nil 1595 }) 1596 if err != nil { 1597 return fmt.Errorf("Error putting S3 lifecycle: %s", err) 1598 } 1599 1600 return nil 1601 } 1602 1603 func flattenAwsS3BucketReplicationConfiguration(r *s3.ReplicationConfiguration) []map[string]interface{} { 1604 replication_configuration := make([]map[string]interface{}, 0, 1) 1605 m := make(map[string]interface{}) 1606 1607 if r.Role != nil && *r.Role != "" { 1608 m["role"] = *r.Role 1609 } 1610 1611 rules := make([]interface{}, 0, len(r.Rules)) 1612 for _, v := range r.Rules { 1613 t := make(map[string]interface{}) 1614 if v.Destination != nil { 1615 rd := make(map[string]interface{}) 1616 if v.Destination.Bucket != nil { 1617 rd["bucket"] = *v.Destination.Bucket 1618 } 1619 if v.Destination.StorageClass != nil { 1620 rd["storage_class"] = *v.Destination.StorageClass 1621 } 1622 t["destination"] = schema.NewSet(destinationHash, []interface{}{rd}) 1623 } 1624 1625 if v.ID != nil { 1626 t["id"] = *v.ID 1627 } 1628 if v.Prefix != nil { 1629 t["prefix"] = *v.Prefix 1630 } 1631 if v.Status != nil { 1632 t["status"] = *v.Status 1633 } 1634 rules = append(rules, t) 1635 } 1636 m["rules"] = schema.NewSet(rulesHash, rules) 1637 1638 replication_configuration = append(replication_configuration, m) 1639 1640 return replication_configuration 1641 } 1642 1643 func normalizeRoutingRules(w []*s3.RoutingRule) (string, error) { 1644 withNulls, err := json.Marshal(w) 1645 if err != nil { 1646 return "", err 1647 } 1648 1649 var rules []map[string]interface{} 1650 if err := json.Unmarshal(withNulls, &rules); err != nil { 1651 return "", err 1652 } 1653 1654 var cleanRules []map[string]interface{} 1655 for _, rule := range rules { 1656 cleanRules = append(cleanRules, removeNil(rule)) 1657 } 1658 1659 withoutNulls, err := json.Marshal(cleanRules) 1660 if err != nil { 1661 return "", err 1662 } 1663 1664 return string(withoutNulls), nil 1665 } 1666 1667 func removeNil(data 
map[string]interface{}) map[string]interface{} { 1668 withoutNil := make(map[string]interface{}) 1669 1670 for k, v := range data { 1671 if v == nil { 1672 continue 1673 } 1674 1675 switch v.(type) { 1676 case map[string]interface{}: 1677 withoutNil[k] = removeNil(v.(map[string]interface{})) 1678 default: 1679 withoutNil[k] = v 1680 } 1681 } 1682 1683 return withoutNil 1684 } 1685 1686 // DEPRECATED. Please consider using `normalizeJsonString` function instead. 1687 func normalizeJson(jsonString interface{}) string { 1688 if jsonString == nil || jsonString == "" { 1689 return "" 1690 } 1691 var j interface{} 1692 err := json.Unmarshal([]byte(jsonString.(string)), &j) 1693 if err != nil { 1694 return fmt.Sprintf("Error parsing JSON: %s", err) 1695 } 1696 b, _ := json.Marshal(j) 1697 return string(b[:]) 1698 } 1699 1700 func normalizeRegion(region string) string { 1701 // Default to us-east-1 if the bucket doesn't have a region: 1702 // http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html 1703 if region == "" { 1704 region = "us-east-1" 1705 } 1706 1707 return region 1708 } 1709 1710 func validateS3BucketAccelerationStatus(v interface{}, k string) (ws []string, errors []error) { 1711 validTypes := map[string]struct{}{ 1712 "Enabled": struct{}{}, 1713 "Suspended": struct{}{}, 1714 } 1715 1716 if _, ok := validTypes[v.(string)]; !ok { 1717 errors = append(errors, fmt.Errorf("S3 Bucket Acceleration Status %q is invalid, must be %q or %q", v.(string), "Enabled", "Suspended")) 1718 } 1719 return 1720 } 1721 1722 func validateS3BucketRequestPayerType(v interface{}, k string) (ws []string, errors []error) { 1723 value := v.(string) 1724 if value != s3.PayerRequester && value != s3.PayerBucketOwner { 1725 errors = append(errors, fmt.Errorf( 1726 "%q contains an invalid Request Payer type %q. 
Valid types are either %q or %q", 1727 k, value, s3.PayerRequester, s3.PayerBucketOwner)) 1728 } 1729 return 1730 } 1731 1732 func expirationHash(v interface{}) int { 1733 var buf bytes.Buffer 1734 m := v.(map[string]interface{}) 1735 if v, ok := m["date"]; ok { 1736 buf.WriteString(fmt.Sprintf("%s-", v.(string))) 1737 } 1738 if v, ok := m["days"]; ok { 1739 buf.WriteString(fmt.Sprintf("%d-", v.(int))) 1740 } 1741 if v, ok := m["expired_object_delete_marker"]; ok { 1742 buf.WriteString(fmt.Sprintf("%t-", v.(bool))) 1743 } 1744 return hashcode.String(buf.String()) 1745 } 1746 1747 func transitionHash(v interface{}) int { 1748 var buf bytes.Buffer 1749 m := v.(map[string]interface{}) 1750 if v, ok := m["date"]; ok { 1751 buf.WriteString(fmt.Sprintf("%s-", v.(string))) 1752 } 1753 if v, ok := m["days"]; ok { 1754 buf.WriteString(fmt.Sprintf("%d-", v.(int))) 1755 } 1756 if v, ok := m["storage_class"]; ok { 1757 buf.WriteString(fmt.Sprintf("%s-", v.(string))) 1758 } 1759 return hashcode.String(buf.String()) 1760 } 1761 1762 func rulesHash(v interface{}) int { 1763 var buf bytes.Buffer 1764 m := v.(map[string]interface{}) 1765 1766 if v, ok := m["id"]; ok { 1767 buf.WriteString(fmt.Sprintf("%s-", v.(string))) 1768 } 1769 if v, ok := m["prefix"]; ok { 1770 buf.WriteString(fmt.Sprintf("%s-", v.(string))) 1771 } 1772 if v, ok := m["status"]; ok { 1773 buf.WriteString(fmt.Sprintf("%s-", v.(string))) 1774 } 1775 return hashcode.String(buf.String()) 1776 } 1777 1778 func destinationHash(v interface{}) int { 1779 var buf bytes.Buffer 1780 m := v.(map[string]interface{}) 1781 1782 if v, ok := m["bucket"]; ok { 1783 buf.WriteString(fmt.Sprintf("%s-", v.(string))) 1784 } 1785 if v, ok := m["storage_class"]; ok { 1786 buf.WriteString(fmt.Sprintf("%s-", v.(string))) 1787 } 1788 return hashcode.String(buf.String()) 1789 } 1790 1791 type S3Website struct { 1792 Endpoint, Domain string 1793 }