github.com/vtorhonen/terraform@v0.9.0-beta2.0.20170307220345-5d894e4ffda7/builtin/providers/aws/resource_aws_s3_bucket.go

package aws

import (
	"bytes"
	"encoding/json"
	"fmt"
	"log"
	"net/url"
	"regexp"
	"strings"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/s3"
	"github.com/hashicorp/errwrap"
	"github.com/hashicorp/terraform/helper/hashcode"
	"github.com/hashicorp/terraform/helper/resource"
	"github.com/hashicorp/terraform/helper/schema"
)

func resourceAwsS3Bucket() *schema.Resource {
	return &schema.Resource{
		Create: resourceAwsS3BucketCreate,
		Read:   resourceAwsS3BucketRead,
		Update: resourceAwsS3BucketUpdate,
		Delete: resourceAwsS3BucketDelete,
		Importer: &schema.ResourceImporter{
			State: resourceAwsS3BucketImportState,
		},

		Schema: map[string]*schema.Schema{
			"bucket": {
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},

			"bucket_domain_name": {
				Type:     schema.TypeString,
				Computed: true,
			},

			"arn": {
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
			},

			"acl": {
				Type:     schema.TypeString,
				Default:  "private",
				Optional: true,
			},

			"policy": {
				Type:             schema.TypeString,
				Optional:         true,
				ValidateFunc:     validateJsonString,
				DiffSuppressFunc: suppressEquivalentAwsPolicyDiffs,
			},

			"cors_rule": {
				Type:     schema.TypeList,
				Optional: true,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"allowed_headers": {
							Type:     schema.TypeList,
							Optional: true,
							Elem:     &schema.Schema{Type: schema.TypeString},
						},
						"allowed_methods": {
							Type:     schema.TypeList,
							Required: true,
							Elem:     &schema.Schema{Type: schema.TypeString},
						},
						"allowed_origins": {
							Type:     schema.TypeList,
							Required: true,
							Elem:     &schema.Schema{Type: schema.TypeString},
						},
						"expose_headers": {
							Type:     schema.TypeList,
							Optional: true,
							Elem:     &schema.Schema{Type: schema.TypeString},
						},
						"max_age_seconds": {
							Type:     schema.TypeInt,
							Optional: true,
						},
					},
				},
			},

			"website": {
				Type:     schema.TypeList,
				Optional: true,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"index_document": {
							Type:     schema.TypeString,
							Optional: true,
						},

						"error_document": {
							Type:     schema.TypeString,
							Optional: true,
						},

						"redirect_all_requests_to": {
							Type: schema.TypeString,
							ConflictsWith: []string{
								"website.0.index_document",
								"website.0.error_document",
								"website.0.routing_rules",
							},
							Optional: true,
						},

						"routing_rules": {
							Type:         schema.TypeString,
							Optional:     true,
							ValidateFunc: validateJsonString,
							StateFunc: func(v interface{}) string {
								json, _ := normalizeJsonString(v)
								return json
							},
						},
					},
				},
			},

			"hosted_zone_id": {
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
			},

			"region": {
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
			},
			"website_endpoint": {
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
			},
			"website_domain": {
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
			},

			"versioning": {
				Type:     schema.TypeList,
				Optional: true,
				Computed: true,
				MaxItems: 1,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"enabled": {
							Type:     schema.TypeBool,
							Optional: true,
							Default:  false,
						},
						"mfa_delete": {
							Type:     schema.TypeBool,
							Optional: true,
							Default:  false,
						},
					},
				},
			},

			"logging": {
				Type:     schema.TypeSet,
				Optional: true,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"target_bucket": {
							Type:     schema.TypeString,
							Required: true,
						},
						"target_prefix": {
							Type:     schema.TypeString,
							Optional: true,
						},
					},
				},
				Set: func(v interface{}) int {
					var buf bytes.Buffer
					m := v.(map[string]interface{})
					buf.WriteString(fmt.Sprintf("%s-", m["target_bucket"]))
					buf.WriteString(fmt.Sprintf("%s-", m["target_prefix"]))
					return hashcode.String(buf.String())
				},
			},

			"lifecycle_rule": {
				Type:     schema.TypeList,
				Optional: true,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"id": {
							Type:         schema.TypeString,
							Optional:     true,
							Computed:     true,
							ValidateFunc: validateS3BucketLifecycleRuleId,
						},
						"prefix": {
							Type:     schema.TypeString,
							Required: true,
						},
						"enabled": {
							Type:     schema.TypeBool,
							Required: true,
						},
						"abort_incomplete_multipart_upload_days": {
							Type:     schema.TypeInt,
							Optional: true,
						},
						"expiration": {
							Type:     schema.TypeSet,
							Optional: true,
							Set:      expirationHash,
							Elem: &schema.Resource{
								Schema: map[string]*schema.Schema{
									"date": {
										Type:         schema.TypeString,
										Optional:     true,
										ValidateFunc: validateS3BucketLifecycleTimestamp,
									},
									"days": {
										Type:     schema.TypeInt,
										Optional: true,
									},
									"expired_object_delete_marker": {
										Type:     schema.TypeBool,
										Optional: true,
									},
								},
							},
						},
						"noncurrent_version_expiration": {
							Type:     schema.TypeSet,
							Optional: true,
							Set:      expirationHash,
							Elem: &schema.Resource{
								Schema: map[string]*schema.Schema{
									"days": {
										Type:     schema.TypeInt,
										Optional: true,
									},
								},
							},
						},
						"transition": {
							Type:     schema.TypeSet,
							Optional: true,
							Set:      transitionHash,
							Elem: &schema.Resource{
								Schema: map[string]*schema.Schema{
									"date": {
										Type:         schema.TypeString,
										Optional:     true,
										ValidateFunc: validateS3BucketLifecycleTimestamp,
									},
									"days": {
										Type:     schema.TypeInt,
										Optional: true,
									},
									"storage_class": {
										Type:         schema.TypeString,
										Required:     true,
										ValidateFunc: validateS3BucketLifecycleStorageClass,
									},
								},
							},
						},
						"noncurrent_version_transition": {
							Type:     schema.TypeSet,
							Optional: true,
							Set:      transitionHash,
							Elem: &schema.Resource{
								Schema: map[string]*schema.Schema{
									"days": {
										Type:     schema.TypeInt,
										Optional: true,
									},
									"storage_class": {
										Type:         schema.TypeString,
										Required:     true,
										ValidateFunc: validateS3BucketLifecycleStorageClass,
									},
								},
							},
						},
					},
				},
			},

			"force_destroy": {
				Type:     schema.TypeBool,
				Optional: true,
				Default:  false,
			},

			"acceleration_status": {
				Type:         schema.TypeString,
				Optional:     true,
				Computed:     true,
				ValidateFunc: validateS3BucketAccelerationStatus,
			},

			"request_payer": {
				Type:         schema.TypeString,
				Optional:     true,
				Computed:     true,
				ValidateFunc: validateS3BucketRequestPayerType,
			},

			"replication_configuration": {
				Type:     schema.TypeList,
				Optional: true,
				MaxItems: 1,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"role": {
							Type:     schema.TypeString,
							Required: true,
						},
						"rules": {
							Type:     schema.TypeSet,
							Required: true,
							Set:      rulesHash,
							Elem: &schema.Resource{
								Schema: map[string]*schema.Schema{
									"id": {
										Type:         schema.TypeString,
										Optional:     true,
										ValidateFunc: validateS3BucketReplicationRuleId,
									},
									"destination": {
										Type:     schema.TypeSet,
										MaxItems: 1,
										MinItems: 1,
										Required: true,
										Set:      destinationHash,
										Elem: &schema.Resource{
											Schema: map[string]*schema.Schema{
												"bucket": {
													Type:         schema.TypeString,
													Required:     true,
													ValidateFunc: validateArn,
												},
												"storage_class": {
													Type:         schema.TypeString,
													Optional:     true,
													ValidateFunc: validateS3BucketReplicationDestinationStorageClass,
												},
											},
										},
									},
									"prefix": {
										Type:         schema.TypeString,
										Required:     true,
										ValidateFunc: validateS3BucketReplicationRulePrefix,
									},
									"status": {
										Type:         schema.TypeString,
										Required:     true,
										ValidateFunc: validateS3BucketReplicationRuleStatus,
									},
								},
							},
						},
					},
				},
			},

			"tags": tagsSchema(),
		},
	}
}
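
// For reference, a minimal configuration exercising this schema might look
// like the following (the bucket name and tag values are hypothetical):
//
//	resource "aws_s3_bucket" "example" {
//	  bucket = "my-example-bucket"
//	  acl    = "private"
//
//	  versioning {
//	    enabled = true
//	  }
//
//	  tags {
//	    Environment = "test"
//	  }
//	}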

func resourceAwsS3BucketCreate(d *schema.ResourceData, meta interface{}) error {
	s3conn := meta.(*AWSClient).s3conn

	// Get the bucket and acl
	bucket := d.Get("bucket").(string)
	acl := d.Get("acl").(string)

	log.Printf("[DEBUG] S3 bucket create: %s, ACL: %s", bucket, acl)

	req := &s3.CreateBucketInput{
		Bucket: aws.String(bucket),
		ACL:    aws.String(acl),
	}

	var awsRegion string
	if region, ok := d.GetOk("region"); ok {
		awsRegion = region.(string)
	} else {
		awsRegion = meta.(*AWSClient).region
	}
	log.Printf("[DEBUG] S3 bucket create: %s, using region: %s", bucket, awsRegion)

	// Special case us-east-1 region and do not set the LocationConstraint.
	// See "Request Elements" in http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUT.html
	if awsRegion != "us-east-1" {
		req.CreateBucketConfiguration = &s3.CreateBucketConfiguration{
			LocationConstraint: aws.String(awsRegion),
		}
	}

	if err := validateS3BucketName(bucket, awsRegion); err != nil {
		return fmt.Errorf("Error validating S3 bucket name: %s", err)
	}

	err := resource.Retry(5*time.Minute, func() *resource.RetryError {
		log.Printf("[DEBUG] Trying to create new S3 bucket: %q", bucket)
		_, err := s3conn.CreateBucket(req)
		if awsErr, ok := err.(awserr.Error); ok {
			if awsErr.Code() == "OperationAborted" {
				log.Printf("[WARN] Got an error while trying to create S3 bucket %s: %s", bucket, err)
				return resource.RetryableError(
					fmt.Errorf("[WARN] Error creating S3 bucket %s, retrying: %s",
						bucket, err))
			}
		}
		if err != nil {
			return resource.NonRetryableError(err)
		}

		return nil
	})

	if err != nil {
		return fmt.Errorf("Error creating S3 bucket: %s", err)
	}

	// Assign the bucket name as the resource ID
	d.SetId(bucket)

	return resourceAwsS3BucketUpdate(d, meta)
}
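
// resourceAwsS3BucketUpdate pushes each changed attribute to S3 with its own
// API call. Note that Create ends by calling this function, so a brand-new
// bucket picks up its policy, CORS, website, versioning, and other settings
// in the same apply.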
func resourceAwsS3BucketUpdate(d *schema.ResourceData, meta interface{}) error {
	s3conn := meta.(*AWSClient).s3conn
	if err := setTagsS3(s3conn, d); err != nil {
		return fmt.Errorf("%q: %s", d.Get("bucket").(string), err)
	}

	if d.HasChange("policy") {
		if err := resourceAwsS3BucketPolicyUpdate(s3conn, d); err != nil {
			return err
		}
	}

	if d.HasChange("cors_rule") {
		if err := resourceAwsS3BucketCorsUpdate(s3conn, d); err != nil {
			return err
		}
	}

	if d.HasChange("website") {
		if err := resourceAwsS3BucketWebsiteUpdate(s3conn, d); err != nil {
			return err
		}
	}

	if d.HasChange("versioning") {
		if err := resourceAwsS3BucketVersioningUpdate(s3conn, d); err != nil {
			return err
		}
	}
	if d.HasChange("acl") {
		if err := resourceAwsS3BucketAclUpdate(s3conn, d); err != nil {
			return err
		}
	}

	if d.HasChange("logging") {
		if err := resourceAwsS3BucketLoggingUpdate(s3conn, d); err != nil {
			return err
		}
	}

	if d.HasChange("lifecycle_rule") {
		if err := resourceAwsS3BucketLifecycleUpdate(s3conn, d); err != nil {
			return err
		}
	}

	if d.HasChange("acceleration_status") {
		if err := resourceAwsS3BucketAccelerationUpdate(s3conn, d); err != nil {
			return err
		}
	}

	if d.HasChange("request_payer") {
		if err := resourceAwsS3BucketRequestPayerUpdate(s3conn, d); err != nil {
			return err
		}
	}

	if d.HasChange("replication_configuration") {
		if err := resourceAwsS3BucketReplicationConfigurationUpdate(s3conn, d); err != nil {
			return err
		}
	}

	return resourceAwsS3BucketRead(d, meta)
}

func resourceAwsS3BucketRead(d *schema.ResourceData, meta interface{}) error {
	s3conn := meta.(*AWSClient).s3conn

	var err error
	_, err = s3conn.HeadBucket(&s3.HeadBucketInput{
		Bucket: aws.String(d.Id()),
	})
	if err != nil {
		if awsError, ok := err.(awserr.RequestFailure); ok && awsError.StatusCode() == 404 {
			log.Printf("[WARN] S3 Bucket (%s) not found, error code (404)", d.Id())
			d.SetId("")
			return nil
		} else {
			// Some of the AWS SDK's errors can be empty strings, so let's add
			// some additional context.
			return fmt.Errorf("error reading S3 bucket \"%s\": %s", d.Id(), err)
		}
	}

	// In the import case we won't have the bucket name yet, so derive it
	// from the resource ID.
	if _, ok := d.GetOk("bucket"); !ok {
		d.Set("bucket", d.Id())
	}

	d.Set("bucket_domain_name", bucketDomainName(d.Get("bucket").(string)))

	// Read the policy
	if _, ok := d.GetOk("policy"); ok {
		pol, err := s3conn.GetBucketPolicy(&s3.GetBucketPolicyInput{
			Bucket: aws.String(d.Id()),
		})
		log.Printf("[DEBUG] S3 bucket: %s, read policy: %v", d.Id(), pol)
		if err != nil {
			if err := d.Set("policy", ""); err != nil {
				return err
			}
		} else {
			if v := pol.Policy; v == nil {
				if err := d.Set("policy", ""); err != nil {
					return err
				}
			} else {
				policy, err := normalizeJsonString(*v)
				if err != nil {
					return errwrap.Wrapf("policy contains an invalid JSON: {{err}}", err)
				}
				d.Set("policy", policy)
			}
		}
	}

	// Read the CORS
	cors, err := s3conn.GetBucketCors(&s3.GetBucketCorsInput{
		Bucket: aws.String(d.Id()),
	})
	if err != nil {
		// An S3 Bucket might not have CORS configuration set.
		if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() != "NoSuchCORSConfiguration" {
			return err
		}
		log.Printf("[WARN] S3 bucket: %s, no CORS configuration could be found.", d.Id())
	}
	log.Printf("[DEBUG] S3 bucket: %s, read CORS: %v", d.Id(), cors)
	if cors.CORSRules != nil {
		rules := make([]map[string]interface{}, 0, len(cors.CORSRules))
		for _, ruleObject := range cors.CORSRules {
			rule := make(map[string]interface{})
			rule["allowed_headers"] = flattenStringList(ruleObject.AllowedHeaders)
			rule["allowed_methods"] = flattenStringList(ruleObject.AllowedMethods)
			rule["allowed_origins"] = flattenStringList(ruleObject.AllowedOrigins)
			// Both "ExposeHeaders" and "MaxAgeSeconds" might not be set.
			if ruleObject.ExposeHeaders != nil {
				rule["expose_headers"] = flattenStringList(ruleObject.ExposeHeaders)
			}
			if ruleObject.MaxAgeSeconds != nil {
				rule["max_age_seconds"] = int(*ruleObject.MaxAgeSeconds)
			}
			rules = append(rules, rule)
		}
		if err := d.Set("cors_rule", rules); err != nil {
			return err
		}
	}

	// Read the website configuration
	ws, err := s3conn.GetBucketWebsite(&s3.GetBucketWebsiteInput{
		Bucket: aws.String(d.Id()),
	})
	var websites []map[string]interface{}
	if err == nil {
		w := make(map[string]interface{})

		if v := ws.IndexDocument; v != nil {
			w["index_document"] = *v.Suffix
		}

		if v := ws.ErrorDocument; v != nil {
			w["error_document"] = *v.Key
		}

		if v := ws.RedirectAllRequestsTo; v != nil {
			if v.Protocol == nil {
				w["redirect_all_requests_to"] = *v.HostName
			} else {
				var host string
				var path string
				parsedHostName, err := url.Parse(*v.HostName)
				if err == nil {
					host = parsedHostName.Host
					path = parsedHostName.Path
				} else {
					host = *v.HostName
					path = ""
				}

				w["redirect_all_requests_to"] = (&url.URL{
					Host:   host,
					Path:   path,
					Scheme: *v.Protocol,
				}).String()
			}
		}

		if v := ws.RoutingRules; v != nil {
			rr, err := normalizeRoutingRules(v)
			if err != nil {
				return fmt.Errorf("Error while marshaling routing rules: %s", err)
			}
			w["routing_rules"] = rr
		}

		websites = append(websites, w)
	}
	if err := d.Set("website", websites); err != nil {
		return err
	}

	// Read the versioning configuration
	versioning, err := s3conn.GetBucketVersioning(&s3.GetBucketVersioningInput{
		Bucket: aws.String(d.Id()),
	})
	if err != nil {
		return err
	}
	log.Printf("[DEBUG] S3 Bucket: %s, versioning: %v", d.Id(), versioning)
	if versioning != nil {
		vcl := make([]map[string]interface{}, 0, 1)
		vc := make(map[string]interface{})
		if versioning.Status != nil && *versioning.Status == s3.BucketVersioningStatusEnabled {
			vc["enabled"] = true
		} else {
			vc["enabled"] = false
		}

		if versioning.MFADelete != nil && *versioning.MFADelete == s3.MFADeleteEnabled {
			vc["mfa_delete"] = true
		} else {
			vc["mfa_delete"] = false
		}
		vcl = append(vcl, vc)
		if err := d.Set("versioning", vcl); err != nil {
			return err
		}
	}

	// Read the acceleration status
	accelerate, err := s3conn.GetBucketAccelerateConfiguration(&s3.GetBucketAccelerateConfigurationInput{
		Bucket: aws.String(d.Id()),
	})
	if err != nil {
		// Amazon S3 Transfer Acceleration might not be supported in the
		// given region; for example, China (Beijing) and AWS GovCloud (US)
		// do not support this feature at the moment.
		if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() != "UnsupportedArgument" {
			return err
		}

		var awsRegion string
		if region, ok := d.GetOk("region"); ok {
			awsRegion = region.(string)
		} else {
			awsRegion = meta.(*AWSClient).region
		}

		log.Printf("[WARN] S3 bucket: %s, the S3 Transfer Acceleration is not supported in the region: %s", d.Id(), awsRegion)
	} else {
		log.Printf("[DEBUG] S3 bucket: %s, read Acceleration: %v", d.Id(), accelerate)
		d.Set("acceleration_status", accelerate.Status)
	}

	// Read the request payer configuration
	payer, err := s3conn.GetBucketRequestPayment(&s3.GetBucketRequestPaymentInput{
		Bucket: aws.String(d.Id()),
	})
	if err != nil {
		return err
	}
	log.Printf("[DEBUG] S3 Bucket: %s, read request payer: %v", d.Id(), payer)
	if payer.Payer != nil {
		if err := d.Set("request_payer", *payer.Payer); err != nil {
			return err
		}
	}

	// Read the logging configuration
	logging, err := s3conn.GetBucketLogging(&s3.GetBucketLoggingInput{
		Bucket: aws.String(d.Id()),
	})
	if err != nil {
		return err
	}

	log.Printf("[DEBUG] S3 Bucket: %s, logging: %v", d.Id(), logging)
	if v := logging.LoggingEnabled; v != nil {
		lcl := make([]map[string]interface{}, 0, 1)
		lc := make(map[string]interface{})
		if *v.TargetBucket != "" {
			lc["target_bucket"] = *v.TargetBucket
		}
		if *v.TargetPrefix != "" {
			lc["target_prefix"] = *v.TargetPrefix
		}
		lcl = append(lcl, lc)
		if err := d.Set("logging", lcl); err != nil {
			return err
		}
	}

	// Read the lifecycle configuration
	lifecycle, err := s3conn.GetBucketLifecycleConfiguration(&s3.GetBucketLifecycleConfigurationInput{
		Bucket: aws.String(d.Id()),
	})
	if err != nil {
		if awsError, ok := err.(awserr.RequestFailure); ok && awsError.StatusCode() != 404 {
			return err
		}
	}
	log.Printf("[DEBUG] S3 Bucket: %s, lifecycle: %v", d.Id(), lifecycle)
	if len(lifecycle.Rules) > 0 {
		rules := make([]map[string]interface{}, 0, len(lifecycle.Rules))

		for _, lifecycleRule := range lifecycle.Rules {
			rule := make(map[string]interface{})

			// ID
			if lifecycleRule.ID != nil && *lifecycleRule.ID != "" {
				rule["id"] = *lifecycleRule.ID
			}
			// Prefix
			if lifecycleRule.Prefix != nil && *lifecycleRule.Prefix != "" {
				rule["prefix"] = *lifecycleRule.Prefix
			}
			// Enabled
			if lifecycleRule.Status != nil {
				if *lifecycleRule.Status == s3.ExpirationStatusEnabled {
					rule["enabled"] = true
				} else {
					rule["enabled"] = false
				}
			}

			// AbortIncompleteMultipartUploadDays
			if lifecycleRule.AbortIncompleteMultipartUpload != nil {
				if lifecycleRule.AbortIncompleteMultipartUpload.DaysAfterInitiation != nil {
					rule["abort_incomplete_multipart_upload_days"] = int(*lifecycleRule.AbortIncompleteMultipartUpload.DaysAfterInitiation)
				}
			}

			// expiration
			if lifecycleRule.Expiration != nil {
				e := make(map[string]interface{})
				if lifecycleRule.Expiration.Date != nil {
					e["date"] = (*lifecycleRule.Expiration.Date).Format("2006-01-02")
				}
				if lifecycleRule.Expiration.Days != nil {
					e["days"] = int(*lifecycleRule.Expiration.Days)
				}
				if lifecycleRule.Expiration.ExpiredObjectDeleteMarker != nil {
					e["expired_object_delete_marker"] = *lifecycleRule.Expiration.ExpiredObjectDeleteMarker
				}
				rule["expiration"] = schema.NewSet(expirationHash, []interface{}{e})
			}
			// noncurrent_version_expiration
			if lifecycleRule.NoncurrentVersionExpiration != nil {
				e := make(map[string]interface{})
				if lifecycleRule.NoncurrentVersionExpiration.NoncurrentDays != nil {
					e["days"] = int(*lifecycleRule.NoncurrentVersionExpiration.NoncurrentDays)
				}
				rule["noncurrent_version_expiration"] = schema.NewSet(expirationHash, []interface{}{e})
			}
			// transition
			if len(lifecycleRule.Transitions) > 0 {
				transitions := make([]interface{}, 0, len(lifecycleRule.Transitions))
				for _, v := range lifecycleRule.Transitions {
					t := make(map[string]interface{})
					if v.Date != nil {
						t["date"] = (*v.Date).Format("2006-01-02")
					}
					if v.Days != nil {
						t["days"] = int(*v.Days)
					}
					if v.StorageClass != nil {
						t["storage_class"] = *v.StorageClass
					}
					transitions = append(transitions, t)
				}
				rule["transition"] = schema.NewSet(transitionHash, transitions)
			}
			// noncurrent_version_transition
			if len(lifecycleRule.NoncurrentVersionTransitions) > 0 {
				transitions := make([]interface{}, 0, len(lifecycleRule.NoncurrentVersionTransitions))
				for _, v := range lifecycleRule.NoncurrentVersionTransitions {
					t := make(map[string]interface{})
					if v.NoncurrentDays != nil {
						t["days"] = int(*v.NoncurrentDays)
					}
					if v.StorageClass != nil {
						t["storage_class"] = *v.StorageClass
					}
					transitions = append(transitions, t)
				}
				rule["noncurrent_version_transition"] = schema.NewSet(transitionHash, transitions)
			}

			rules = append(rules, rule)
		}

		if err := d.Set("lifecycle_rule", rules); err != nil {
			return err
		}
	}

	// Read the bucket replication configuration
	replication, err := s3conn.GetBucketReplication(&s3.GetBucketReplicationInput{
		Bucket: aws.String(d.Id()),
	})
	if err != nil {
		if awsError, ok := err.(awserr.RequestFailure); ok && awsError.StatusCode() != 404 {
			return err
		}
	}

	log.Printf("[DEBUG] S3 Bucket: %s, read replication configuration: %v", d.Id(), replication)
	if r := replication.ReplicationConfiguration; r != nil {
		if err := d.Set("replication_configuration", flattenAwsS3BucketReplicationConfiguration(r)); err != nil {
			log.Printf("[DEBUG] Error setting replication configuration: %s", err)
			return err
		}
	}

	// Add the region as an attribute
	location, err := s3conn.GetBucketLocation(
		&s3.GetBucketLocationInput{
			Bucket: aws.String(d.Id()),
		},
	)
	if err != nil {
		return err
	}
	var region string
	if location.LocationConstraint != nil {
		region = *location.LocationConstraint
	}
	region = normalizeRegion(region)
	if err := d.Set("region", region); err != nil {
		return err
	}

	// Add the hosted zone ID for this bucket's region as an attribute
	hostedZoneID := HostedZoneIDForRegion(region)
	if err := d.Set("hosted_zone_id", hostedZoneID); err != nil {
		return err
	}

	// Add website_endpoint as an attribute
	websiteEndpoint, err := websiteEndpoint(s3conn, d)
	if err != nil {
		return err
	}
	if websiteEndpoint != nil {
		if err := d.Set("website_endpoint", websiteEndpoint.Endpoint); err != nil {
			return err
		}
		if err := d.Set("website_domain", websiteEndpoint.Domain); err != nil {
			return err
		}
	}

	tagSet, err := getTagSetS3(s3conn, d.Id())
	if err != nil {
		return err
	}

	if err := d.Set("tags", tagsToMapS3(tagSet)); err != nil {
		return err
	}

	d.Set("arn", fmt.Sprintf("arn:%s:s3:::%s", meta.(*AWSClient).partition, d.Id()))

	return nil
}

func resourceAwsS3BucketDelete(d *schema.ResourceData, meta interface{}) error {
	s3conn := meta.(*AWSClient).s3conn

	log.Printf("[DEBUG] S3 Delete Bucket: %s", d.Id())
	_, err := s3conn.DeleteBucket(&s3.DeleteBucketInput{
		Bucket: aws.String(d.Id()),
	})
	if err != nil {
		awsErr, ok := err.(awserr.Error)
		if ok && awsErr.Code() == "BucketNotEmpty" {
			if d.Get("force_destroy").(bool) {
				// The bucket still contains objects and/or delete markers;
				// remove them, then retry the bucket deletion.
				log.Printf("[DEBUG] S3 Bucket attempting to forceDestroy %+v", err)

				bucket := d.Get("bucket").(string)
				resp, err := s3conn.ListObjectVersions(
					&s3.ListObjectVersionsInput{
						Bucket: aws.String(bucket),
					},
				)

				if err != nil {
					return fmt.Errorf("Error listing S3 Bucket object versions: %s", err)
				}

				objectsToDelete := make([]*s3.ObjectIdentifier, 0)

				if len(resp.DeleteMarkers) != 0 {
					for _, v := range resp.DeleteMarkers {
						objectsToDelete = append(objectsToDelete, &s3.ObjectIdentifier{
							Key:       v.Key,
							VersionId: v.VersionId,
						})
					}
				}

				if len(resp.Versions) != 0 {
					for _, v := range resp.Versions {
						objectsToDelete = append(objectsToDelete, &s3.ObjectIdentifier{
							Key:       v.Key,
							VersionId: v.VersionId,
						})
					}
				}

				params := &s3.DeleteObjectsInput{
					Bucket: aws.String(bucket),
					Delete: &s3.Delete{
						Objects: objectsToDelete,
					},
				}

				_, err = s3conn.DeleteObjects(params)

				if err != nil {
					return fmt.Errorf("Error deleting S3 Bucket objects during force_destroy: %s", err)
				}

				// This call recurses until all objects are deleted or an error is returned.
				return resourceAwsS3BucketDelete(d, meta)
			}
		}
		return fmt.Errorf("Error deleting S3 Bucket: %s %q", err, d.Get("bucket").(string))
	}
	return nil
}

func resourceAwsS3BucketPolicyUpdate(s3conn *s3.S3, d *schema.ResourceData) error {
	bucket := d.Get("bucket").(string)
	policy := d.Get("policy").(string)

	if policy != "" {
		log.Printf("[DEBUG] S3 bucket: %s, put policy: %s", bucket, policy)

		params := &s3.PutBucketPolicyInput{
			Bucket: aws.String(bucket),
			Policy: aws.String(policy),
		}

		err := resource.Retry(1*time.Minute, func() *resource.RetryError {
			if _, err := s3conn.PutBucketPolicy(params); err != nil {
				if awsErr, ok := err.(awserr.Error); ok {
					if awsErr.Code() == "MalformedPolicy" {
						return resource.RetryableError(awsErr)
					}
				}
				return resource.NonRetryableError(err)
			}
			return nil
		})

		if err != nil {
			return fmt.Errorf("Error putting S3 policy: %s", err)
		}
	} else {
		log.Printf("[DEBUG] S3 bucket: %s, delete policy: %s", bucket, policy)
		_, err := s3conn.DeleteBucketPolicy(&s3.DeleteBucketPolicyInput{
			Bucket: aws.String(bucket),
		})

		if err != nil {
			return fmt.Errorf("Error deleting S3 policy: %s", err)
		}
	}

	return nil
}
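
// A note on the MalformedPolicy retry above: when a policy references an IAM
// principal created in the same Terraform run, S3 can briefly reject the
// policy while IAM propagates, and it reports that case as "MalformedPolicy".
// Retrying for up to a minute is presumably meant to paper over that
// propagation delay; a genuinely malformed document will simply keep failing
// until the retry window expires.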

func resourceAwsS3BucketCorsUpdate(s3conn *s3.S3, d *schema.ResourceData) error {
	bucket := d.Get("bucket").(string)
	rawCors := d.Get("cors_rule").([]interface{})

	if len(rawCors) == 0 {
		// Delete CORS
		log.Printf("[DEBUG] S3 bucket: %s, delete CORS", bucket)
		_, err := s3conn.DeleteBucketCors(&s3.DeleteBucketCorsInput{
			Bucket: aws.String(bucket),
		})
		if err != nil {
			return fmt.Errorf("Error deleting S3 CORS: %s", err)
		}
	} else {
		// Put CORS
		rules := make([]*s3.CORSRule, 0, len(rawCors))
		for _, cors := range rawCors {
			corsMap := cors.(map[string]interface{})
			r := &s3.CORSRule{}
			for k, v := range corsMap {
				log.Printf("[DEBUG] S3 bucket: %s, put CORS: %#v, %#v", bucket, k, v)
				if k == "max_age_seconds" {
					r.MaxAgeSeconds = aws.Int64(int64(v.(int)))
				} else {
					vMap := make([]*string, len(v.([]interface{})))
					for i, vv := range v.([]interface{}) {
						str := vv.(string)
						vMap[i] = aws.String(str)
					}
					switch k {
					case "allowed_headers":
						r.AllowedHeaders = vMap
					case "allowed_methods":
						r.AllowedMethods = vMap
					case "allowed_origins":
						r.AllowedOrigins = vMap
					case "expose_headers":
						r.ExposeHeaders = vMap
					}
				}
			}
			rules = append(rules, r)
		}
		corsInput := &s3.PutBucketCorsInput{
			Bucket: aws.String(bucket),
			CORSConfiguration: &s3.CORSConfiguration{
				CORSRules: rules,
			},
		}
		log.Printf("[DEBUG] S3 bucket: %s, put CORS: %#v", bucket, corsInput)
		_, err := s3conn.PutBucketCors(corsInput)
		if err != nil {
			return fmt.Errorf("Error putting S3 CORS: %s", err)
		}
	}

	return nil
}

func resourceAwsS3BucketWebsiteUpdate(s3conn *s3.S3, d *schema.ResourceData) error {
	ws := d.Get("website").([]interface{})

	if len(ws) == 1 {
		var w map[string]interface{}
		if ws[0] != nil {
			w = ws[0].(map[string]interface{})
		} else {
			w = make(map[string]interface{})
		}
		return resourceAwsS3BucketWebsitePut(s3conn, d, w)
	} else if len(ws) == 0 {
		return resourceAwsS3BucketWebsiteDelete(s3conn, d)
	} else {
		return fmt.Errorf("Cannot specify more than one website.")
	}
}

func resourceAwsS3BucketWebsitePut(s3conn *s3.S3, d *schema.ResourceData, website map[string]interface{}) error {
	bucket := d.Get("bucket").(string)

	var indexDocument, errorDocument, redirectAllRequestsTo, routingRules string
	if v, ok := website["index_document"]; ok {
		indexDocument = v.(string)
	}
	if v, ok := website["error_document"]; ok {
		errorDocument = v.(string)
	}
	if v, ok := website["redirect_all_requests_to"]; ok {
		redirectAllRequestsTo = v.(string)
	}
	if v, ok := website["routing_rules"]; ok {
		routingRules = v.(string)
	}

	if indexDocument == "" && redirectAllRequestsTo == "" {
		return fmt.Errorf("Must specify either index_document or redirect_all_requests_to.")
	}

	websiteConfiguration := &s3.WebsiteConfiguration{}

	if indexDocument != "" {
		websiteConfiguration.IndexDocument = &s3.IndexDocument{Suffix: aws.String(indexDocument)}
	}

	if errorDocument != "" {
		websiteConfiguration.ErrorDocument = &s3.ErrorDocument{Key: aws.String(errorDocument)}
	}

	if redirectAllRequestsTo != "" {
		redirect, err := url.Parse(redirectAllRequestsTo)
		if err == nil && redirect.Scheme != "" {
			var redirectHostBuf bytes.Buffer
			redirectHostBuf.WriteString(redirect.Host)
			if redirect.Path != "" {
				redirectHostBuf.WriteString(redirect.Path)
			}
			websiteConfiguration.RedirectAllRequestsTo = &s3.RedirectAllRequestsTo{HostName: aws.String(redirectHostBuf.String()), Protocol: aws.String(redirect.Scheme)}
		} else {
			websiteConfiguration.RedirectAllRequestsTo = &s3.RedirectAllRequestsTo{HostName: aws.String(redirectAllRequestsTo)}
		}
	}

	if routingRules != "" {
		var unmarshaledRules []*s3.RoutingRule
		if err := json.Unmarshal([]byte(routingRules), &unmarshaledRules); err != nil {
			return err
		}
		websiteConfiguration.RoutingRules = unmarshaledRules
	}

	putInput := &s3.PutBucketWebsiteInput{
		Bucket:               aws.String(bucket),
		WebsiteConfiguration: websiteConfiguration,
	}

	log.Printf("[DEBUG] S3 put bucket website: %#v", putInput)
	_, err := s3conn.PutBucketWebsite(putInput)
	if err != nil {
		return fmt.Errorf("Error putting S3 website: %s", err)
	}

	return nil
}

func resourceAwsS3BucketWebsiteDelete(s3conn *s3.S3, d *schema.ResourceData) error {
	bucket := d.Get("bucket").(string)
	deleteInput := &s3.DeleteBucketWebsiteInput{Bucket: aws.String(bucket)}

	log.Printf("[DEBUG] S3 delete bucket website: %#v", deleteInput)

	_, err := s3conn.DeleteBucketWebsite(deleteInput)
	if err != nil {
		return fmt.Errorf("Error deleting S3 website: %s", err)
	}

	d.Set("website_endpoint", "")
	d.Set("website_domain", "")

	return nil
}

func websiteEndpoint(s3conn *s3.S3, d *schema.ResourceData) (*S3Website, error) {
	// If the bucket doesn't have a website configuration, return an empty
	// endpoint
	if _, ok := d.GetOk("website"); !ok {
		return nil, nil
	}

	bucket := d.Get("bucket").(string)

	// Look up the region for this bucket
	location, err := s3conn.GetBucketLocation(
		&s3.GetBucketLocationInput{
			Bucket: aws.String(bucket),
		},
	)
	if err != nil {
		return nil, err
	}
	var region string
	if location.LocationConstraint != nil {
		region = *location.LocationConstraint
	}

	return WebsiteEndpoint(bucket, region), nil
}

func bucketDomainName(bucket string) string {
	return fmt.Sprintf("%s.s3.amazonaws.com", bucket)
}

func WebsiteEndpoint(bucket string, region string) *S3Website {
	domain := WebsiteDomainUrl(region)
	return &S3Website{Endpoint: fmt.Sprintf("%s.%s", bucket, domain), Domain: domain}
}

func WebsiteDomainUrl(region string) string {
	region = normalizeRegion(region)

	// Newer regions use a different syntax for website endpoints:
	// http://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteEndpoints.html
	if isOldRegion(region) {
		return fmt.Sprintf("s3-website-%s.amazonaws.com", region)
	}
	return fmt.Sprintf("s3-website.%s.amazonaws.com", region)
}

func isOldRegion(region string) bool {
	oldRegions := []string{
		"ap-northeast-1",
		"ap-southeast-1",
		"ap-southeast-2",
		"eu-west-1",
		"sa-east-1",
		"us-east-1",
		"us-gov-west-1",
		"us-west-1",
		"us-west-2",
	}
	for _, r := range oldRegions {
		if region == r {
			return true
		}
	}
	return false
}
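
// To illustrate the two endpoint formats above with a hypothetical bucket
// name: an older region such as eu-west-1 yields
// "example.s3-website-eu-west-1.amazonaws.com" (dash before the region),
// while a newer region such as eu-central-1 yields
// "example.s3-website.eu-central-1.amazonaws.com" (dot before the region).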

func resourceAwsS3BucketAclUpdate(s3conn *s3.S3, d *schema.ResourceData) error {
	acl := d.Get("acl").(string)
	bucket := d.Get("bucket").(string)

	i := &s3.PutBucketAclInput{
		Bucket: aws.String(bucket),
		ACL:    aws.String(acl),
	}
	log.Printf("[DEBUG] S3 put bucket ACL: %#v", i)

	_, err := s3conn.PutBucketAcl(i)
	if err != nil {
		return fmt.Errorf("Error putting S3 ACL: %s", err)
	}

	return nil
}

func resourceAwsS3BucketVersioningUpdate(s3conn *s3.S3, d *schema.ResourceData) error {
	v := d.Get("versioning").([]interface{})
	bucket := d.Get("bucket").(string)
	vc := &s3.VersioningConfiguration{}

	if len(v) > 0 {
		c := v[0].(map[string]interface{})

		if c["enabled"].(bool) {
			vc.Status = aws.String(s3.BucketVersioningStatusEnabled)
		} else {
			vc.Status = aws.String(s3.BucketVersioningStatusSuspended)
		}

		if c["mfa_delete"].(bool) {
			vc.MFADelete = aws.String(s3.MFADeleteEnabled)
		} else {
			vc.MFADelete = aws.String(s3.MFADeleteDisabled)
		}
	} else {
		vc.Status = aws.String(s3.BucketVersioningStatusSuspended)
	}

	i := &s3.PutBucketVersioningInput{
		Bucket:                  aws.String(bucket),
		VersioningConfiguration: vc,
	}
	log.Printf("[DEBUG] S3 put bucket versioning: %#v", i)

	_, err := s3conn.PutBucketVersioning(i)
	if err != nil {
		return fmt.Errorf("Error putting S3 versioning: %s", err)
	}

	return nil
}

func resourceAwsS3BucketLoggingUpdate(s3conn *s3.S3, d *schema.ResourceData) error {
	logging := d.Get("logging").(*schema.Set).List()
	bucket := d.Get("bucket").(string)
	loggingStatus := &s3.BucketLoggingStatus{}

	if len(logging) > 0 {
		c := logging[0].(map[string]interface{})

		loggingEnabled := &s3.LoggingEnabled{}
		if val, ok := c["target_bucket"]; ok {
			loggingEnabled.TargetBucket = aws.String(val.(string))
		}
		if val, ok := c["target_prefix"]; ok {
			loggingEnabled.TargetPrefix = aws.String(val.(string))
		}

		loggingStatus.LoggingEnabled = loggingEnabled
	}

	i := &s3.PutBucketLoggingInput{
		Bucket:              aws.String(bucket),
		BucketLoggingStatus: loggingStatus,
	}
	log.Printf("[DEBUG] S3 put bucket logging: %#v", i)

	_, err := s3conn.PutBucketLogging(i)
	if err != nil {
		return fmt.Errorf("Error putting S3 logging: %s", err)
	}

	return nil
}
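
// Note that when the "logging" set is empty, the function above still calls
// PutBucketLogging, but with a BucketLoggingStatus that carries no
// LoggingEnabled element; per the S3 API, such a request disables server
// access logging for the bucket, which is exactly what removing the block
// from the configuration should do.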

func resourceAwsS3BucketAccelerationUpdate(s3conn *s3.S3, d *schema.ResourceData) error {
	bucket := d.Get("bucket").(string)
	enableAcceleration := d.Get("acceleration_status").(string)

	i := &s3.PutBucketAccelerateConfigurationInput{
		Bucket: aws.String(bucket),
		AccelerateConfiguration: &s3.AccelerateConfiguration{
			Status: aws.String(enableAcceleration),
		},
	}
	log.Printf("[DEBUG] S3 put bucket acceleration: %#v", i)

	_, err := s3conn.PutBucketAccelerateConfiguration(i)
	if err != nil {
		return fmt.Errorf("Error putting S3 acceleration: %s", err)
	}

	return nil
}

func resourceAwsS3BucketRequestPayerUpdate(s3conn *s3.S3, d *schema.ResourceData) error {
	bucket := d.Get("bucket").(string)
	payer := d.Get("request_payer").(string)

	i := &s3.PutBucketRequestPaymentInput{
		Bucket: aws.String(bucket),
		RequestPaymentConfiguration: &s3.RequestPaymentConfiguration{
			Payer: aws.String(payer),
		},
	}
	log.Printf("[DEBUG] S3 put bucket request payer: %#v", i)

	_, err := s3conn.PutBucketRequestPayment(i)
	if err != nil {
		return fmt.Errorf("Error putting S3 request payer: %s", err)
	}

	return nil
}

func resourceAwsS3BucketReplicationConfigurationUpdate(s3conn *s3.S3, d *schema.ResourceData) error {
	bucket := d.Get("bucket").(string)
	replicationConfiguration := d.Get("replication_configuration").([]interface{})

	if len(replicationConfiguration) == 0 {
		i := &s3.DeleteBucketReplicationInput{
			Bucket: aws.String(bucket),
		}

		err := resource.Retry(1*time.Minute, func() *resource.RetryError {
			if _, err := s3conn.DeleteBucketReplication(i); err != nil {
				return resource.NonRetryableError(err)
			}
			return nil
		})
		if err != nil {
			return fmt.Errorf("Error removing S3 bucket replication: %s", err)
		}
		return nil
	}

	// Validate that bucket versioning is enabled; S3 rejects replication
	// configurations on unversioned buckets.
	hasVersioning := false
	if versioning, ok := d.GetOk("versioning"); ok {
		v := versioning.([]interface{})

		if v[0].(map[string]interface{})["enabled"].(bool) {
			hasVersioning = true
		}
	}

	if !hasVersioning {
		return fmt.Errorf("versioning must be enabled to allow S3 bucket replication")
	}

	c := replicationConfiguration[0].(map[string]interface{})

	rc := &s3.ReplicationConfiguration{}
	if val, ok := c["role"]; ok {
		rc.Role = aws.String(val.(string))
	}

	rcRules := c["rules"].(*schema.Set).List()
	rules := []*s3.ReplicationRule{}
	for _, v := range rcRules {
		rr := v.(map[string]interface{})
		rcRule := &s3.ReplicationRule{
			Prefix: aws.String(rr["prefix"].(string)),
			Status: aws.String(rr["status"].(string)),
		}

		if rrid, ok := rr["id"]; ok {
			rcRule.ID = aws.String(rrid.(string))
		}

		ruleDestination := &s3.Destination{}
		if destination, ok := rr["destination"]; ok {
			dest := destination.(*schema.Set).List()

			bd := dest[0].(map[string]interface{})
			ruleDestination.Bucket = aws.String(bd["bucket"].(string))

			if storageClass, ok := bd["storage_class"]; ok && storageClass != "" {
				ruleDestination.StorageClass = aws.String(storageClass.(string))
			}
		}
		rcRule.Destination = ruleDestination
		rules = append(rules, rcRule)
	}

	rc.Rules = rules
	i := &s3.PutBucketReplicationInput{
		Bucket:                   aws.String(bucket),
		ReplicationConfiguration: rc,
	}
	log.Printf("[DEBUG] S3 put bucket replication configuration: %#v", i)

	_, err := s3conn.PutBucketReplication(i)
	if err != nil {
		return fmt.Errorf("Error putting S3 replication configuration: %s", err)
	}

	return nil
}
d.Get(fmt.Sprintf("lifecycle_rule.%d.expiration", i)).(*schema.Set).List() 1525 if len(expiration) > 0 { 1526 e := expiration[0].(map[string]interface{}) 1527 i := &s3.LifecycleExpiration{} 1528 1529 if val, ok := e["date"].(string); ok && val != "" { 1530 t, err := time.Parse(time.RFC3339, fmt.Sprintf("%sT00:00:00Z", val)) 1531 if err != nil { 1532 return fmt.Errorf("Error Parsing AWS S3 Bucket Lifecycle Expiration Date: %s", err.Error()) 1533 } 1534 i.Date = aws.Time(t) 1535 } else if val, ok := e["days"].(int); ok && val > 0 { 1536 i.Days = aws.Int64(int64(val)) 1537 } else if val, ok := e["expired_object_delete_marker"].(bool); ok { 1538 i.ExpiredObjectDeleteMarker = aws.Bool(val) 1539 } 1540 rule.Expiration = i 1541 } 1542 1543 // NoncurrentVersionExpiration 1544 nc_expiration := d.Get(fmt.Sprintf("lifecycle_rule.%d.noncurrent_version_expiration", i)).(*schema.Set).List() 1545 if len(nc_expiration) > 0 { 1546 e := nc_expiration[0].(map[string]interface{}) 1547 1548 if val, ok := e["days"].(int); ok && val > 0 { 1549 rule.NoncurrentVersionExpiration = &s3.NoncurrentVersionExpiration{ 1550 NoncurrentDays: aws.Int64(int64(val)), 1551 } 1552 } 1553 } 1554 1555 // Transitions 1556 transitions := d.Get(fmt.Sprintf("lifecycle_rule.%d.transition", i)).(*schema.Set).List() 1557 if len(transitions) > 0 { 1558 rule.Transitions = make([]*s3.Transition, 0, len(transitions)) 1559 for _, transition := range transitions { 1560 transition := transition.(map[string]interface{}) 1561 i := &s3.Transition{} 1562 if val, ok := transition["date"].(string); ok && val != "" { 1563 t, err := time.Parse(time.RFC3339, fmt.Sprintf("%sT00:00:00Z", val)) 1564 if err != nil { 1565 return fmt.Errorf("Error Parsing AWS S3 Bucket Lifecycle Expiration Date: %s", err.Error()) 1566 } 1567 i.Date = aws.Time(t) 1568 } else if val, ok := transition["days"].(int); ok && val > 0 { 1569 i.Days = aws.Int64(int64(val)) 1570 } 1571 if val, ok := transition["storage_class"].(string); ok && val != "" { 1572 i.StorageClass = aws.String(val) 1573 } 1574 1575 rule.Transitions = append(rule.Transitions, i) 1576 } 1577 } 1578 // NoncurrentVersionTransitions 1579 nc_transitions := d.Get(fmt.Sprintf("lifecycle_rule.%d.noncurrent_version_transition", i)).(*schema.Set).List() 1580 if len(nc_transitions) > 0 { 1581 rule.NoncurrentVersionTransitions = make([]*s3.NoncurrentVersionTransition, 0, len(nc_transitions)) 1582 for _, transition := range nc_transitions { 1583 transition := transition.(map[string]interface{}) 1584 i := &s3.NoncurrentVersionTransition{} 1585 if val, ok := transition["days"].(int); ok && val > 0 { 1586 i.NoncurrentDays = aws.Int64(int64(val)) 1587 } 1588 if val, ok := transition["storage_class"].(string); ok && val != "" { 1589 i.StorageClass = aws.String(val) 1590 } 1591 1592 rule.NoncurrentVersionTransitions = append(rule.NoncurrentVersionTransitions, i) 1593 } 1594 } 1595 1596 rules = append(rules, rule) 1597 } 1598 1599 i := &s3.PutBucketLifecycleConfigurationInput{ 1600 Bucket: aws.String(bucket), 1601 LifecycleConfiguration: &s3.BucketLifecycleConfiguration{ 1602 Rules: rules, 1603 }, 1604 } 1605 1606 err := resource.Retry(1*time.Minute, func() *resource.RetryError { 1607 if _, err := s3conn.PutBucketLifecycleConfiguration(i); err != nil { 1608 return resource.NonRetryableError(err) 1609 } 1610 return nil 1611 }) 1612 if err != nil { 1613 return fmt.Errorf("Error putting S3 lifecycle: %s", err) 1614 } 1615 1616 return nil 1617 } 1618 1619 func flattenAwsS3BucketReplicationConfiguration(r 

func flattenAwsS3BucketReplicationConfiguration(r *s3.ReplicationConfiguration) []map[string]interface{} {
	replicationConfiguration := make([]map[string]interface{}, 0, 1)
	m := make(map[string]interface{})

	if r.Role != nil && *r.Role != "" {
		m["role"] = *r.Role
	}

	rules := make([]interface{}, 0, len(r.Rules))
	for _, v := range r.Rules {
		t := make(map[string]interface{})
		if v.Destination != nil {
			rd := make(map[string]interface{})
			if v.Destination.Bucket != nil {
				rd["bucket"] = *v.Destination.Bucket
			}
			if v.Destination.StorageClass != nil {
				rd["storage_class"] = *v.Destination.StorageClass
			}
			t["destination"] = schema.NewSet(destinationHash, []interface{}{rd})
		}

		if v.ID != nil {
			t["id"] = *v.ID
		}
		if v.Prefix != nil {
			t["prefix"] = *v.Prefix
		}
		if v.Status != nil {
			t["status"] = *v.Status
		}
		rules = append(rules, t)
	}
	m["rules"] = schema.NewSet(rulesHash, rules)

	replicationConfiguration = append(replicationConfiguration, m)

	return replicationConfiguration
}

func normalizeRoutingRules(w []*s3.RoutingRule) (string, error) {
	withNulls, err := json.Marshal(w)
	if err != nil {
		return "", err
	}

	var rules []map[string]interface{}
	if err := json.Unmarshal(withNulls, &rules); err != nil {
		return "", err
	}

	var cleanRules []map[string]interface{}
	for _, rule := range rules {
		cleanRules = append(cleanRules, removeNil(rule))
	}

	withoutNulls, err := json.Marshal(cleanRules)
	if err != nil {
		return "", err
	}

	return string(withoutNulls), nil
}

func removeNil(data map[string]interface{}) map[string]interface{} {
	withoutNil := make(map[string]interface{})

	for k, v := range data {
		if v == nil {
			continue
		}

		switch v := v.(type) {
		case map[string]interface{}:
			withoutNil[k] = removeNil(v)
		default:
			withoutNil[k] = v
		}
	}

	return withoutNil
}
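
// For illustration: marshaling routing rules through the SDK types emits
// explicit nulls for unset pointer fields, e.g. (abridged)
//
//	[{"Condition":{"HttpErrorCodeReturnedEquals":null,"KeyPrefixEquals":"docs/"},"Redirect":{...}}]
//
// and normalizeRoutingRules strips them back out so the stored state matches
// what a user would write:
//
//	[{"Condition":{"KeyPrefixEquals":"docs/"},"Redirect":{...}}]
//
// Note that removeNil only recurses into nested maps; that is sufficient here
// because the marshaled routing rules contain no arrays below the top level.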

// Deprecated: use normalizeJsonString instead.
func normalizeJson(jsonString interface{}) string {
	if jsonString == nil || jsonString == "" {
		return ""
	}
	var j interface{}
	err := json.Unmarshal([]byte(jsonString.(string)), &j)
	if err != nil {
		return fmt.Sprintf("Error parsing JSON: %s", err)
	}
	b, _ := json.Marshal(j)
	return string(b[:])
}

func normalizeRegion(region string) string {
	// Default to us-east-1 if the bucket doesn't have a region:
	// http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html
	if region == "" {
		region = "us-east-1"
	}

	return region
}

func validateS3BucketAccelerationStatus(v interface{}, k string) (ws []string, errors []error) {
	validTypes := map[string]struct{}{
		"Enabled":   struct{}{},
		"Suspended": struct{}{},
	}

	if _, ok := validTypes[v.(string)]; !ok {
		errors = append(errors, fmt.Errorf("S3 Bucket Acceleration Status %q is invalid, must be %q or %q", v.(string), "Enabled", "Suspended"))
	}
	return
}

func validateS3BucketRequestPayerType(v interface{}, k string) (ws []string, errors []error) {
	value := v.(string)
	if value != s3.PayerRequester && value != s3.PayerBucketOwner {
		errors = append(errors, fmt.Errorf(
			"%q contains an invalid Request Payer type %q. Valid types are either %q or %q",
			k, value, s3.PayerRequester, s3.PayerBucketOwner))
	}
	return
}

// validateS3BucketName validates any S3 bucket name that is not inside the us-east-1 region.
// Buckets outside of this region have to be DNS-compliant. Once the same restrictions are
// applied to buckets in the us-east-1 region, this function can be refactored as a SchemaValidateFunc.
func validateS3BucketName(value string, region string) error {
	if region != "us-east-1" {
		if (len(value) < 3) || (len(value) > 63) {
			return fmt.Errorf("%q must contain from 3 to 63 characters", value)
		}
		if !regexp.MustCompile(`^[0-9a-z-.]+$`).MatchString(value) {
			return fmt.Errorf("only lowercase alphanumeric characters, hyphens, and periods allowed in %q", value)
		}
		if regexp.MustCompile(`^(?:[0-9]{1,3}\.){3}[0-9]{1,3}$`).MatchString(value) {
			return fmt.Errorf("%q must not be formatted as an IP address", value)
		}
		if strings.HasPrefix(value, `.`) {
			return fmt.Errorf("%q cannot start with a period", value)
		}
		if strings.HasSuffix(value, `.`) {
			return fmt.Errorf("%q cannot end with a period", value)
		}
		if strings.Contains(value, `..`) {
			return fmt.Errorf("%q must not contain consecutive periods; labels must be separated by a single period", value)
		}
	} else {
		if len(value) > 255 {
			return fmt.Errorf("%q must contain less than 256 characters", value)
		}
		if !regexp.MustCompile(`^[0-9a-zA-Z-._]+$`).MatchString(value) {
			return fmt.Errorf("only alphanumeric characters, hyphens, periods, and underscores allowed in %q", value)
		}
	}
	return nil
}
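
// Illustrative results for buckets outside us-east-1 (names are hypothetical):
//
//	validateS3BucketName("my-bucket.logs", "eu-west-1") // nil: DNS-compliant
//	validateS3BucketName("MyBucket", "eu-west-1")       // error: uppercase characters
//	validateS3BucketName("10.0.0.1", "eu-west-1")       // error: formatted as an IP address
//	validateS3BucketName("a..b", "eu-west-1")           // error: consecutive periods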

func expirationHash(v interface{}) int {
	var buf bytes.Buffer
	m := v.(map[string]interface{})
	if v, ok := m["date"]; ok {
		buf.WriteString(fmt.Sprintf("%s-", v.(string)))
	}
	if v, ok := m["days"]; ok {
		buf.WriteString(fmt.Sprintf("%d-", v.(int)))
	}
	if v, ok := m["expired_object_delete_marker"]; ok {
		buf.WriteString(fmt.Sprintf("%t-", v.(bool)))
	}
	return hashcode.String(buf.String())
}

func transitionHash(v interface{}) int {
	var buf bytes.Buffer
	m := v.(map[string]interface{})
	if v, ok := m["date"]; ok {
		buf.WriteString(fmt.Sprintf("%s-", v.(string)))
	}
	if v, ok := m["days"]; ok {
		buf.WriteString(fmt.Sprintf("%d-", v.(int)))
	}
	if v, ok := m["storage_class"]; ok {
		buf.WriteString(fmt.Sprintf("%s-", v.(string)))
	}
	return hashcode.String(buf.String())
}

func rulesHash(v interface{}) int {
	var buf bytes.Buffer
	m := v.(map[string]interface{})

	if v, ok := m["id"]; ok {
		buf.WriteString(fmt.Sprintf("%s-", v.(string)))
	}
	if v, ok := m["prefix"]; ok {
		buf.WriteString(fmt.Sprintf("%s-", v.(string)))
	}
	if v, ok := m["status"]; ok {
		buf.WriteString(fmt.Sprintf("%s-", v.(string)))
	}
	return hashcode.String(buf.String())
}

func destinationHash(v interface{}) int {
	var buf bytes.Buffer
	m := v.(map[string]interface{})

	if v, ok := m["bucket"]; ok {
		buf.WriteString(fmt.Sprintf("%s-", v.(string)))
	}
	if v, ok := m["storage_class"]; ok {
		buf.WriteString(fmt.Sprintf("%s-", v.(string)))
	}
	return hashcode.String(buf.String())
}

type S3Website struct {
	Endpoint, Domain string
}