github.com/anfernee/terraform@v0.6.16-0.20160430000239-06e5085a92f2/builtin/providers/aws/resource_aws_s3_bucket.go (about) 1 package aws 2 3 import ( 4 "bytes" 5 "encoding/json" 6 "fmt" 7 "log" 8 "net/url" 9 "time" 10 11 "github.com/hashicorp/terraform/helper/resource" 12 "github.com/hashicorp/terraform/helper/schema" 13 14 "github.com/aws/aws-sdk-go/aws" 15 "github.com/aws/aws-sdk-go/aws/awserr" 16 "github.com/aws/aws-sdk-go/service/s3" 17 "github.com/hashicorp/terraform/helper/hashcode" 18 ) 19 20 func resourceAwsS3Bucket() *schema.Resource { 21 return &schema.Resource{ 22 Create: resourceAwsS3BucketCreate, 23 Read: resourceAwsS3BucketRead, 24 Update: resourceAwsS3BucketUpdate, 25 Delete: resourceAwsS3BucketDelete, 26 27 Schema: map[string]*schema.Schema{ 28 "bucket": &schema.Schema{ 29 Type: schema.TypeString, 30 Required: true, 31 ForceNew: true, 32 }, 33 34 "arn": &schema.Schema{ 35 Type: schema.TypeString, 36 Optional: true, 37 Computed: true, 38 }, 39 40 "acl": &schema.Schema{ 41 Type: schema.TypeString, 42 Default: "private", 43 Optional: true, 44 }, 45 46 "policy": &schema.Schema{ 47 Type: schema.TypeString, 48 Optional: true, 49 StateFunc: normalizeJson, 50 }, 51 52 "cors_rule": &schema.Schema{ 53 Type: schema.TypeList, 54 Optional: true, 55 Elem: &schema.Resource{ 56 Schema: map[string]*schema.Schema{ 57 "allowed_headers": &schema.Schema{ 58 Type: schema.TypeList, 59 Optional: true, 60 Elem: &schema.Schema{Type: schema.TypeString}, 61 }, 62 "allowed_methods": &schema.Schema{ 63 Type: schema.TypeList, 64 Required: true, 65 Elem: &schema.Schema{Type: schema.TypeString}, 66 }, 67 "allowed_origins": &schema.Schema{ 68 Type: schema.TypeList, 69 Required: true, 70 Elem: &schema.Schema{Type: schema.TypeString}, 71 }, 72 "expose_headers": &schema.Schema{ 73 Type: schema.TypeList, 74 Optional: true, 75 Elem: &schema.Schema{Type: schema.TypeString}, 76 }, 77 "max_age_seconds": &schema.Schema{ 78 Type: schema.TypeInt, 79 Optional: true, 80 }, 81 }, 82 }, 83 }, 84 
85 "website": &schema.Schema{ 86 Type: schema.TypeList, 87 Optional: true, 88 Elem: &schema.Resource{ 89 Schema: map[string]*schema.Schema{ 90 "index_document": &schema.Schema{ 91 Type: schema.TypeString, 92 Optional: true, 93 }, 94 95 "error_document": &schema.Schema{ 96 Type: schema.TypeString, 97 Optional: true, 98 }, 99 100 "redirect_all_requests_to": &schema.Schema{ 101 Type: schema.TypeString, 102 ConflictsWith: []string{ 103 "website.0.index_document", 104 "website.0.error_document", 105 "website.0.routing_rules", 106 }, 107 Optional: true, 108 }, 109 110 "routing_rules": &schema.Schema{ 111 Type: schema.TypeString, 112 Optional: true, 113 StateFunc: normalizeJson, 114 }, 115 }, 116 }, 117 }, 118 119 "hosted_zone_id": &schema.Schema{ 120 Type: schema.TypeString, 121 Optional: true, 122 Computed: true, 123 }, 124 125 "region": &schema.Schema{ 126 Type: schema.TypeString, 127 Optional: true, 128 Computed: true, 129 }, 130 "website_endpoint": &schema.Schema{ 131 Type: schema.TypeString, 132 Optional: true, 133 Computed: true, 134 }, 135 "website_domain": &schema.Schema{ 136 Type: schema.TypeString, 137 Optional: true, 138 Computed: true, 139 }, 140 141 "versioning": &schema.Schema{ 142 Type: schema.TypeSet, 143 Optional: true, 144 Elem: &schema.Resource{ 145 Schema: map[string]*schema.Schema{ 146 "enabled": &schema.Schema{ 147 Type: schema.TypeBool, 148 Optional: true, 149 Default: false, 150 }, 151 }, 152 }, 153 Set: func(v interface{}) int { 154 var buf bytes.Buffer 155 m := v.(map[string]interface{}) 156 buf.WriteString(fmt.Sprintf("%t-", m["enabled"].(bool))) 157 158 return hashcode.String(buf.String()) 159 }, 160 }, 161 162 "logging": &schema.Schema{ 163 Type: schema.TypeSet, 164 Optional: true, 165 Elem: &schema.Resource{ 166 Schema: map[string]*schema.Schema{ 167 "target_bucket": &schema.Schema{ 168 Type: schema.TypeString, 169 Required: true, 170 }, 171 "target_prefix": &schema.Schema{ 172 Type: schema.TypeString, 173 Optional: true, 174 }, 175 }, 176 
}, 177 Set: func(v interface{}) int { 178 var buf bytes.Buffer 179 m := v.(map[string]interface{}) 180 buf.WriteString(fmt.Sprintf("%s-", m["target_bucket"])) 181 buf.WriteString(fmt.Sprintf("%s-", m["target_prefix"])) 182 return hashcode.String(buf.String()) 183 }, 184 }, 185 186 "lifecycle_rule": &schema.Schema{ 187 Type: schema.TypeList, 188 Optional: true, 189 Elem: &schema.Resource{ 190 Schema: map[string]*schema.Schema{ 191 "id": &schema.Schema{ 192 Type: schema.TypeString, 193 Optional: true, 194 Computed: true, 195 ValidateFunc: validateS3BucketLifecycleRuleId, 196 }, 197 "prefix": &schema.Schema{ 198 Type: schema.TypeString, 199 Required: true, 200 }, 201 "enabled": &schema.Schema{ 202 Type: schema.TypeBool, 203 Required: true, 204 }, 205 "abort_incomplete_multipart_upload_days": &schema.Schema{ 206 Type: schema.TypeInt, 207 Optional: true, 208 }, 209 "expiration": &schema.Schema{ 210 Type: schema.TypeSet, 211 Optional: true, 212 Set: expirationHash, 213 Elem: &schema.Resource{ 214 Schema: map[string]*schema.Schema{ 215 "date": &schema.Schema{ 216 Type: schema.TypeString, 217 Optional: true, 218 ValidateFunc: validateS3BucketLifecycleTimestamp, 219 }, 220 "days": &schema.Schema{ 221 Type: schema.TypeInt, 222 Optional: true, 223 }, 224 "expired_object_delete_marker": &schema.Schema{ 225 Type: schema.TypeBool, 226 Optional: true, 227 }, 228 }, 229 }, 230 }, 231 "noncurrent_version_expiration": &schema.Schema{ 232 Type: schema.TypeSet, 233 Optional: true, 234 Set: expirationHash, 235 Elem: &schema.Resource{ 236 Schema: map[string]*schema.Schema{ 237 "days": &schema.Schema{ 238 Type: schema.TypeInt, 239 Optional: true, 240 }, 241 }, 242 }, 243 }, 244 "transition": &schema.Schema{ 245 Type: schema.TypeSet, 246 Optional: true, 247 Set: transitionHash, 248 Elem: &schema.Resource{ 249 Schema: map[string]*schema.Schema{ 250 "date": &schema.Schema{ 251 Type: schema.TypeString, 252 Optional: true, 253 ValidateFunc: validateS3BucketLifecycleTimestamp, 254 }, 255 
"days": &schema.Schema{ 256 Type: schema.TypeInt, 257 Optional: true, 258 }, 259 "storage_class": &schema.Schema{ 260 Type: schema.TypeString, 261 Required: true, 262 ValidateFunc: validateS3BucketLifecycleStorageClass, 263 }, 264 }, 265 }, 266 }, 267 "noncurrent_version_transition": &schema.Schema{ 268 Type: schema.TypeSet, 269 Optional: true, 270 Set: transitionHash, 271 Elem: &schema.Resource{ 272 Schema: map[string]*schema.Schema{ 273 "days": &schema.Schema{ 274 Type: schema.TypeInt, 275 Optional: true, 276 }, 277 "storage_class": &schema.Schema{ 278 Type: schema.TypeString, 279 Required: true, 280 ValidateFunc: validateS3BucketLifecycleStorageClass, 281 }, 282 }, 283 }, 284 }, 285 }, 286 }, 287 }, 288 289 "tags": tagsSchema(), 290 291 "force_destroy": &schema.Schema{ 292 Type: schema.TypeBool, 293 Optional: true, 294 Default: false, 295 }, 296 }, 297 } 298 } 299 300 func resourceAwsS3BucketCreate(d *schema.ResourceData, meta interface{}) error { 301 s3conn := meta.(*AWSClient).s3conn 302 awsRegion := meta.(*AWSClient).region 303 304 // Get the bucket and acl 305 bucket := d.Get("bucket").(string) 306 acl := d.Get("acl").(string) 307 308 log.Printf("[DEBUG] S3 bucket create: %s, ACL: %s", bucket, acl) 309 310 req := &s3.CreateBucketInput{ 311 Bucket: aws.String(bucket), 312 ACL: aws.String(acl), 313 } 314 315 // Special case us-east-1 region and do not set the LocationConstraint. 
316 // See "Request Elements: http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUT.html 317 if awsRegion != "us-east-1" { 318 req.CreateBucketConfiguration = &s3.CreateBucketConfiguration{ 319 LocationConstraint: aws.String(awsRegion), 320 } 321 } 322 323 err := resource.Retry(5*time.Minute, func() *resource.RetryError { 324 log.Printf("[DEBUG] Trying to create new S3 bucket: %q", bucket) 325 _, err := s3conn.CreateBucket(req) 326 if awsErr, ok := err.(awserr.Error); ok { 327 if awsErr.Code() == "OperationAborted" { 328 log.Printf("[WARN] Got an error while trying to create S3 bucket %s: %s", bucket, err) 329 return resource.RetryableError( 330 fmt.Errorf("[WARN] Error creating S3 bucket %s, retrying: %s", 331 bucket, err)) 332 } 333 } 334 if err != nil { 335 return resource.NonRetryableError(err) 336 } 337 338 return nil 339 }) 340 341 if err != nil { 342 return fmt.Errorf("Error creating S3 bucket: %s", err) 343 } 344 345 // Assign the bucket name as the resource ID 346 d.SetId(bucket) 347 348 return resourceAwsS3BucketUpdate(d, meta) 349 } 350 351 func resourceAwsS3BucketUpdate(d *schema.ResourceData, meta interface{}) error { 352 s3conn := meta.(*AWSClient).s3conn 353 if err := setTagsS3(s3conn, d); err != nil { 354 return err 355 } 356 357 if d.HasChange("policy") { 358 if err := resourceAwsS3BucketPolicyUpdate(s3conn, d); err != nil { 359 return err 360 } 361 } 362 363 if d.HasChange("cors_rule") { 364 if err := resourceAwsS3BucketCorsUpdate(s3conn, d); err != nil { 365 return err 366 } 367 } 368 369 if d.HasChange("website") { 370 if err := resourceAwsS3BucketWebsiteUpdate(s3conn, d); err != nil { 371 return err 372 } 373 } 374 375 if d.HasChange("versioning") { 376 if err := resourceAwsS3BucketVersioningUpdate(s3conn, d); err != nil { 377 return err 378 } 379 } 380 if d.HasChange("acl") { 381 if err := resourceAwsS3BucketAclUpdate(s3conn, d); err != nil { 382 return err 383 } 384 } 385 386 if d.HasChange("logging") { 387 if err := 
resourceAwsS3BucketLoggingUpdate(s3conn, d); err != nil { 388 return err 389 } 390 } 391 392 if d.HasChange("lifecycle_rule") { 393 if err := resourceAwsS3BucketLifecycleUpdate(s3conn, d); err != nil { 394 return err 395 } 396 } 397 398 return resourceAwsS3BucketRead(d, meta) 399 } 400 401 func resourceAwsS3BucketRead(d *schema.ResourceData, meta interface{}) error { 402 s3conn := meta.(*AWSClient).s3conn 403 404 var err error 405 _, err = s3conn.HeadBucket(&s3.HeadBucketInput{ 406 Bucket: aws.String(d.Id()), 407 }) 408 if err != nil { 409 if awsError, ok := err.(awserr.RequestFailure); ok && awsError.StatusCode() == 404 { 410 log.Printf("[WARN] S3 Bucket (%s) not found, error code (404)", d.Id()) 411 d.SetId("") 412 return nil 413 } else { 414 // some of the AWS SDK's errors can be empty strings, so let's add 415 // some additional context. 416 return fmt.Errorf("error reading S3 bucket \"%s\": %s", d.Id(), err) 417 } 418 } 419 420 // In the import case, we won't have this 421 if _, ok := d.GetOk("bucket"); !ok { 422 d.Set("bucket", d.Id()) 423 } 424 425 // Read the policy 426 pol, err := s3conn.GetBucketPolicy(&s3.GetBucketPolicyInput{ 427 Bucket: aws.String(d.Id()), 428 }) 429 log.Printf("[DEBUG] S3 bucket: %s, read policy: %v", d.Id(), pol) 430 if err != nil { 431 if err := d.Set("policy", ""); err != nil { 432 return err 433 } 434 } else { 435 if v := pol.Policy; v == nil { 436 if err := d.Set("policy", ""); err != nil { 437 return err 438 } 439 } else if err := d.Set("policy", normalizeJson(*v)); err != nil { 440 return err 441 } 442 } 443 444 // Read the CORS 445 cors, err := s3conn.GetBucketCors(&s3.GetBucketCorsInput{ 446 Bucket: aws.String(d.Id()), 447 }) 448 log.Printf("[DEBUG] S3 bucket: %s, read CORS: %v", d.Id(), cors) 449 if err != nil { 450 rules := make([]map[string]interface{}, 0, len(cors.CORSRules)) 451 for _, ruleObject := range cors.CORSRules { 452 rule := make(map[string]interface{}) 453 rule["allowed_headers"] = ruleObject.AllowedHeaders 454 
rule["allowed_methods"] = ruleObject.AllowedMethods 455 rule["allowed_origins"] = ruleObject.AllowedOrigins 456 rule["expose_headers"] = ruleObject.ExposeHeaders 457 rule["max_age_seconds"] = ruleObject.MaxAgeSeconds 458 rules = append(rules, rule) 459 } 460 if err := d.Set("cors_rule", rules); err != nil { 461 return fmt.Errorf("error reading S3 bucket \"%s\" CORS rules: %s", d.Id(), err) 462 } 463 } 464 465 // Read the website configuration 466 ws, err := s3conn.GetBucketWebsite(&s3.GetBucketWebsiteInput{ 467 Bucket: aws.String(d.Id()), 468 }) 469 var websites []map[string]interface{} 470 if err == nil { 471 w := make(map[string]interface{}) 472 473 if v := ws.IndexDocument; v != nil { 474 w["index_document"] = *v.Suffix 475 } 476 477 if v := ws.ErrorDocument; v != nil { 478 w["error_document"] = *v.Key 479 } 480 481 if v := ws.RedirectAllRequestsTo; v != nil { 482 if v.Protocol == nil { 483 w["redirect_all_requests_to"] = *v.HostName 484 } else { 485 w["redirect_all_requests_to"] = (&url.URL{ 486 Host: *v.HostName, 487 Scheme: *v.Protocol, 488 }).String() 489 } 490 } 491 492 if v := ws.RoutingRules; v != nil { 493 rr, err := normalizeRoutingRules(v) 494 if err != nil { 495 return fmt.Errorf("Error while marshaling routing rules: %s", err) 496 } 497 w["routing_rules"] = rr 498 } 499 500 websites = append(websites, w) 501 } 502 if err := d.Set("website", websites); err != nil { 503 return err 504 } 505 506 // Read the versioning configuration 507 versioning, err := s3conn.GetBucketVersioning(&s3.GetBucketVersioningInput{ 508 Bucket: aws.String(d.Id()), 509 }) 510 if err != nil { 511 return err 512 } 513 log.Printf("[DEBUG] S3 Bucket: %s, versioning: %v", d.Id(), versioning) 514 if versioning.Status != nil && *versioning.Status == s3.BucketVersioningStatusEnabled { 515 vcl := make([]map[string]interface{}, 0, 1) 516 vc := make(map[string]interface{}) 517 if *versioning.Status == s3.BucketVersioningStatusEnabled { 518 vc["enabled"] = true 519 } else { 520 
vc["enabled"] = false 521 } 522 vcl = append(vcl, vc) 523 if err := d.Set("versioning", vcl); err != nil { 524 return err 525 } 526 } 527 528 // Read the logging configuration 529 logging, err := s3conn.GetBucketLogging(&s3.GetBucketLoggingInput{ 530 Bucket: aws.String(d.Id()), 531 }) 532 if err != nil { 533 return err 534 } 535 log.Printf("[DEBUG] S3 Bucket: %s, logging: %v", d.Id(), logging) 536 if v := logging.LoggingEnabled; v != nil { 537 lcl := make([]map[string]interface{}, 0, 1) 538 lc := make(map[string]interface{}) 539 if *v.TargetBucket != "" { 540 lc["target_bucket"] = *v.TargetBucket 541 } 542 if *v.TargetPrefix != "" { 543 lc["target_prefix"] = *v.TargetPrefix 544 } 545 lcl = append(lcl, lc) 546 if err := d.Set("logging", lcl); err != nil { 547 return err 548 } 549 } 550 551 // Read the lifecycle configuration 552 lifecycle, err := s3conn.GetBucketLifecycleConfiguration(&s3.GetBucketLifecycleConfigurationInput{ 553 Bucket: aws.String(d.Id()), 554 }) 555 if err != nil { 556 if awsError, ok := err.(awserr.RequestFailure); ok && awsError.StatusCode() != 404 { 557 return err 558 } 559 } 560 log.Printf("[DEBUG] S3 Bucket: %s, lifecycle: %v", d.Id(), lifecycle) 561 if len(lifecycle.Rules) > 0 { 562 rules := make([]map[string]interface{}, 0, len(lifecycle.Rules)) 563 564 for _, lifecycleRule := range lifecycle.Rules { 565 rule := make(map[string]interface{}) 566 567 // ID 568 if lifecycleRule.ID != nil && *lifecycleRule.ID != "" { 569 rule["id"] = *lifecycleRule.ID 570 } 571 // Prefix 572 if lifecycleRule.Prefix != nil && *lifecycleRule.Prefix != "" { 573 rule["prefix"] = *lifecycleRule.Prefix 574 } 575 // Enabled 576 if lifecycleRule.Status != nil { 577 if *lifecycleRule.Status == s3.ExpirationStatusEnabled { 578 rule["enabled"] = true 579 } else { 580 rule["enabled"] = false 581 } 582 } 583 584 // AbortIncompleteMultipartUploadDays 585 if lifecycleRule.AbortIncompleteMultipartUpload != nil { 586 if 
lifecycleRule.AbortIncompleteMultipartUpload.DaysAfterInitiation != nil { 587 rule["abort_incomplete_multipart_upload_days"] = int(*lifecycleRule.AbortIncompleteMultipartUpload.DaysAfterInitiation) 588 } 589 } 590 591 // expiration 592 if lifecycleRule.Expiration != nil { 593 e := make(map[string]interface{}) 594 if lifecycleRule.Expiration.Date != nil { 595 e["date"] = (*lifecycleRule.Expiration.Date).Format("2006-01-02") 596 } 597 if lifecycleRule.Expiration.Days != nil { 598 e["days"] = int(*lifecycleRule.Expiration.Days) 599 } 600 if lifecycleRule.Expiration.ExpiredObjectDeleteMarker != nil { 601 e["expired_object_delete_marker"] = *lifecycleRule.Expiration.ExpiredObjectDeleteMarker 602 } 603 rule["expiration"] = schema.NewSet(expirationHash, []interface{}{e}) 604 } 605 // noncurrent_version_expiration 606 if lifecycleRule.NoncurrentVersionExpiration != nil { 607 e := make(map[string]interface{}) 608 if lifecycleRule.NoncurrentVersionExpiration.NoncurrentDays != nil { 609 e["days"] = int(*lifecycleRule.NoncurrentVersionExpiration.NoncurrentDays) 610 } 611 rule["noncurrent_version_expiration"] = schema.NewSet(expirationHash, []interface{}{e}) 612 } 613 //// transition 614 if len(lifecycleRule.Transitions) > 0 { 615 transitions := make([]interface{}, 0, len(lifecycleRule.Transitions)) 616 for _, v := range lifecycleRule.Transitions { 617 t := make(map[string]interface{}) 618 if v.Date != nil { 619 t["date"] = (*v.Date).Format("2006-01-02") 620 } 621 if v.Days != nil { 622 t["days"] = int(*v.Days) 623 } 624 if v.StorageClass != nil { 625 t["storage_class"] = *v.StorageClass 626 } 627 transitions = append(transitions, t) 628 } 629 rule["transition"] = schema.NewSet(transitionHash, transitions) 630 } 631 // noncurrent_version_transition 632 if len(lifecycleRule.NoncurrentVersionTransitions) > 0 { 633 transitions := make([]interface{}, 0, len(lifecycleRule.NoncurrentVersionTransitions)) 634 for _, v := range lifecycleRule.NoncurrentVersionTransitions { 635 t := 
make(map[string]interface{}) 636 if v.NoncurrentDays != nil { 637 t["days"] = int(*v.NoncurrentDays) 638 } 639 if v.StorageClass != nil { 640 t["storage_class"] = *v.StorageClass 641 } 642 transitions = append(transitions, t) 643 } 644 rule["noncurrent_version_transition"] = schema.NewSet(transitionHash, transitions) 645 } 646 647 rules = append(rules, rule) 648 } 649 650 if err := d.Set("lifecycle_rule", rules); err != nil { 651 return err 652 } 653 } 654 655 // Add the region as an attribute 656 location, err := s3conn.GetBucketLocation( 657 &s3.GetBucketLocationInput{ 658 Bucket: aws.String(d.Id()), 659 }, 660 ) 661 if err != nil { 662 return err 663 } 664 var region string 665 if location.LocationConstraint != nil { 666 region = *location.LocationConstraint 667 } 668 region = normalizeRegion(region) 669 if err := d.Set("region", region); err != nil { 670 return err 671 } 672 673 // Add the hosted zone ID for this bucket's region as an attribute 674 hostedZoneID := HostedZoneIDForRegion(region) 675 if err := d.Set("hosted_zone_id", hostedZoneID); err != nil { 676 return err 677 } 678 679 // Add website_endpoint as an attribute 680 websiteEndpoint, err := websiteEndpoint(s3conn, d) 681 if err != nil { 682 return err 683 } 684 if websiteEndpoint != nil { 685 if err := d.Set("website_endpoint", websiteEndpoint.Endpoint); err != nil { 686 return err 687 } 688 if err := d.Set("website_domain", websiteEndpoint.Domain); err != nil { 689 return err 690 } 691 } 692 693 tagSet, err := getTagSetS3(s3conn, d.Id()) 694 if err != nil { 695 return err 696 } 697 698 if err := d.Set("tags", tagsToMapS3(tagSet)); err != nil { 699 return err 700 } 701 702 d.Set("arn", fmt.Sprint("arn:aws:s3:::", d.Id())) 703 704 return nil 705 } 706 707 func resourceAwsS3BucketDelete(d *schema.ResourceData, meta interface{}) error { 708 s3conn := meta.(*AWSClient).s3conn 709 710 log.Printf("[DEBUG] S3 Delete Bucket: %s", d.Id()) 711 _, err := s3conn.DeleteBucket(&s3.DeleteBucketInput{ 712 Bucket: 
aws.String(d.Id()), 713 }) 714 if err != nil { 715 ec2err, ok := err.(awserr.Error) 716 if ok && ec2err.Code() == "BucketNotEmpty" { 717 if d.Get("force_destroy").(bool) { 718 // bucket may have things delete them 719 log.Printf("[DEBUG] S3 Bucket attempting to forceDestroy %+v", err) 720 721 bucket := d.Get("bucket").(string) 722 resp, err := s3conn.ListObjectVersions( 723 &s3.ListObjectVersionsInput{ 724 Bucket: aws.String(bucket), 725 }, 726 ) 727 728 if err != nil { 729 return fmt.Errorf("Error S3 Bucket list Object Versions err: %s", err) 730 } 731 732 objectsToDelete := make([]*s3.ObjectIdentifier, 0) 733 734 if len(resp.DeleteMarkers) != 0 { 735 736 for _, v := range resp.DeleteMarkers { 737 objectsToDelete = append(objectsToDelete, &s3.ObjectIdentifier{ 738 Key: v.Key, 739 VersionId: v.VersionId, 740 }) 741 } 742 } 743 744 if len(resp.Versions) != 0 { 745 for _, v := range resp.Versions { 746 objectsToDelete = append(objectsToDelete, &s3.ObjectIdentifier{ 747 Key: v.Key, 748 VersionId: v.VersionId, 749 }) 750 } 751 } 752 753 params := &s3.DeleteObjectsInput{ 754 Bucket: aws.String(bucket), 755 Delete: &s3.Delete{ 756 Objects: objectsToDelete, 757 }, 758 } 759 760 _, err = s3conn.DeleteObjects(params) 761 762 if err != nil { 763 return fmt.Errorf("Error S3 Bucket force_destroy error deleting: %s", err) 764 } 765 766 // this line recurses until all objects are deleted or an error is returned 767 return resourceAwsS3BucketDelete(d, meta) 768 } 769 } 770 return fmt.Errorf("Error deleting S3 Bucket: %s", err) 771 } 772 return nil 773 } 774 775 func resourceAwsS3BucketPolicyUpdate(s3conn *s3.S3, d *schema.ResourceData) error { 776 bucket := d.Get("bucket").(string) 777 policy := d.Get("policy").(string) 778 779 if policy != "" { 780 log.Printf("[DEBUG] S3 bucket: %s, put policy: %s", bucket, policy) 781 782 params := &s3.PutBucketPolicyInput{ 783 Bucket: aws.String(bucket), 784 Policy: aws.String(policy), 785 } 786 787 err := resource.Retry(1*time.Minute, func() 
*resource.RetryError { 788 if _, err := s3conn.PutBucketPolicy(params); err != nil { 789 if awserr, ok := err.(awserr.Error); ok { 790 if awserr.Code() == "MalformedPolicy" { 791 return resource.RetryableError(awserr) 792 } 793 } 794 return resource.NonRetryableError(err) 795 } 796 return nil 797 }) 798 799 if err != nil { 800 return fmt.Errorf("Error putting S3 policy: %s", err) 801 } 802 } else { 803 log.Printf("[DEBUG] S3 bucket: %s, delete policy: %s", bucket, policy) 804 _, err := s3conn.DeleteBucketPolicy(&s3.DeleteBucketPolicyInput{ 805 Bucket: aws.String(bucket), 806 }) 807 808 if err != nil { 809 return fmt.Errorf("Error deleting S3 policy: %s", err) 810 } 811 } 812 813 return nil 814 } 815 816 func resourceAwsS3BucketCorsUpdate(s3conn *s3.S3, d *schema.ResourceData) error { 817 bucket := d.Get("bucket").(string) 818 rawCors := d.Get("cors_rule").([]interface{}) 819 820 if len(rawCors) == 0 { 821 // Delete CORS 822 log.Printf("[DEBUG] S3 bucket: %s, delete CORS", bucket) 823 _, err := s3conn.DeleteBucketCors(&s3.DeleteBucketCorsInput{ 824 Bucket: aws.String(bucket), 825 }) 826 if err != nil { 827 return fmt.Errorf("Error deleting S3 CORS: %s", err) 828 } 829 } else { 830 // Put CORS 831 rules := make([]*s3.CORSRule, 0, len(rawCors)) 832 for _, cors := range rawCors { 833 corsMap := cors.(map[string]interface{}) 834 r := &s3.CORSRule{} 835 for k, v := range corsMap { 836 log.Printf("[DEBUG] S3 bucket: %s, put CORS: %#v, %#v", bucket, k, v) 837 if k == "max_age_seconds" { 838 r.MaxAgeSeconds = aws.Int64(int64(v.(int))) 839 } else { 840 vMap := make([]*string, len(v.([]interface{}))) 841 for i, vv := range v.([]interface{}) { 842 str := vv.(string) 843 vMap[i] = aws.String(str) 844 } 845 switch k { 846 case "allowed_headers": 847 r.AllowedHeaders = vMap 848 case "allowed_methods": 849 r.AllowedMethods = vMap 850 case "allowed_origins": 851 r.AllowedOrigins = vMap 852 case "expose_headers": 853 r.ExposeHeaders = vMap 854 } 855 } 856 } 857 rules = append(rules, 
r) 858 } 859 corsInput := &s3.PutBucketCorsInput{ 860 Bucket: aws.String(bucket), 861 CORSConfiguration: &s3.CORSConfiguration{ 862 CORSRules: rules, 863 }, 864 } 865 log.Printf("[DEBUG] S3 bucket: %s, put CORS: %#v", bucket, corsInput) 866 _, err := s3conn.PutBucketCors(corsInput) 867 if err != nil { 868 return fmt.Errorf("Error putting S3 CORS: %s", err) 869 } 870 } 871 872 return nil 873 } 874 875 func resourceAwsS3BucketWebsiteUpdate(s3conn *s3.S3, d *schema.ResourceData) error { 876 ws := d.Get("website").([]interface{}) 877 878 if len(ws) == 1 { 879 var w map[string]interface{} 880 if ws[0] != nil { 881 w = ws[0].(map[string]interface{}) 882 } else { 883 w = make(map[string]interface{}) 884 } 885 return resourceAwsS3BucketWebsitePut(s3conn, d, w) 886 } else if len(ws) == 0 { 887 return resourceAwsS3BucketWebsiteDelete(s3conn, d) 888 } else { 889 return fmt.Errorf("Cannot specify more than one website.") 890 } 891 } 892 893 func resourceAwsS3BucketWebsitePut(s3conn *s3.S3, d *schema.ResourceData, website map[string]interface{}) error { 894 bucket := d.Get("bucket").(string) 895 896 var indexDocument, errorDocument, redirectAllRequestsTo, routingRules string 897 if v, ok := website["index_document"]; ok { 898 indexDocument = v.(string) 899 } 900 if v, ok := website["error_document"]; ok { 901 errorDocument = v.(string) 902 } 903 if v, ok := website["redirect_all_requests_to"]; ok { 904 redirectAllRequestsTo = v.(string) 905 } 906 if v, ok := website["routing_rules"]; ok { 907 routingRules = v.(string) 908 } 909 910 if indexDocument == "" && redirectAllRequestsTo == "" { 911 return fmt.Errorf("Must specify either index_document or redirect_all_requests_to.") 912 } 913 914 websiteConfiguration := &s3.WebsiteConfiguration{} 915 916 if indexDocument != "" { 917 websiteConfiguration.IndexDocument = &s3.IndexDocument{Suffix: aws.String(indexDocument)} 918 } 919 920 if errorDocument != "" { 921 websiteConfiguration.ErrorDocument = &s3.ErrorDocument{Key: 
aws.String(errorDocument)} 922 } 923 924 if redirectAllRequestsTo != "" { 925 redirect, err := url.Parse(redirectAllRequestsTo) 926 if err == nil && redirect.Scheme != "" { 927 websiteConfiguration.RedirectAllRequestsTo = &s3.RedirectAllRequestsTo{HostName: aws.String(redirect.Host), Protocol: aws.String(redirect.Scheme)} 928 } else { 929 websiteConfiguration.RedirectAllRequestsTo = &s3.RedirectAllRequestsTo{HostName: aws.String(redirectAllRequestsTo)} 930 } 931 } 932 933 if routingRules != "" { 934 var unmarshaledRules []*s3.RoutingRule 935 if err := json.Unmarshal([]byte(routingRules), &unmarshaledRules); err != nil { 936 return err 937 } 938 websiteConfiguration.RoutingRules = unmarshaledRules 939 } 940 941 putInput := &s3.PutBucketWebsiteInput{ 942 Bucket: aws.String(bucket), 943 WebsiteConfiguration: websiteConfiguration, 944 } 945 946 log.Printf("[DEBUG] S3 put bucket website: %#v", putInput) 947 948 _, err := s3conn.PutBucketWebsite(putInput) 949 if err != nil { 950 return fmt.Errorf("Error putting S3 website: %s", err) 951 } 952 953 return nil 954 } 955 956 func resourceAwsS3BucketWebsiteDelete(s3conn *s3.S3, d *schema.ResourceData) error { 957 bucket := d.Get("bucket").(string) 958 deleteInput := &s3.DeleteBucketWebsiteInput{Bucket: aws.String(bucket)} 959 960 log.Printf("[DEBUG] S3 delete bucket website: %#v", deleteInput) 961 962 _, err := s3conn.DeleteBucketWebsite(deleteInput) 963 if err != nil { 964 return fmt.Errorf("Error deleting S3 website: %s", err) 965 } 966 967 d.Set("website_endpoint", "") 968 d.Set("website_domain", "") 969 970 return nil 971 } 972 973 func websiteEndpoint(s3conn *s3.S3, d *schema.ResourceData) (*S3Website, error) { 974 // If the bucket doesn't have a website configuration, return an empty 975 // endpoint 976 if _, ok := d.GetOk("website"); !ok { 977 return nil, nil 978 } 979 980 bucket := d.Get("bucket").(string) 981 982 // Lookup the region for this bucket 983 location, err := s3conn.GetBucketLocation( 984 
&s3.GetBucketLocationInput{ 985 Bucket: aws.String(bucket), 986 }, 987 ) 988 if err != nil { 989 return nil, err 990 } 991 var region string 992 if location.LocationConstraint != nil { 993 region = *location.LocationConstraint 994 } 995 996 return WebsiteEndpoint(bucket, region), nil 997 } 998 999 func WebsiteEndpoint(bucket string, region string) *S3Website { 1000 domain := WebsiteDomainUrl(region) 1001 return &S3Website{Endpoint: fmt.Sprintf("%s.%s", bucket, domain), Domain: domain} 1002 } 1003 1004 func WebsiteDomainUrl(region string) string { 1005 region = normalizeRegion(region) 1006 1007 // Frankfurt(and probably future) regions uses different syntax for website endpoints 1008 // http://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteEndpoints.html 1009 if region == "eu-central-1" { 1010 return fmt.Sprintf("s3-website.%s.amazonaws.com", region) 1011 } 1012 1013 return fmt.Sprintf("s3-website-%s.amazonaws.com", region) 1014 } 1015 1016 func resourceAwsS3BucketAclUpdate(s3conn *s3.S3, d *schema.ResourceData) error { 1017 acl := d.Get("acl").(string) 1018 bucket := d.Get("bucket").(string) 1019 1020 i := &s3.PutBucketAclInput{ 1021 Bucket: aws.String(bucket), 1022 ACL: aws.String(acl), 1023 } 1024 log.Printf("[DEBUG] S3 put bucket ACL: %#v", i) 1025 1026 _, err := s3conn.PutBucketAcl(i) 1027 if err != nil { 1028 return fmt.Errorf("Error putting S3 ACL: %s", err) 1029 } 1030 1031 return nil 1032 } 1033 1034 func resourceAwsS3BucketVersioningUpdate(s3conn *s3.S3, d *schema.ResourceData) error { 1035 v := d.Get("versioning").(*schema.Set).List() 1036 bucket := d.Get("bucket").(string) 1037 vc := &s3.VersioningConfiguration{} 1038 1039 if len(v) > 0 { 1040 c := v[0].(map[string]interface{}) 1041 1042 if c["enabled"].(bool) { 1043 vc.Status = aws.String(s3.BucketVersioningStatusEnabled) 1044 } else { 1045 vc.Status = aws.String(s3.BucketVersioningStatusSuspended) 1046 } 1047 } else { 1048 vc.Status = aws.String(s3.BucketVersioningStatusSuspended) 1049 } 1050 1051 i := 
&s3.PutBucketVersioningInput{
		Bucket:                  aws.String(bucket),
		VersioningConfiguration: vc,
	}
	log.Printf("[DEBUG] S3 put bucket versioning: %#v", i)

	_, err := s3conn.PutBucketVersioning(i)
	if err != nil {
		return fmt.Errorf("Error putting S3 versioning: %s", err)
	}

	return nil
}

// resourceAwsS3BucketLoggingUpdate pushes the resource's "logging" block to S3
// via PutBucketLogging. When the "logging" set is empty the request carries an
// empty BucketLoggingStatus, which disables access logging on the bucket.
func resourceAwsS3BucketLoggingUpdate(s3conn *s3.S3, d *schema.ResourceData) error {
	logging := d.Get("logging").(*schema.Set).List()
	bucket := d.Get("bucket").(string)
	loggingStatus := &s3.BucketLoggingStatus{}

	if len(logging) > 0 {
		// At most one logging configuration is supported; use the first entry.
		c := logging[0].(map[string]interface{})

		loggingEnabled := &s3.LoggingEnabled{}
		if val, ok := c["target_bucket"]; ok {
			loggingEnabled.TargetBucket = aws.String(val.(string))
		}
		if val, ok := c["target_prefix"]; ok {
			loggingEnabled.TargetPrefix = aws.String(val.(string))
		}

		loggingStatus.LoggingEnabled = loggingEnabled
	}

	i := &s3.PutBucketLoggingInput{
		Bucket:              aws.String(bucket),
		BucketLoggingStatus: loggingStatus,
	}
	log.Printf("[DEBUG] S3 put bucket logging: %#v", i)

	_, err := s3conn.PutBucketLogging(i)
	if err != nil {
		return fmt.Errorf("Error putting S3 logging: %s", err)
	}

	return nil
}

// resourceAwsS3BucketLifecycleUpdate translates every "lifecycle_rule" entry
// into an s3.LifecycleRule and applies the full set with
// PutBucketLifecycleConfiguration. The S3 API replaces (does not merge) the
// bucket's lifecycle configuration, so all rules are rebuilt on every update.
func resourceAwsS3BucketLifecycleUpdate(s3conn *s3.S3, d *schema.ResourceData) error {
	bucket := d.Get("bucket").(string)

	lifecycleRules := d.Get("lifecycle_rule").([]interface{})

	rules := make([]*s3.LifecycleRule, 0, len(lifecycleRules))

	for i, lifecycleRule := range lifecycleRules {
		r := lifecycleRule.(map[string]interface{})

		rule := &s3.LifecycleRule{
			Prefix: aws.String(r["prefix"].(string)),
		}

		// ID: generate a unique one when the user did not set it, since the
		// S3 API requires each rule to carry an identifier.
		if val, ok := r["id"].(string); ok && val != "" {
			rule.ID = aws.String(val)
		} else {
			rule.ID = aws.String(resource.PrefixedUniqueId("tf-s3-lifecycle-"))
		}

		// Enabled
		if val, ok := r["enabled"].(bool); ok && val {
			rule.Status = aws.String(s3.ExpirationStatusEnabled)
		} else {
			rule.Status = aws.String(s3.ExpirationStatusDisabled)
		}

		// AbortIncompleteMultipartUpload
		if val, ok := r["abort_incomplete_multipart_upload_days"].(int); ok && val > 0 {
			rule.AbortIncompleteMultipartUpload = &s3.AbortIncompleteMultipartUpload{
				DaysAfterInitiation: aws.Int64(int64(val)),
			}
		}

		// Expiration: exactly one of date / days / expired_object_delete_marker
		// is sent, with precedence in that order.
		expiration := d.Get(fmt.Sprintf("lifecycle_rule.%d.expiration", i)).(*schema.Set).List()
		if len(expiration) > 0 {
			e := expiration[0].(map[string]interface{})
			// NOTE(review): this `i` shadows the loop index above; the index is
			// no longer needed inside this branch, so behavior is unaffected.
			i := &s3.LifecycleExpiration{}

			if val, ok := e["date"].(string); ok && val != "" {
				// "date" is a bare YYYY-MM-DD; anchor it to midnight UTC.
				t, err := time.Parse(time.RFC3339, fmt.Sprintf("%sT00:00:00Z", val))
				if err != nil {
					return fmt.Errorf("Error Parsing AWS S3 Bucket Lifecycle Expiration Date: %s", err.Error())
				}
				i.Date = aws.Time(t)
			} else if val, ok := e["days"].(int); ok && val > 0 {
				i.Days = aws.Int64(int64(val))
			} else if val, ok := e["expired_object_delete_marker"].(bool); ok {
				i.ExpiredObjectDeleteMarker = aws.Bool(val)
			}
			rule.Expiration = i
		}

		// NoncurrentVersionExpiration
		nc_expiration := d.Get(fmt.Sprintf("lifecycle_rule.%d.noncurrent_version_expiration", i)).(*schema.Set).List()
		if len(nc_expiration) > 0 {
			e := nc_expiration[0].(map[string]interface{})

			if val, ok := e["days"].(int); ok && val > 0 {
				rule.NoncurrentVersionExpiration = &s3.NoncurrentVersionExpiration{
					NoncurrentDays: aws.Int64(int64(val)),
				}
			}
		}

		// Transitions: each entry sets either a fixed date or a day count,
		// plus the target storage class.
		transitions := d.Get(fmt.Sprintf("lifecycle_rule.%d.transition", i)).(*schema.Set).List()
		if len(transitions) > 0 {
			rule.Transitions = make([]*s3.Transition, 0, len(transitions))
			for _, transition := range transitions {
				transition := transition.(map[string]interface{})
				i := &s3.Transition{}
				if val, ok := transition["date"].(string); ok && val != "" {
					t, err := time.Parse(time.RFC3339, fmt.Sprintf("%sT00:00:00Z", val))
					if err != nil {
						return fmt.Errorf("Error Parsing AWS S3 Bucket Lifecycle Expiration Date: %s", err.Error())
					}
					i.Date = aws.Time(t)
				} else if val, ok := transition["days"].(int); ok && val > 0 {
					i.Days = aws.Int64(int64(val))
				}
				if val, ok := transition["storage_class"].(string); ok && val != "" {
					i.StorageClass = aws.String(val)
				}

				rule.Transitions = append(rule.Transitions, i)
			}
		}
		// NoncurrentVersionTransitions
		nc_transitions := d.Get(fmt.Sprintf("lifecycle_rule.%d.noncurrent_version_transition", i)).(*schema.Set).List()
		if len(nc_transitions) > 0 {
			rule.NoncurrentVersionTransitions = make([]*s3.NoncurrentVersionTransition, 0, len(nc_transitions))
			for _, transition := range nc_transitions {
				transition := transition.(map[string]interface{})
				i := &s3.NoncurrentVersionTransition{}
				if val, ok := transition["days"].(int); ok && val > 0 {
					i.NoncurrentDays = aws.Int64(int64(val))
				}
				if val, ok := transition["storage_class"].(string); ok && val != "" {
					i.StorageClass = aws.String(val)
				}

				rule.NoncurrentVersionTransitions = append(rule.NoncurrentVersionTransitions, i)
			}
		}

		rules = append(rules, rule)
	}

	i := &s3.PutBucketLifecycleConfigurationInput{
		Bucket: aws.String(bucket),
		LifecycleConfiguration: &s3.BucketLifecycleConfiguration{
			Rules: rules,
		},
	}

	// NOTE(review): the retry callback only ever returns NonRetryableError, so
	// no retries actually occur; the wrapper is kept for its timeout semantics.
	err := resource.Retry(1*time.Minute, func() *resource.RetryError {
		if _, err := s3conn.PutBucketLifecycleConfiguration(i); err != nil {
			return resource.NonRetryableError(err)
		}
		return nil
	})
	if err != nil {
		return fmt.Errorf("Error putting S3 lifecycle: %s", err)
	}

	return nil
}

// normalizeRoutingRules round-trips the website routing rules through JSON to
// strip null-valued fields, producing a stable string for state comparison.
func normalizeRoutingRules(w []*s3.RoutingRule) (string, error) {
	withNulls, err := json.Marshal(w)
	if err != nil {
		return "", err
	}

	var rules []map[string]interface{}
	// Fix: this Unmarshal error was previously ignored; a failure here would
	// have silently normalized the rules to an empty list.
	if err := json.Unmarshal(withNulls, &rules); err != nil {
		return "", err
	}

	var cleanRules []map[string]interface{}
	for _, rule := range rules {
		cleanRules = append(cleanRules, removeNil(rule))
	}

	withoutNulls, err := json.Marshal(cleanRules)
	if err != nil {
		return "", err
	}

	return string(withoutNulls), nil
}

// removeNil returns a copy of data with nil values dropped, recursing into
// nested map[string]interface{} values. Nils inside slices are not touched.
func removeNil(data map[string]interface{}) map[string]interface{} {
	withoutNil := make(map[string]interface{})

	for k, v := range data {
		if v == nil {
			continue
		}

		switch v.(type) {
		case map[string]interface{}:
			withoutNil[k] = removeNil(v.(map[string]interface{}))
		default:
			withoutNil[k] = v
		}
	}

	return withoutNil
}

// normalizeJson is the StateFunc for the "policy" attribute: it re-marshals
// the JSON so semantically equal policies compare equal in state. Invalid
// JSON yields an error string as the stored value (historical behavior).
func normalizeJson(jsonString interface{}) string {
	if jsonString == nil || jsonString == "" {
		return ""
	}
	var j interface{}
	err := json.Unmarshal([]byte(jsonString.(string)), &j)
	if err != nil {
		return fmt.Sprintf("Error parsing JSON: %s", err)
	}
	b, _ := json.Marshal(j)
	return string(b[:])
}

// normalizeRegion maps the empty region returned by GetBucketLocation for
// US Standard buckets to its canonical name.
func normalizeRegion(region string) string {
	// Default to us-east-1 if the bucket doesn't have a region:
	// http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html
	if region == "" {
		region = "us-east-1"
	}

	return region
}

// expirationHash is the schema.Set hash for lifecycle expiration blocks,
// keyed on date, days and expired_object_delete_marker.
func expirationHash(v interface{}) int {
	var buf bytes.Buffer
	m := v.(map[string]interface{})
	if v, ok := m["date"]; ok {
		buf.WriteString(fmt.Sprintf("%s-", v.(string)))
	}
	if v, ok := m["days"]; ok {
		buf.WriteString(fmt.Sprintf("%d-", v.(int)))
	}
	if v, ok := m["expired_object_delete_marker"]; ok {
		buf.WriteString(fmt.Sprintf("%t-", v.(bool)))
	}
	return hashcode.String(buf.String())
}

// transitionHash is the schema.Set hash for lifecycle transition blocks,
// keyed on date, days and storage_class.
func transitionHash(v interface{}) int {
	var buf bytes.Buffer
	m := v.(map[string]interface{})
	if v, ok := m["date"]; ok {
		buf.WriteString(fmt.Sprintf("%s-", v.(string)))
	}
	if v, ok := m["days"]; ok {
		buf.WriteString(fmt.Sprintf("%d-", v.(int)))
	}
	if v, ok := m["storage_class"]; ok {
		buf.WriteString(fmt.Sprintf("%s-", v.(string)))
	}
	return hashcode.String(buf.String())
}

// S3Website pairs a bucket website endpoint with its hosted-zone domain.
type S3Website struct {
	Endpoint, Domain string
}