github.com/mohanarpit/terraform@v0.6.16-0.20160909104007-291f29853544/builtin/providers/aws/resource_aws_s3_bucket.go (about) 1 package aws 2 3 import ( 4 "bytes" 5 "encoding/json" 6 "fmt" 7 "log" 8 "net/url" 9 "time" 10 11 "github.com/aws/aws-sdk-go/aws" 12 "github.com/aws/aws-sdk-go/aws/awserr" 13 "github.com/aws/aws-sdk-go/service/s3" 14 "github.com/hashicorp/terraform/helper/hashcode" 15 "github.com/hashicorp/terraform/helper/resource" 16 "github.com/hashicorp/terraform/helper/schema" 17 ) 18 19 func resourceAwsS3Bucket() *schema.Resource { 20 return &schema.Resource{ 21 Create: resourceAwsS3BucketCreate, 22 Read: resourceAwsS3BucketRead, 23 Update: resourceAwsS3BucketUpdate, 24 Delete: resourceAwsS3BucketDelete, 25 Importer: &schema.ResourceImporter{ 26 State: schema.ImportStatePassthrough, 27 }, 28 29 Schema: map[string]*schema.Schema{ 30 "bucket": &schema.Schema{ 31 Type: schema.TypeString, 32 Required: true, 33 ForceNew: true, 34 }, 35 36 "arn": &schema.Schema{ 37 Type: schema.TypeString, 38 Optional: true, 39 Computed: true, 40 }, 41 42 "acl": &schema.Schema{ 43 Type: schema.TypeString, 44 Default: "private", 45 Optional: true, 46 }, 47 48 "policy": &schema.Schema{ 49 Type: schema.TypeString, 50 Optional: true, 51 Computed: true, 52 DiffSuppressFunc: suppressEquivalentAwsPolicyDiffs, 53 }, 54 55 "cors_rule": &schema.Schema{ 56 Type: schema.TypeList, 57 Optional: true, 58 Elem: &schema.Resource{ 59 Schema: map[string]*schema.Schema{ 60 "allowed_headers": &schema.Schema{ 61 Type: schema.TypeList, 62 Optional: true, 63 Elem: &schema.Schema{Type: schema.TypeString}, 64 }, 65 "allowed_methods": &schema.Schema{ 66 Type: schema.TypeList, 67 Required: true, 68 Elem: &schema.Schema{Type: schema.TypeString}, 69 }, 70 "allowed_origins": &schema.Schema{ 71 Type: schema.TypeList, 72 Required: true, 73 Elem: &schema.Schema{Type: schema.TypeString}, 74 }, 75 "expose_headers": &schema.Schema{ 76 Type: schema.TypeList, 77 Optional: true, 78 Elem: &schema.Schema{Type: 
schema.TypeString}, 79 }, 80 "max_age_seconds": &schema.Schema{ 81 Type: schema.TypeInt, 82 Optional: true, 83 }, 84 }, 85 }, 86 }, 87 88 "website": &schema.Schema{ 89 Type: schema.TypeList, 90 Optional: true, 91 Elem: &schema.Resource{ 92 Schema: map[string]*schema.Schema{ 93 "index_document": &schema.Schema{ 94 Type: schema.TypeString, 95 Optional: true, 96 }, 97 98 "error_document": &schema.Schema{ 99 Type: schema.TypeString, 100 Optional: true, 101 }, 102 103 "redirect_all_requests_to": &schema.Schema{ 104 Type: schema.TypeString, 105 ConflictsWith: []string{ 106 "website.0.index_document", 107 "website.0.error_document", 108 "website.0.routing_rules", 109 }, 110 Optional: true, 111 }, 112 113 "routing_rules": &schema.Schema{ 114 Type: schema.TypeString, 115 Optional: true, 116 StateFunc: normalizeJson, 117 }, 118 }, 119 }, 120 }, 121 122 "hosted_zone_id": &schema.Schema{ 123 Type: schema.TypeString, 124 Optional: true, 125 Computed: true, 126 }, 127 128 "region": &schema.Schema{ 129 Type: schema.TypeString, 130 Optional: true, 131 Computed: true, 132 }, 133 "website_endpoint": &schema.Schema{ 134 Type: schema.TypeString, 135 Optional: true, 136 Computed: true, 137 }, 138 "website_domain": &schema.Schema{ 139 Type: schema.TypeString, 140 Optional: true, 141 Computed: true, 142 }, 143 144 "versioning": &schema.Schema{ 145 Type: schema.TypeSet, 146 Optional: true, 147 Elem: &schema.Resource{ 148 Schema: map[string]*schema.Schema{ 149 "enabled": &schema.Schema{ 150 Type: schema.TypeBool, 151 Optional: true, 152 Default: false, 153 }, 154 }, 155 }, 156 Set: func(v interface{}) int { 157 var buf bytes.Buffer 158 m := v.(map[string]interface{}) 159 buf.WriteString(fmt.Sprintf("%t-", m["enabled"].(bool))) 160 161 return hashcode.String(buf.String()) 162 }, 163 }, 164 165 "logging": &schema.Schema{ 166 Type: schema.TypeSet, 167 Optional: true, 168 Elem: &schema.Resource{ 169 Schema: map[string]*schema.Schema{ 170 "target_bucket": &schema.Schema{ 171 Type: 
schema.TypeString, 172 Required: true, 173 }, 174 "target_prefix": &schema.Schema{ 175 Type: schema.TypeString, 176 Optional: true, 177 }, 178 }, 179 }, 180 Set: func(v interface{}) int { 181 var buf bytes.Buffer 182 m := v.(map[string]interface{}) 183 buf.WriteString(fmt.Sprintf("%s-", m["target_bucket"])) 184 buf.WriteString(fmt.Sprintf("%s-", m["target_prefix"])) 185 return hashcode.String(buf.String()) 186 }, 187 }, 188 189 "lifecycle_rule": &schema.Schema{ 190 Type: schema.TypeList, 191 Optional: true, 192 Elem: &schema.Resource{ 193 Schema: map[string]*schema.Schema{ 194 "id": &schema.Schema{ 195 Type: schema.TypeString, 196 Optional: true, 197 Computed: true, 198 ValidateFunc: validateS3BucketLifecycleRuleId, 199 }, 200 "prefix": &schema.Schema{ 201 Type: schema.TypeString, 202 Required: true, 203 }, 204 "enabled": &schema.Schema{ 205 Type: schema.TypeBool, 206 Required: true, 207 }, 208 "abort_incomplete_multipart_upload_days": &schema.Schema{ 209 Type: schema.TypeInt, 210 Optional: true, 211 }, 212 "expiration": &schema.Schema{ 213 Type: schema.TypeSet, 214 Optional: true, 215 Set: expirationHash, 216 Elem: &schema.Resource{ 217 Schema: map[string]*schema.Schema{ 218 "date": &schema.Schema{ 219 Type: schema.TypeString, 220 Optional: true, 221 ValidateFunc: validateS3BucketLifecycleTimestamp, 222 }, 223 "days": &schema.Schema{ 224 Type: schema.TypeInt, 225 Optional: true, 226 }, 227 "expired_object_delete_marker": &schema.Schema{ 228 Type: schema.TypeBool, 229 Optional: true, 230 }, 231 }, 232 }, 233 }, 234 "noncurrent_version_expiration": &schema.Schema{ 235 Type: schema.TypeSet, 236 Optional: true, 237 Set: expirationHash, 238 Elem: &schema.Resource{ 239 Schema: map[string]*schema.Schema{ 240 "days": &schema.Schema{ 241 Type: schema.TypeInt, 242 Optional: true, 243 }, 244 }, 245 }, 246 }, 247 "transition": &schema.Schema{ 248 Type: schema.TypeSet, 249 Optional: true, 250 Set: transitionHash, 251 Elem: &schema.Resource{ 252 Schema: 
map[string]*schema.Schema{ 253 "date": &schema.Schema{ 254 Type: schema.TypeString, 255 Optional: true, 256 ValidateFunc: validateS3BucketLifecycleTimestamp, 257 }, 258 "days": &schema.Schema{ 259 Type: schema.TypeInt, 260 Optional: true, 261 }, 262 "storage_class": &schema.Schema{ 263 Type: schema.TypeString, 264 Required: true, 265 ValidateFunc: validateS3BucketLifecycleStorageClass, 266 }, 267 }, 268 }, 269 }, 270 "noncurrent_version_transition": &schema.Schema{ 271 Type: schema.TypeSet, 272 Optional: true, 273 Set: transitionHash, 274 Elem: &schema.Resource{ 275 Schema: map[string]*schema.Schema{ 276 "days": &schema.Schema{ 277 Type: schema.TypeInt, 278 Optional: true, 279 }, 280 "storage_class": &schema.Schema{ 281 Type: schema.TypeString, 282 Required: true, 283 ValidateFunc: validateS3BucketLifecycleStorageClass, 284 }, 285 }, 286 }, 287 }, 288 }, 289 }, 290 }, 291 292 "force_destroy": &schema.Schema{ 293 Type: schema.TypeBool, 294 Optional: true, 295 Default: false, 296 }, 297 298 "acceleration_status": &schema.Schema{ 299 Type: schema.TypeString, 300 Optional: true, 301 Computed: true, 302 ValidateFunc: validateS3BucketAccelerationStatus, 303 }, 304 305 "request_payer": &schema.Schema{ 306 Type: schema.TypeString, 307 Optional: true, 308 Computed: true, 309 ValidateFunc: validateS3BucketRequestPayerType, 310 }, 311 312 "tags": tagsSchema(), 313 }, 314 } 315 } 316 317 func resourceAwsS3BucketCreate(d *schema.ResourceData, meta interface{}) error { 318 s3conn := meta.(*AWSClient).s3conn 319 awsRegion := meta.(*AWSClient).region 320 321 // Get the bucket and acl 322 bucket := d.Get("bucket").(string) 323 acl := d.Get("acl").(string) 324 325 log.Printf("[DEBUG] S3 bucket create: %s, ACL: %s", bucket, acl) 326 327 req := &s3.CreateBucketInput{ 328 Bucket: aws.String(bucket), 329 ACL: aws.String(acl), 330 } 331 332 // Special case us-east-1 region and do not set the LocationConstraint. 
333 // See "Request Elements: http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUT.html 334 if awsRegion != "us-east-1" { 335 req.CreateBucketConfiguration = &s3.CreateBucketConfiguration{ 336 LocationConstraint: aws.String(awsRegion), 337 } 338 } 339 340 err := resource.Retry(5*time.Minute, func() *resource.RetryError { 341 log.Printf("[DEBUG] Trying to create new S3 bucket: %q", bucket) 342 _, err := s3conn.CreateBucket(req) 343 if awsErr, ok := err.(awserr.Error); ok { 344 if awsErr.Code() == "OperationAborted" { 345 log.Printf("[WARN] Got an error while trying to create S3 bucket %s: %s", bucket, err) 346 return resource.RetryableError( 347 fmt.Errorf("[WARN] Error creating S3 bucket %s, retrying: %s", 348 bucket, err)) 349 } 350 } 351 if err != nil { 352 return resource.NonRetryableError(err) 353 } 354 355 return nil 356 }) 357 358 if err != nil { 359 return fmt.Errorf("Error creating S3 bucket: %s", err) 360 } 361 362 // Assign the bucket name as the resource ID 363 d.SetId(bucket) 364 365 return resourceAwsS3BucketUpdate(d, meta) 366 } 367 368 func resourceAwsS3BucketUpdate(d *schema.ResourceData, meta interface{}) error { 369 s3conn := meta.(*AWSClient).s3conn 370 if err := setTagsS3(s3conn, d); err != nil { 371 return err 372 } 373 374 if d.HasChange("policy") { 375 if err := resourceAwsS3BucketPolicyUpdate(s3conn, d); err != nil { 376 return err 377 } 378 } 379 380 if d.HasChange("cors_rule") { 381 if err := resourceAwsS3BucketCorsUpdate(s3conn, d); err != nil { 382 return err 383 } 384 } 385 386 if d.HasChange("website") { 387 if err := resourceAwsS3BucketWebsiteUpdate(s3conn, d); err != nil { 388 return err 389 } 390 } 391 392 if d.HasChange("versioning") { 393 if err := resourceAwsS3BucketVersioningUpdate(s3conn, d); err != nil { 394 return err 395 } 396 } 397 if d.HasChange("acl") { 398 if err := resourceAwsS3BucketAclUpdate(s3conn, d); err != nil { 399 return err 400 } 401 } 402 403 if d.HasChange("logging") { 404 if err := 
resourceAwsS3BucketLoggingUpdate(s3conn, d); err != nil { 405 return err 406 } 407 } 408 409 if d.HasChange("lifecycle_rule") { 410 if err := resourceAwsS3BucketLifecycleUpdate(s3conn, d); err != nil { 411 return err 412 } 413 } 414 415 if d.HasChange("acceleration_status") { 416 if err := resourceAwsS3BucketAccelerationUpdate(s3conn, d); err != nil { 417 return err 418 } 419 } 420 421 if d.HasChange("request_payer") { 422 if err := resourceAwsS3BucketRequestPayerUpdate(s3conn, d); err != nil { 423 return err 424 } 425 } 426 427 return resourceAwsS3BucketRead(d, meta) 428 } 429 430 func resourceAwsS3BucketRead(d *schema.ResourceData, meta interface{}) error { 431 s3conn := meta.(*AWSClient).s3conn 432 433 var err error 434 _, err = s3conn.HeadBucket(&s3.HeadBucketInput{ 435 Bucket: aws.String(d.Id()), 436 }) 437 if err != nil { 438 if awsError, ok := err.(awserr.RequestFailure); ok && awsError.StatusCode() == 404 { 439 log.Printf("[WARN] S3 Bucket (%s) not found, error code (404)", d.Id()) 440 d.SetId("") 441 return nil 442 } else { 443 // some of the AWS SDK's errors can be empty strings, so let's add 444 // some additional context. 
445 return fmt.Errorf("error reading S3 bucket \"%s\": %s", d.Id(), err) 446 } 447 } 448 449 // In the import case, we won't have this 450 if _, ok := d.GetOk("bucket"); !ok { 451 d.Set("bucket", d.Id()) 452 } 453 454 // Read the policy 455 pol, err := s3conn.GetBucketPolicy(&s3.GetBucketPolicyInput{ 456 Bucket: aws.String(d.Id()), 457 }) 458 log.Printf("[DEBUG] S3 bucket: %s, read policy: %v", d.Id(), pol) 459 if err != nil { 460 if err := d.Set("policy", ""); err != nil { 461 return err 462 } 463 } else { 464 if v := pol.Policy; v == nil { 465 if err := d.Set("policy", ""); err != nil { 466 return err 467 } 468 } else if err := d.Set("policy", normalizeJson(*v)); err != nil { 469 return err 470 } 471 } 472 473 // Read the CORS 474 cors, err := s3conn.GetBucketCors(&s3.GetBucketCorsInput{ 475 Bucket: aws.String(d.Id()), 476 }) 477 if err != nil { 478 // An S3 Bucket might not have CORS configuration set. 479 if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() != "NoSuchCORSConfiguration" { 480 return err 481 } 482 log.Printf("[WARN] S3 bucket: %s, no CORS configuration could be found.", d.Id()) 483 } 484 log.Printf("[DEBUG] S3 bucket: %s, read CORS: %v", d.Id(), cors) 485 if cors.CORSRules != nil { 486 rules := make([]map[string]interface{}, 0, len(cors.CORSRules)) 487 for _, ruleObject := range cors.CORSRules { 488 rule := make(map[string]interface{}) 489 rule["allowed_headers"] = flattenStringList(ruleObject.AllowedHeaders) 490 rule["allowed_methods"] = flattenStringList(ruleObject.AllowedMethods) 491 rule["allowed_origins"] = flattenStringList(ruleObject.AllowedOrigins) 492 // Both the "ExposeHeaders" and "MaxAgeSeconds" might not be set. 
493 if ruleObject.AllowedOrigins != nil { 494 rule["expose_headers"] = flattenStringList(ruleObject.ExposeHeaders) 495 } 496 if ruleObject.MaxAgeSeconds != nil { 497 rule["max_age_seconds"] = int(*ruleObject.MaxAgeSeconds) 498 } 499 rules = append(rules, rule) 500 } 501 if err := d.Set("cors_rule", rules); err != nil { 502 return err 503 } 504 } 505 506 // Read the website configuration 507 ws, err := s3conn.GetBucketWebsite(&s3.GetBucketWebsiteInput{ 508 Bucket: aws.String(d.Id()), 509 }) 510 var websites []map[string]interface{} 511 if err == nil { 512 w := make(map[string]interface{}) 513 514 if v := ws.IndexDocument; v != nil { 515 w["index_document"] = *v.Suffix 516 } 517 518 if v := ws.ErrorDocument; v != nil { 519 w["error_document"] = *v.Key 520 } 521 522 if v := ws.RedirectAllRequestsTo; v != nil { 523 if v.Protocol == nil { 524 w["redirect_all_requests_to"] = *v.HostName 525 } else { 526 var host string 527 var path string 528 parsedHostName, err := url.Parse(*v.HostName) 529 if err == nil { 530 host = parsedHostName.Host 531 path = parsedHostName.Path 532 } else { 533 host = *v.HostName 534 path = "" 535 } 536 537 w["redirect_all_requests_to"] = (&url.URL{ 538 Host: host, 539 Path: path, 540 Scheme: *v.Protocol, 541 }).String() 542 } 543 } 544 545 if v := ws.RoutingRules; v != nil { 546 rr, err := normalizeRoutingRules(v) 547 if err != nil { 548 return fmt.Errorf("Error while marshaling routing rules: %s", err) 549 } 550 w["routing_rules"] = rr 551 } 552 553 websites = append(websites, w) 554 } 555 if err := d.Set("website", websites); err != nil { 556 return err 557 } 558 559 // Read the versioning configuration 560 versioning, err := s3conn.GetBucketVersioning(&s3.GetBucketVersioningInput{ 561 Bucket: aws.String(d.Id()), 562 }) 563 if err != nil { 564 return err 565 } 566 log.Printf("[DEBUG] S3 Bucket: %s, versioning: %v", d.Id(), versioning) 567 if versioning.Status != nil && *versioning.Status == s3.BucketVersioningStatusEnabled { 568 vcl := 
make([]map[string]interface{}, 0, 1) 569 vc := make(map[string]interface{}) 570 if *versioning.Status == s3.BucketVersioningStatusEnabled { 571 vc["enabled"] = true 572 } else { 573 vc["enabled"] = false 574 } 575 vcl = append(vcl, vc) 576 if err := d.Set("versioning", vcl); err != nil { 577 return err 578 } 579 } 580 581 //read the acceleration status 582 accelerate, err := s3conn.GetBucketAccelerateConfiguration(&s3.GetBucketAccelerateConfigurationInput{ 583 Bucket: aws.String(d.Id()), 584 }) 585 if err != nil { 586 // Amazon S3 Transfer Acceleration might not be supported in the 587 // given region, for example, China (Beijing) and the Government 588 // Cloud does not support this feature at the moment. 589 if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() != "UnsupportedArgument" { 590 return err 591 } 592 log.Printf("[WARN] S3 bucket: %s, the S3 Transfer Accelaration is not supported in the region: %s", d.Id(), meta.(*AWSClient).region) 593 } else { 594 log.Printf("[DEBUG] S3 bucket: %s, read Acceleration: %v", d.Id(), accelerate) 595 d.Set("acceleration_status", accelerate.Status) 596 } 597 598 // Read the request payer configuration. 
599 payer, err := s3conn.GetBucketRequestPayment(&s3.GetBucketRequestPaymentInput{ 600 Bucket: aws.String(d.Id()), 601 }) 602 if err != nil { 603 return err 604 } 605 log.Printf("[DEBUG] S3 Bucket: %s, read request payer: %v", d.Id(), payer) 606 if payer.Payer != nil { 607 if err := d.Set("request_payer", *payer.Payer); err != nil { 608 return err 609 } 610 } 611 612 // Read the logging configuration 613 logging, err := s3conn.GetBucketLogging(&s3.GetBucketLoggingInput{ 614 Bucket: aws.String(d.Id()), 615 }) 616 if err != nil { 617 return err 618 } 619 620 log.Printf("[DEBUG] S3 Bucket: %s, logging: %v", d.Id(), logging) 621 if v := logging.LoggingEnabled; v != nil { 622 lcl := make([]map[string]interface{}, 0, 1) 623 lc := make(map[string]interface{}) 624 if *v.TargetBucket != "" { 625 lc["target_bucket"] = *v.TargetBucket 626 } 627 if *v.TargetPrefix != "" { 628 lc["target_prefix"] = *v.TargetPrefix 629 } 630 lcl = append(lcl, lc) 631 if err := d.Set("logging", lcl); err != nil { 632 return err 633 } 634 } 635 636 // Read the lifecycle configuration 637 lifecycle, err := s3conn.GetBucketLifecycleConfiguration(&s3.GetBucketLifecycleConfigurationInput{ 638 Bucket: aws.String(d.Id()), 639 }) 640 if err != nil { 641 if awsError, ok := err.(awserr.RequestFailure); ok && awsError.StatusCode() != 404 { 642 return err 643 } 644 } 645 log.Printf("[DEBUG] S3 Bucket: %s, lifecycle: %v", d.Id(), lifecycle) 646 if len(lifecycle.Rules) > 0 { 647 rules := make([]map[string]interface{}, 0, len(lifecycle.Rules)) 648 649 for _, lifecycleRule := range lifecycle.Rules { 650 rule := make(map[string]interface{}) 651 652 // ID 653 if lifecycleRule.ID != nil && *lifecycleRule.ID != "" { 654 rule["id"] = *lifecycleRule.ID 655 } 656 // Prefix 657 if lifecycleRule.Prefix != nil && *lifecycleRule.Prefix != "" { 658 rule["prefix"] = *lifecycleRule.Prefix 659 } 660 // Enabled 661 if lifecycleRule.Status != nil { 662 if *lifecycleRule.Status == s3.ExpirationStatusEnabled { 663 rule["enabled"] 
= true 664 } else { 665 rule["enabled"] = false 666 } 667 } 668 669 // AbortIncompleteMultipartUploadDays 670 if lifecycleRule.AbortIncompleteMultipartUpload != nil { 671 if lifecycleRule.AbortIncompleteMultipartUpload.DaysAfterInitiation != nil { 672 rule["abort_incomplete_multipart_upload_days"] = int(*lifecycleRule.AbortIncompleteMultipartUpload.DaysAfterInitiation) 673 } 674 } 675 676 // expiration 677 if lifecycleRule.Expiration != nil { 678 e := make(map[string]interface{}) 679 if lifecycleRule.Expiration.Date != nil { 680 e["date"] = (*lifecycleRule.Expiration.Date).Format("2006-01-02") 681 } 682 if lifecycleRule.Expiration.Days != nil { 683 e["days"] = int(*lifecycleRule.Expiration.Days) 684 } 685 if lifecycleRule.Expiration.ExpiredObjectDeleteMarker != nil { 686 e["expired_object_delete_marker"] = *lifecycleRule.Expiration.ExpiredObjectDeleteMarker 687 } 688 rule["expiration"] = schema.NewSet(expirationHash, []interface{}{e}) 689 } 690 // noncurrent_version_expiration 691 if lifecycleRule.NoncurrentVersionExpiration != nil { 692 e := make(map[string]interface{}) 693 if lifecycleRule.NoncurrentVersionExpiration.NoncurrentDays != nil { 694 e["days"] = int(*lifecycleRule.NoncurrentVersionExpiration.NoncurrentDays) 695 } 696 rule["noncurrent_version_expiration"] = schema.NewSet(expirationHash, []interface{}{e}) 697 } 698 //// transition 699 if len(lifecycleRule.Transitions) > 0 { 700 transitions := make([]interface{}, 0, len(lifecycleRule.Transitions)) 701 for _, v := range lifecycleRule.Transitions { 702 t := make(map[string]interface{}) 703 if v.Date != nil { 704 t["date"] = (*v.Date).Format("2006-01-02") 705 } 706 if v.Days != nil { 707 t["days"] = int(*v.Days) 708 } 709 if v.StorageClass != nil { 710 t["storage_class"] = *v.StorageClass 711 } 712 transitions = append(transitions, t) 713 } 714 rule["transition"] = schema.NewSet(transitionHash, transitions) 715 } 716 // noncurrent_version_transition 717 if len(lifecycleRule.NoncurrentVersionTransitions) > 0 
{ 718 transitions := make([]interface{}, 0, len(lifecycleRule.NoncurrentVersionTransitions)) 719 for _, v := range lifecycleRule.NoncurrentVersionTransitions { 720 t := make(map[string]interface{}) 721 if v.NoncurrentDays != nil { 722 t["days"] = int(*v.NoncurrentDays) 723 } 724 if v.StorageClass != nil { 725 t["storage_class"] = *v.StorageClass 726 } 727 transitions = append(transitions, t) 728 } 729 rule["noncurrent_version_transition"] = schema.NewSet(transitionHash, transitions) 730 } 731 732 rules = append(rules, rule) 733 } 734 735 if err := d.Set("lifecycle_rule", rules); err != nil { 736 return err 737 } 738 } 739 740 // Add the region as an attribute 741 location, err := s3conn.GetBucketLocation( 742 &s3.GetBucketLocationInput{ 743 Bucket: aws.String(d.Id()), 744 }, 745 ) 746 if err != nil { 747 return err 748 } 749 var region string 750 if location.LocationConstraint != nil { 751 region = *location.LocationConstraint 752 } 753 region = normalizeRegion(region) 754 if err := d.Set("region", region); err != nil { 755 return err 756 } 757 758 // Add the hosted zone ID for this bucket's region as an attribute 759 hostedZoneID := HostedZoneIDForRegion(region) 760 if err := d.Set("hosted_zone_id", hostedZoneID); err != nil { 761 return err 762 } 763 764 // Add website_endpoint as an attribute 765 websiteEndpoint, err := websiteEndpoint(s3conn, d) 766 if err != nil { 767 return err 768 } 769 if websiteEndpoint != nil { 770 if err := d.Set("website_endpoint", websiteEndpoint.Endpoint); err != nil { 771 return err 772 } 773 if err := d.Set("website_domain", websiteEndpoint.Domain); err != nil { 774 return err 775 } 776 } 777 778 tagSet, err := getTagSetS3(s3conn, d.Id()) 779 if err != nil { 780 return err 781 } 782 783 if err := d.Set("tags", tagsToMapS3(tagSet)); err != nil { 784 return err 785 } 786 787 d.Set("arn", fmt.Sprint("arn:aws:s3:::", d.Id())) 788 789 return nil 790 } 791 792 func resourceAwsS3BucketDelete(d *schema.ResourceData, meta interface{}) error 
// resourceAwsS3BucketDelete removes the bucket. When the bucket is not empty
// and force_destroy is set, it deletes one ListObjectVersions page of object
// versions and delete markers, then recurses until the bucket drains (or an
// error occurs).
func resourceAwsS3BucketDelete(d *schema.ResourceData, meta interface{}) error {
	s3conn := meta.(*AWSClient).s3conn

	log.Printf("[DEBUG] S3 Delete Bucket: %s", d.Id())
	_, err := s3conn.DeleteBucket(&s3.DeleteBucketInput{
		Bucket: aws.String(d.Id()),
	})
	if err != nil {
		ec2err, ok := err.(awserr.Error)
		if ok && ec2err.Code() == "BucketNotEmpty" {
			if d.Get("force_destroy").(bool) {
				// bucket may have things delete them
				log.Printf("[DEBUG] S3 Bucket attempting to forceDestroy %+v", err)

				bucket := d.Get("bucket").(string)
				// NOTE(review): only the first page of versions is fetched
				// here; the recursion below is what eventually covers the
				// remaining pages.
				resp, err := s3conn.ListObjectVersions(
					&s3.ListObjectVersionsInput{
						Bucket: aws.String(bucket),
					},
				)

				if err != nil {
					return fmt.Errorf("Error S3 Bucket list Object Versions err: %s", err)
				}

				objectsToDelete := make([]*s3.ObjectIdentifier, 0)

				// Delete markers must be removed as well as object versions,
				// otherwise the bucket still counts as non-empty.
				if len(resp.DeleteMarkers) != 0 {

					for _, v := range resp.DeleteMarkers {
						objectsToDelete = append(objectsToDelete, &s3.ObjectIdentifier{
							Key:       v.Key,
							VersionId: v.VersionId,
						})
					}
				}

				if len(resp.Versions) != 0 {
					for _, v := range resp.Versions {
						objectsToDelete = append(objectsToDelete, &s3.ObjectIdentifier{
							Key:       v.Key,
							VersionId: v.VersionId,
						})
					}
				}

				params := &s3.DeleteObjectsInput{
					Bucket: aws.String(bucket),
					Delete: &s3.Delete{
						Objects: objectsToDelete,
					},
				}

				_, err = s3conn.DeleteObjects(params)

				if err != nil {
					return fmt.Errorf("Error S3 Bucket force_destroy error deleting: %s", err)
				}

				// this line recurses until all objects are deleted or an error is returned
				return resourceAwsS3BucketDelete(d, meta)
			}
		}
		return fmt.Errorf("Error deleting S3 Bucket: %s", err)
	}
	return nil
}

// resourceAwsS3BucketPolicyUpdate puts the configured bucket policy, or
// deletes the bucket policy when the "policy" attribute is empty.
func resourceAwsS3BucketPolicyUpdate(s3conn *s3.S3, d *schema.ResourceData) error {
	bucket := d.Get("bucket").(string)
	policy := d.Get("policy").(string)

	if policy != "" {
		log.Printf("[DEBUG] S3 bucket: %s, put policy: %s", bucket, policy)

		params := &s3.PutBucketPolicyInput{
			Bucket: aws.String(bucket),
			Policy: aws.String(policy),
		}

		// "MalformedPolicy" is retried: a principal referenced by a brand-new
		// policy (e.g. a just-created IAM role) may not be visible to S3 yet.
		err := resource.Retry(1*time.Minute, func() *resource.RetryError {
			if _, err := s3conn.PutBucketPolicy(params); err != nil {
				if awserr, ok := err.(awserr.Error); ok {
					if awserr.Code() == "MalformedPolicy" {
						return resource.RetryableError(awserr)
					}
				}
				return resource.NonRetryableError(err)
			}
			return nil
		})

		if err != nil {
			return fmt.Errorf("Error putting S3 policy: %s", err)
		}
	} else {
		log.Printf("[DEBUG] S3 bucket: %s, delete policy: %s", bucket, policy)
		_, err := s3conn.DeleteBucketPolicy(&s3.DeleteBucketPolicyInput{
			Bucket: aws.String(bucket),
		})

		if err != nil {
			return fmt.Errorf("Error deleting S3 policy: %s", err)
		}
	}

	return nil
}

// resourceAwsS3BucketCorsUpdate replaces the bucket's CORS configuration
// with the configured cors_rule list, or deletes it when the list is empty.
func resourceAwsS3BucketCorsUpdate(s3conn *s3.S3, d *schema.ResourceData) error {
	bucket := d.Get("bucket").(string)
	rawCors := d.Get("cors_rule").([]interface{})

	if len(rawCors) == 0 {
		// Delete CORS
		log.Printf("[DEBUG] S3 bucket: %s, delete CORS", bucket)
		_, err := s3conn.DeleteBucketCors(&s3.DeleteBucketCorsInput{
			Bucket: aws.String(bucket),
		})
		if err != nil {
			return fmt.Errorf("Error deleting S3 CORS: %s", err)
		}
	} else {
		// Put CORS
		rules := make([]*s3.CORSRule, 0, len(rawCors))
		for _, cors := range rawCors {
			corsMap := cors.(map[string]interface{})
			r := &s3.CORSRule{}
			// Iterate the rule's keys; every key except max_age_seconds is a
			// list of strings. Map iteration order does not matter here since
			// each key is assigned independently.
			for k, v := range corsMap {
				log.Printf("[DEBUG] S3 bucket: %s, put CORS: %#v, %#v", bucket, k, v)
				if k == "max_age_seconds" {
					r.MaxAgeSeconds = aws.Int64(int64(v.(int)))
				} else {
					vMap := make([]*string, len(v.([]interface{})))
					for i, vv := range v.([]interface{}) {
						str := vv.(string)
						vMap[i] = aws.String(str)
					}
					switch k {
					case "allowed_headers":
						r.AllowedHeaders = vMap
					case "allowed_methods":
						r.AllowedMethods = vMap
					case "allowed_origins":
						r.AllowedOrigins = vMap
					case "expose_headers":
						r.ExposeHeaders = vMap
					}
				}
			}
			rules = append(rules, r)
		}
		corsInput := &s3.PutBucketCorsInput{
			Bucket: aws.String(bucket),
			CORSConfiguration: &s3.CORSConfiguration{
				CORSRules: rules,
			},
		}
		log.Printf("[DEBUG] S3 bucket: %s, put CORS: %#v", bucket, corsInput)
		_, err := s3conn.PutBucketCors(corsInput)
		if err != nil {
			return fmt.Errorf("Error putting S3 CORS: %s", err)
		}
	}

	return nil
}
// resourceAwsS3BucketWebsiteUpdate applies the "website" block. At most one
// entry is allowed; an empty list removes the website configuration.
func resourceAwsS3BucketWebsiteUpdate(s3conn *s3.S3, d *schema.ResourceData) error {
	ws := d.Get("website").([]interface{})

	if len(ws) == 1 {
		var w map[string]interface{}
		if ws[0] != nil {
			w = ws[0].(map[string]interface{})
		} else {
			// A bare `website {}` block arrives as a nil element.
			w = make(map[string]interface{})
		}
		return resourceAwsS3BucketWebsitePut(s3conn, d, w)
	} else if len(ws) == 0 {
		return resourceAwsS3BucketWebsiteDelete(s3conn, d)
	} else {
		return fmt.Errorf("Cannot specify more than one website.")
	}
}

// resourceAwsS3BucketWebsitePut builds a WebsiteConfiguration from the given
// website map and puts it on the bucket. Either index_document or
// redirect_all_requests_to must be present.
func resourceAwsS3BucketWebsitePut(s3conn *s3.S3, d *schema.ResourceData, website map[string]interface{}) error {
	bucket := d.Get("bucket").(string)

	var indexDocument, errorDocument, redirectAllRequestsTo, routingRules string
	if v, ok := website["index_document"]; ok {
		indexDocument = v.(string)
	}
	if v, ok := website["error_document"]; ok {
		errorDocument = v.(string)
	}
	if v, ok := website["redirect_all_requests_to"]; ok {
		redirectAllRequestsTo = v.(string)
	}
	if v, ok := website["routing_rules"]; ok {
		routingRules = v.(string)
	}

	if indexDocument == "" && redirectAllRequestsTo == "" {
		return fmt.Errorf("Must specify either index_document or redirect_all_requests_to.")
	}

	websiteConfiguration := &s3.WebsiteConfiguration{}

	if indexDocument != "" {
		websiteConfiguration.IndexDocument = &s3.IndexDocument{Suffix: aws.String(indexDocument)}
	}

	if errorDocument != "" {
		websiteConfiguration.ErrorDocument = &s3.ErrorDocument{Key: aws.String(errorDocument)}
	}

	if redirectAllRequestsTo != "" {
		// A value with a scheme (e.g. "https://host/path") is split into
		// host+path and protocol; anything else is passed through verbatim
		// as the host name.
		redirect, err := url.Parse(redirectAllRequestsTo)
		if err == nil && redirect.Scheme != "" {
			var redirectHostBuf bytes.Buffer
			redirectHostBuf.WriteString(redirect.Host)
			if redirect.Path != "" {
				redirectHostBuf.WriteString(redirect.Path)
			}
			websiteConfiguration.RedirectAllRequestsTo = &s3.RedirectAllRequestsTo{HostName: aws.String(redirectHostBuf.String()), Protocol: aws.String(redirect.Scheme)}
		} else {
			websiteConfiguration.RedirectAllRequestsTo = &s3.RedirectAllRequestsTo{HostName: aws.String(redirectAllRequestsTo)}
		}
	}

	if routingRules != "" {
		// routing_rules is validated/normalized JSON; unmarshal straight
		// into the SDK's rule type.
		var unmarshaledRules []*s3.RoutingRule
		if err := json.Unmarshal([]byte(routingRules), &unmarshaledRules); err != nil {
			return err
		}
		websiteConfiguration.RoutingRules = unmarshaledRules
	}

	putInput := &s3.PutBucketWebsiteInput{
		Bucket:               aws.String(bucket),
		WebsiteConfiguration: websiteConfiguration,
	}

	log.Printf("[DEBUG] S3 put bucket website: %#v", putInput)

	_, err := s3conn.PutBucketWebsite(putInput)
	if err != nil {
		return fmt.Errorf("Error putting S3 website: %s", err)
	}

	return nil
}

// resourceAwsS3BucketWebsiteDelete removes the bucket's website configuration
// and clears the derived endpoint/domain attributes in state.
func resourceAwsS3BucketWebsiteDelete(s3conn *s3.S3, d *schema.ResourceData) error {
	bucket := d.Get("bucket").(string)
	deleteInput := &s3.DeleteBucketWebsiteInput{Bucket: aws.String(bucket)}

	log.Printf("[DEBUG] S3 delete bucket website: %#v", deleteInput)

	_, err := s3conn.DeleteBucketWebsite(deleteInput)
	if err != nil {
		return fmt.Errorf("Error deleting S3 website: %s", err)
	}

	d.Set("website_endpoint", "")
	d.Set("website_domain", "")

	return nil
}
1060 return nil 1061 } 1062 1063 func websiteEndpoint(s3conn *s3.S3, d *schema.ResourceData) (*S3Website, error) { 1064 // If the bucket doesn't have a website configuration, return an empty 1065 // endpoint 1066 if _, ok := d.GetOk("website"); !ok { 1067 return nil, nil 1068 } 1069 1070 bucket := d.Get("bucket").(string) 1071 1072 // Lookup the region for this bucket 1073 location, err := s3conn.GetBucketLocation( 1074 &s3.GetBucketLocationInput{ 1075 Bucket: aws.String(bucket), 1076 }, 1077 ) 1078 if err != nil { 1079 return nil, err 1080 } 1081 var region string 1082 if location.LocationConstraint != nil { 1083 region = *location.LocationConstraint 1084 } 1085 1086 return WebsiteEndpoint(bucket, region), nil 1087 } 1088 1089 func WebsiteEndpoint(bucket string, region string) *S3Website { 1090 domain := WebsiteDomainUrl(region) 1091 return &S3Website{Endpoint: fmt.Sprintf("%s.%s", bucket, domain), Domain: domain} 1092 } 1093 1094 func WebsiteDomainUrl(region string) string { 1095 region = normalizeRegion(region) 1096 1097 // Frankfurt(and probably future) regions uses different syntax for website endpoints 1098 // http://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteEndpoints.html 1099 if region == "eu-central-1" || region == "ap-south-1" { 1100 return fmt.Sprintf("s3-website.%s.amazonaws.com", region) 1101 } 1102 1103 return fmt.Sprintf("s3-website-%s.amazonaws.com", region) 1104 } 1105 1106 func resourceAwsS3BucketAclUpdate(s3conn *s3.S3, d *schema.ResourceData) error { 1107 acl := d.Get("acl").(string) 1108 bucket := d.Get("bucket").(string) 1109 1110 i := &s3.PutBucketAclInput{ 1111 Bucket: aws.String(bucket), 1112 ACL: aws.String(acl), 1113 } 1114 log.Printf("[DEBUG] S3 put bucket ACL: %#v", i) 1115 1116 _, err := s3conn.PutBucketAcl(i) 1117 if err != nil { 1118 return fmt.Errorf("Error putting S3 ACL: %s", err) 1119 } 1120 1121 return nil 1122 } 1123 1124 func resourceAwsS3BucketVersioningUpdate(s3conn *s3.S3, d *schema.ResourceData) error { 1125 v := 
d.Get("versioning").(*schema.Set).List()
	bucket := d.Get("bucket").(string)
	vc := &s3.VersioningConfiguration{}

	// An absent or disabled versioning block maps to "Suspended"; S3 has no
	// way to delete a versioning configuration outright.
	if len(v) > 0 {
		c := v[0].(map[string]interface{})

		if c["enabled"].(bool) {
			vc.Status = aws.String(s3.BucketVersioningStatusEnabled)
		} else {
			vc.Status = aws.String(s3.BucketVersioningStatusSuspended)
		}
	} else {
		vc.Status = aws.String(s3.BucketVersioningStatusSuspended)
	}

	i := &s3.PutBucketVersioningInput{
		Bucket:                  aws.String(bucket),
		VersioningConfiguration: vc,
	}
	log.Printf("[DEBUG] S3 put bucket versioning: %#v", i)

	_, err := s3conn.PutBucketVersioning(i)
	if err != nil {
		return fmt.Errorf("Error putting S3 versioning: %s", err)
	}

	return nil
}

// resourceAwsS3BucketLoggingUpdate pushes the "logging" configuration from
// state to S3. When the set is empty, an empty BucketLoggingStatus is sent,
// which disables access logging on the bucket.
func resourceAwsS3BucketLoggingUpdate(s3conn *s3.S3, d *schema.ResourceData) error {
	logging := d.Get("logging").(*schema.Set).List()
	bucket := d.Get("bucket").(string)
	loggingStatus := &s3.BucketLoggingStatus{}

	if len(logging) > 0 {
		c := logging[0].(map[string]interface{})

		loggingEnabled := &s3.LoggingEnabled{}
		if val, ok := c["target_bucket"]; ok {
			loggingEnabled.TargetBucket = aws.String(val.(string))
		}
		if val, ok := c["target_prefix"]; ok {
			loggingEnabled.TargetPrefix = aws.String(val.(string))
		}

		loggingStatus.LoggingEnabled = loggingEnabled
	}

	i := &s3.PutBucketLoggingInput{
		Bucket:              aws.String(bucket),
		BucketLoggingStatus: loggingStatus,
	}
	log.Printf("[DEBUG] S3 put bucket logging: %#v", i)

	_, err := s3conn.PutBucketLogging(i)
	if err != nil {
		return fmt.Errorf("Error putting S3 logging: %s", err)
	}

	return nil
}

// resourceAwsS3BucketAccelerationUpdate applies the "acceleration_status"
// attribute ("Enabled" or "Suspended" — see
// validateS3BucketAccelerationStatus) to the bucket's transfer-acceleration
// configuration.
func resourceAwsS3BucketAccelerationUpdate(s3conn *s3.S3, d *schema.ResourceData) error {
	bucket := d.Get("bucket").(string)
	enableAcceleration := d.Get("acceleration_status").(string)

	i := &s3.PutBucketAccelerateConfigurationInput{
		Bucket: aws.String(bucket),
		AccelerateConfiguration: &s3.AccelerateConfiguration{
			Status: aws.String(enableAcceleration),
		},
	}
	log.Printf("[DEBUG] S3 put bucket acceleration: %#v", i)

	_, err := s3conn.PutBucketAccelerateConfiguration(i)
	if err != nil {
		return fmt.Errorf("Error putting S3 acceleration: %s", err)
	}

	return nil
}

// resourceAwsS3BucketRequestPayerUpdate applies the "request_payer" attribute
// ("Requester" or "BucketOwner" — see validateS3BucketRequestPayerType) to
// the bucket's request-payment configuration.
func resourceAwsS3BucketRequestPayerUpdate(s3conn *s3.S3, d *schema.ResourceData) error {
	bucket := d.Get("bucket").(string)
	payer := d.Get("request_payer").(string)

	i := &s3.PutBucketRequestPaymentInput{
		Bucket: aws.String(bucket),
		RequestPaymentConfiguration: &s3.RequestPaymentConfiguration{
			Payer: aws.String(payer),
		},
	}
	log.Printf("[DEBUG] S3 put bucket request payer: %#v", i)

	_, err := s3conn.PutBucketRequestPayment(i)
	if err != nil {
		return fmt.Errorf("Error putting S3 request payer: %s", err)
	}

	return nil
}

// resourceAwsS3BucketLifecycleUpdate translates every "lifecycle_rule" list
// entry into an s3.LifecycleRule and uploads the full rule set in one
// PutBucketLifecycleConfiguration call. Rules without an explicit id get a
// generated "tf-s3-lifecycle-" prefixed unique id so the diff stays stable.
func resourceAwsS3BucketLifecycleUpdate(s3conn *s3.S3, d *schema.ResourceData) error {
	bucket := d.Get("bucket").(string)

	lifecycleRules := d.Get("lifecycle_rule").([]interface{})

	rules := make([]*s3.LifecycleRule, 0, len(lifecycleRules))

	for i, lifecycleRule := range lifecycleRules {
		r := lifecycleRule.(map[string]interface{})

		rule := &s3.LifecycleRule{
			Prefix: aws.String(r["prefix"].(string)),
		}

		// ID: user-supplied when present, otherwise generated.
		if val, ok := r["id"].(string); ok && val != "" {
			rule.ID = aws.String(val)
		} else {
			rule.ID = aws.String(resource.PrefixedUniqueId("tf-s3-lifecycle-"))
		}

		// Enabled
		if val, ok := r["enabled"].(bool); ok && val {
			rule.Status = aws.String(s3.ExpirationStatusEnabled)
		} else {
			rule.Status = aws.String(s3.ExpirationStatusDisabled)
		}

		// AbortIncompleteMultipartUpload
		if val, ok := r["abort_incomplete_multipart_upload_days"].(int); ok && val > 0 {
			rule.AbortIncompleteMultipartUpload = &s3.AbortIncompleteMultipartUpload{
				DaysAfterInitiation: aws.Int64(int64(val)),
			}
		}

		// Expiration: date, days and expired_object_delete_marker are
		// alternatives — the first one set wins. A date is supplied as
		// YYYY-MM-DD and expanded to midnight UTC for parsing.
		expiration := d.Get(fmt.Sprintf("lifecycle_rule.%d.expiration", i)).(*schema.Set).List()
		if len(expiration) > 0 {
			e := expiration[0].(map[string]interface{})
			exp := &s3.LifecycleExpiration{}

			if val, ok := e["date"].(string); ok && val != "" {
				t, err := time.Parse(time.RFC3339, fmt.Sprintf("%sT00:00:00Z", val))
				if err != nil {
					return fmt.Errorf("Error Parsing AWS S3 Bucket Lifecycle Expiration Date: %s", err.Error())
				}
				exp.Date = aws.Time(t)
			} else if val, ok := e["days"].(int); ok && val > 0 {
				exp.Days = aws.Int64(int64(val))
			} else if val, ok := e["expired_object_delete_marker"].(bool); ok {
				exp.ExpiredObjectDeleteMarker = aws.Bool(val)
			}
			rule.Expiration = exp
		}

		// NoncurrentVersionExpiration
		nc_expiration := d.Get(fmt.Sprintf("lifecycle_rule.%d.noncurrent_version_expiration", i)).(*schema.Set).List()
		if len(nc_expiration) > 0 {
			e := nc_expiration[0].(map[string]interface{})

			if val, ok := e["days"].(int); ok && val > 0 {
				rule.NoncurrentVersionExpiration = &s3.NoncurrentVersionExpiration{
					NoncurrentDays: aws.Int64(int64(val)),
				}
			}
		}

		// Transitions: like Expiration, date and days are alternatives.
		transitions := d.Get(fmt.Sprintf("lifecycle_rule.%d.transition", i)).(*schema.Set).List()
		if len(transitions) > 0 {
			rule.Transitions = make([]*s3.Transition, 0, len(transitions))
			for _, transition := range transitions {
				m := transition.(map[string]interface{})
				tr := &s3.Transition{}
				if val, ok := m["date"].(string); ok && val != "" {
					t, err := time.Parse(time.RFC3339, fmt.Sprintf("%sT00:00:00Z", val))
					if err != nil {
						// Was "Expiration Date" — copy-paste error; this branch parses a transition date.
						return fmt.Errorf("Error Parsing AWS S3 Bucket Lifecycle Transition Date: %s", err.Error())
					}
					tr.Date = aws.Time(t)
				} else if val, ok := m["days"].(int); ok && val > 0 {
					tr.Days = aws.Int64(int64(val))
				}
				if val, ok := m["storage_class"].(string); ok && val != "" {
					tr.StorageClass = aws.String(val)
				}

				rule.Transitions = append(rule.Transitions, tr)
			}
		}

		// NoncurrentVersionTransitions
		nc_transitions := d.Get(fmt.Sprintf("lifecycle_rule.%d.noncurrent_version_transition", i)).(*schema.Set).List()
		if len(nc_transitions) > 0 {
			rule.NoncurrentVersionTransitions = make([]*s3.NoncurrentVersionTransition, 0, len(nc_transitions))
			for _, transition := range nc_transitions {
				m := transition.(map[string]interface{})
				tr := &s3.NoncurrentVersionTransition{}
				if val, ok := m["days"].(int); ok && val > 0 {
					tr.NoncurrentDays = aws.Int64(int64(val))
				}
				if val, ok := m["storage_class"].(string); ok && val != "" {
					tr.StorageClass = aws.String(val)
				}

				rule.NoncurrentVersionTransitions = append(rule.NoncurrentVersionTransitions, tr)
			}
		}

		rules = append(rules, rule)
	}

	input := &s3.PutBucketLifecycleConfigurationInput{
		Bucket: aws.String(bucket),
		LifecycleConfiguration: &s3.BucketLifecycleConfiguration{
			Rules: rules,
		},
	}

	// NOTE(review): every error is wrapped as NonRetryable, so this retry
	// loop never actually retries — confirm whether eventual-consistency
	// errors (e.g. NoSuchBucket just after create) should be retryable here.
	err := resource.Retry(1*time.Minute, func() *resource.RetryError {
		if _, err := s3conn.PutBucketLifecycleConfiguration(input); err != nil {
			return resource.NonRetryableError(err)
		}
		return nil
	})
	if err != nil {
		return fmt.Errorf("Error putting S3 lifecycle: %s", err)
	}

	return nil
}

// normalizeRoutingRules renders website routing rules as canonical JSON with
// all null-valued fields stripped, so state comparisons are not polluted by
// the SDK's many optional pointer fields marshaling to null.
func normalizeRoutingRules(w []*s3.RoutingRule) (string, error) {
	withNulls, err := json.Marshal(w)
	if err != nil {
		return "", err
	}

	// Round-trip through generic maps so nil entries can be removed.
	var rules []map[string]interface{}
	if err := json.Unmarshal(withNulls, &rules); err != nil {
		return "", err
	}

	var cleanRules []map[string]interface{}
	for _, rule := range rules {
		cleanRules = append(cleanRules, removeNil(rule))
	}

	withoutNulls, err := json.Marshal(cleanRules)
	if err != nil {
		return "", err
	}

	return string(withoutNulls), nil
}

// removeNil returns a copy of data with nil values dropped, recursing into
// nested maps. Non-map values (including slices) are copied through as-is.
func removeNil(data map[string]interface{}) map[string]interface{} {
	withoutNil := make(map[string]interface{})

	for k, v := range data {
		if v == nil {
			continue
		}

		switch val := v.(type) {
		case map[string]interface{}:
			withoutNil[k] = removeNil(val)
		default:
			withoutNil[k] = v
		}
	}

	return withoutNil
}

// normalizeJson re-marshals a JSON string into a canonical compact form for
// stable state comparison. nil or empty input normalizes to "". On a parse
// failure the error text itself is returned as the value (schema.StateFunc
// has no error return), which surfaces the problem in the diff.
func normalizeJson(jsonString interface{}) string {
	if jsonString == nil || jsonString == "" {
		return ""
	}
	var j interface{}
	err := json.Unmarshal([]byte(jsonString.(string)), &j)
	if err != nil {
		return fmt.Sprintf("Error parsing JSON: %s", err)
	}
	b, _ := json.Marshal(j)
	return string(b)
}

// normalizeRegion maps the empty region returned for legacy buckets to the
// region it actually denotes.
func normalizeRegion(region string) string {
	// Default to us-east-1 if the bucket doesn't have a region:
	// http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html
	if region == "" {
		region = "us-east-1"
	}

	return region
}

// validateS3BucketAccelerationStatus is the schema.ValidateFunc for
// "acceleration_status"; only "Enabled" and "Suspended" are accepted.
func validateS3BucketAccelerationStatus(v interface{}, k string) (ws []string, errors []error) {
	validTypes := map[string]struct{}{
		"Enabled":   struct{}{},
		"Suspended": struct{}{},
	}

	if _, ok := validTypes[v.(string)]; !ok {
		errors = append(errors, fmt.Errorf("S3 Bucket Acceleration Status %q is invalid, must be %q or %q", v.(string), "Enabled", "Suspended"))
	}
	return
}

// validateS3BucketRequestPayerType is the schema.ValidateFunc for
// "request_payer"; only s3.PayerRequester and s3.PayerBucketOwner are
// accepted.
func validateS3BucketRequestPayerType(v interface{}, k string) (ws []string, errors []error) {
	value := v.(string)
	if value != s3.PayerRequester && value != s3.PayerBucketOwner {
		errors = append(errors, fmt.Errorf(
			"%q contains an invalid Request Payer type %q. Valid types are either %q or %q",
			k, value, s3.PayerRequester, s3.PayerBucketOwner))
	}
	return
}

// expirationHash is the schema.Set hash function for lifecycle expiration
// blocks, keyed on date, days and expired_object_delete_marker.
func expirationHash(v interface{}) int {
	var buf bytes.Buffer
	m := v.(map[string]interface{})
	if v, ok := m["date"]; ok {
		buf.WriteString(fmt.Sprintf("%s-", v.(string)))
	}
	if v, ok := m["days"]; ok {
		buf.WriteString(fmt.Sprintf("%d-", v.(int)))
	}
	if v, ok := m["expired_object_delete_marker"]; ok {
		buf.WriteString(fmt.Sprintf("%t-", v.(bool)))
	}
	return hashcode.String(buf.String())
}

// transitionHash is the schema.Set hash function for lifecycle transition
// blocks, keyed on date, days and storage_class.
func transitionHash(v interface{}) int {
	var buf bytes.Buffer
	m := v.(map[string]interface{})
	if v, ok := m["date"]; ok {
		buf.WriteString(fmt.Sprintf("%s-", v.(string)))
	}
	if v, ok := m["days"]; ok {
		buf.WriteString(fmt.Sprintf("%d-", v.(int)))
	}
	if v, ok := m["storage_class"]; ok {
		buf.WriteString(fmt.Sprintf("%s-", v.(string)))
	}
	return hashcode.String(buf.String())
}

// S3Website pairs a bucket website endpoint with its hosted-zone domain.
type S3Website struct {
	Endpoint, Domain string
}