github.com/kwoods/terraform@v0.6.11-0.20160809170336-13497db7138e/builtin/providers/aws/resource_aws_s3_bucket.go (about) 1 package aws 2 3 import ( 4 "bytes" 5 "encoding/json" 6 "fmt" 7 "log" 8 "net/url" 9 "time" 10 11 "github.com/hashicorp/terraform/helper/resource" 12 "github.com/hashicorp/terraform/helper/schema" 13 14 "github.com/aws/aws-sdk-go/aws" 15 "github.com/aws/aws-sdk-go/aws/awserr" 16 "github.com/aws/aws-sdk-go/service/s3" 17 "github.com/hashicorp/terraform/helper/hashcode" 18 ) 19 20 func resourceAwsS3Bucket() *schema.Resource { 21 return &schema.Resource{ 22 Create: resourceAwsS3BucketCreate, 23 Read: resourceAwsS3BucketRead, 24 Update: resourceAwsS3BucketUpdate, 25 Delete: resourceAwsS3BucketDelete, 26 27 Schema: map[string]*schema.Schema{ 28 "bucket": &schema.Schema{ 29 Type: schema.TypeString, 30 Required: true, 31 ForceNew: true, 32 }, 33 34 "arn": &schema.Schema{ 35 Type: schema.TypeString, 36 Optional: true, 37 Computed: true, 38 }, 39 40 "acl": &schema.Schema{ 41 Type: schema.TypeString, 42 Default: "private", 43 Optional: true, 44 }, 45 46 "policy": &schema.Schema{ 47 Type: schema.TypeString, 48 Optional: true, 49 StateFunc: normalizeJson, 50 }, 51 52 "cors_rule": &schema.Schema{ 53 Type: schema.TypeList, 54 Optional: true, 55 Elem: &schema.Resource{ 56 Schema: map[string]*schema.Schema{ 57 "allowed_headers": &schema.Schema{ 58 Type: schema.TypeList, 59 Optional: true, 60 Elem: &schema.Schema{Type: schema.TypeString}, 61 }, 62 "allowed_methods": &schema.Schema{ 63 Type: schema.TypeList, 64 Required: true, 65 Elem: &schema.Schema{Type: schema.TypeString}, 66 }, 67 "allowed_origins": &schema.Schema{ 68 Type: schema.TypeList, 69 Required: true, 70 Elem: &schema.Schema{Type: schema.TypeString}, 71 }, 72 "expose_headers": &schema.Schema{ 73 Type: schema.TypeList, 74 Optional: true, 75 Elem: &schema.Schema{Type: schema.TypeString}, 76 }, 77 "max_age_seconds": &schema.Schema{ 78 Type: schema.TypeInt, 79 Optional: true, 80 }, 81 }, 82 }, 83 }, 84 85 
"website": &schema.Schema{ 86 Type: schema.TypeList, 87 Optional: true, 88 Elem: &schema.Resource{ 89 Schema: map[string]*schema.Schema{ 90 "index_document": &schema.Schema{ 91 Type: schema.TypeString, 92 Optional: true, 93 }, 94 95 "error_document": &schema.Schema{ 96 Type: schema.TypeString, 97 Optional: true, 98 }, 99 100 "redirect_all_requests_to": &schema.Schema{ 101 Type: schema.TypeString, 102 ConflictsWith: []string{ 103 "website.0.index_document", 104 "website.0.error_document", 105 "website.0.routing_rules", 106 }, 107 Optional: true, 108 }, 109 110 "routing_rules": &schema.Schema{ 111 Type: schema.TypeString, 112 Optional: true, 113 StateFunc: normalizeJson, 114 }, 115 }, 116 }, 117 }, 118 119 "hosted_zone_id": &schema.Schema{ 120 Type: schema.TypeString, 121 Optional: true, 122 Computed: true, 123 }, 124 125 "region": &schema.Schema{ 126 Type: schema.TypeString, 127 Optional: true, 128 Computed: true, 129 }, 130 "website_endpoint": &schema.Schema{ 131 Type: schema.TypeString, 132 Optional: true, 133 Computed: true, 134 }, 135 "website_domain": &schema.Schema{ 136 Type: schema.TypeString, 137 Optional: true, 138 Computed: true, 139 }, 140 141 "versioning": &schema.Schema{ 142 Type: schema.TypeSet, 143 Optional: true, 144 Elem: &schema.Resource{ 145 Schema: map[string]*schema.Schema{ 146 "enabled": &schema.Schema{ 147 Type: schema.TypeBool, 148 Optional: true, 149 Default: false, 150 }, 151 }, 152 }, 153 Set: func(v interface{}) int { 154 var buf bytes.Buffer 155 m := v.(map[string]interface{}) 156 buf.WriteString(fmt.Sprintf("%t-", m["enabled"].(bool))) 157 158 return hashcode.String(buf.String()) 159 }, 160 }, 161 162 "logging": &schema.Schema{ 163 Type: schema.TypeSet, 164 Optional: true, 165 Elem: &schema.Resource{ 166 Schema: map[string]*schema.Schema{ 167 "target_bucket": &schema.Schema{ 168 Type: schema.TypeString, 169 Required: true, 170 }, 171 "target_prefix": &schema.Schema{ 172 Type: schema.TypeString, 173 Optional: true, 174 }, 175 }, 176 }, 
177 Set: func(v interface{}) int { 178 var buf bytes.Buffer 179 m := v.(map[string]interface{}) 180 buf.WriteString(fmt.Sprintf("%s-", m["target_bucket"])) 181 buf.WriteString(fmt.Sprintf("%s-", m["target_prefix"])) 182 return hashcode.String(buf.String()) 183 }, 184 }, 185 186 "lifecycle_rule": &schema.Schema{ 187 Type: schema.TypeList, 188 Optional: true, 189 Elem: &schema.Resource{ 190 Schema: map[string]*schema.Schema{ 191 "id": &schema.Schema{ 192 Type: schema.TypeString, 193 Optional: true, 194 Computed: true, 195 ValidateFunc: validateS3BucketLifecycleRuleId, 196 }, 197 "prefix": &schema.Schema{ 198 Type: schema.TypeString, 199 Required: true, 200 }, 201 "enabled": &schema.Schema{ 202 Type: schema.TypeBool, 203 Required: true, 204 }, 205 "abort_incomplete_multipart_upload_days": &schema.Schema{ 206 Type: schema.TypeInt, 207 Optional: true, 208 }, 209 "expiration": &schema.Schema{ 210 Type: schema.TypeSet, 211 Optional: true, 212 Set: expirationHash, 213 Elem: &schema.Resource{ 214 Schema: map[string]*schema.Schema{ 215 "date": &schema.Schema{ 216 Type: schema.TypeString, 217 Optional: true, 218 ValidateFunc: validateS3BucketLifecycleTimestamp, 219 }, 220 "days": &schema.Schema{ 221 Type: schema.TypeInt, 222 Optional: true, 223 }, 224 "expired_object_delete_marker": &schema.Schema{ 225 Type: schema.TypeBool, 226 Optional: true, 227 }, 228 }, 229 }, 230 }, 231 "noncurrent_version_expiration": &schema.Schema{ 232 Type: schema.TypeSet, 233 Optional: true, 234 Set: expirationHash, 235 Elem: &schema.Resource{ 236 Schema: map[string]*schema.Schema{ 237 "days": &schema.Schema{ 238 Type: schema.TypeInt, 239 Optional: true, 240 }, 241 }, 242 }, 243 }, 244 "transition": &schema.Schema{ 245 Type: schema.TypeSet, 246 Optional: true, 247 Set: transitionHash, 248 Elem: &schema.Resource{ 249 Schema: map[string]*schema.Schema{ 250 "date": &schema.Schema{ 251 Type: schema.TypeString, 252 Optional: true, 253 ValidateFunc: validateS3BucketLifecycleTimestamp, 254 }, 255 "days": 
&schema.Schema{ 256 Type: schema.TypeInt, 257 Optional: true, 258 }, 259 "storage_class": &schema.Schema{ 260 Type: schema.TypeString, 261 Required: true, 262 ValidateFunc: validateS3BucketLifecycleStorageClass, 263 }, 264 }, 265 }, 266 }, 267 "noncurrent_version_transition": &schema.Schema{ 268 Type: schema.TypeSet, 269 Optional: true, 270 Set: transitionHash, 271 Elem: &schema.Resource{ 272 Schema: map[string]*schema.Schema{ 273 "days": &schema.Schema{ 274 Type: schema.TypeInt, 275 Optional: true, 276 }, 277 "storage_class": &schema.Schema{ 278 Type: schema.TypeString, 279 Required: true, 280 ValidateFunc: validateS3BucketLifecycleStorageClass, 281 }, 282 }, 283 }, 284 }, 285 }, 286 }, 287 }, 288 289 "tags": tagsSchema(), 290 291 "force_destroy": &schema.Schema{ 292 Type: schema.TypeBool, 293 Optional: true, 294 Default: false, 295 }, 296 297 "acceleration_status": &schema.Schema{ 298 Type: schema.TypeString, 299 Optional: true, 300 Computed: true, 301 ValidateFunc: validateS3BucketAccelerationStatus, 302 }, 303 }, 304 } 305 } 306 307 func resourceAwsS3BucketCreate(d *schema.ResourceData, meta interface{}) error { 308 s3conn := meta.(*AWSClient).s3conn 309 awsRegion := meta.(*AWSClient).region 310 311 // Get the bucket and acl 312 bucket := d.Get("bucket").(string) 313 acl := d.Get("acl").(string) 314 315 log.Printf("[DEBUG] S3 bucket create: %s, ACL: %s", bucket, acl) 316 317 req := &s3.CreateBucketInput{ 318 Bucket: aws.String(bucket), 319 ACL: aws.String(acl), 320 } 321 322 // Special case us-east-1 region and do not set the LocationConstraint. 
323 // See "Request Elements: http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUT.html 324 if awsRegion != "us-east-1" { 325 req.CreateBucketConfiguration = &s3.CreateBucketConfiguration{ 326 LocationConstraint: aws.String(awsRegion), 327 } 328 } 329 330 err := resource.Retry(5*time.Minute, func() *resource.RetryError { 331 log.Printf("[DEBUG] Trying to create new S3 bucket: %q", bucket) 332 _, err := s3conn.CreateBucket(req) 333 if awsErr, ok := err.(awserr.Error); ok { 334 if awsErr.Code() == "OperationAborted" { 335 log.Printf("[WARN] Got an error while trying to create S3 bucket %s: %s", bucket, err) 336 return resource.RetryableError( 337 fmt.Errorf("[WARN] Error creating S3 bucket %s, retrying: %s", 338 bucket, err)) 339 } 340 } 341 if err != nil { 342 return resource.NonRetryableError(err) 343 } 344 345 return nil 346 }) 347 348 if err != nil { 349 return fmt.Errorf("Error creating S3 bucket: %s", err) 350 } 351 352 // Assign the bucket name as the resource ID 353 d.SetId(bucket) 354 355 return resourceAwsS3BucketUpdate(d, meta) 356 } 357 358 func resourceAwsS3BucketUpdate(d *schema.ResourceData, meta interface{}) error { 359 s3conn := meta.(*AWSClient).s3conn 360 if err := setTagsS3(s3conn, d); err != nil { 361 return err 362 } 363 364 if d.HasChange("policy") { 365 if err := resourceAwsS3BucketPolicyUpdate(s3conn, d); err != nil { 366 return err 367 } 368 } 369 370 if d.HasChange("cors_rule") { 371 if err := resourceAwsS3BucketCorsUpdate(s3conn, d); err != nil { 372 return err 373 } 374 } 375 376 if d.HasChange("website") { 377 if err := resourceAwsS3BucketWebsiteUpdate(s3conn, d); err != nil { 378 return err 379 } 380 } 381 382 if d.HasChange("versioning") { 383 if err := resourceAwsS3BucketVersioningUpdate(s3conn, d); err != nil { 384 return err 385 } 386 } 387 if d.HasChange("acl") { 388 if err := resourceAwsS3BucketAclUpdate(s3conn, d); err != nil { 389 return err 390 } 391 } 392 393 if d.HasChange("logging") { 394 if err := 
resourceAwsS3BucketLoggingUpdate(s3conn, d); err != nil { 395 return err 396 } 397 } 398 399 if d.HasChange("lifecycle_rule") { 400 if err := resourceAwsS3BucketLifecycleUpdate(s3conn, d); err != nil { 401 return err 402 } 403 } 404 405 if d.HasChange("acceleration_status") { 406 if err := resourceAwsS3BucketAccelerationUpdate(s3conn, d); err != nil { 407 return err 408 } 409 } 410 411 return resourceAwsS3BucketRead(d, meta) 412 } 413 414 func resourceAwsS3BucketRead(d *schema.ResourceData, meta interface{}) error { 415 s3conn := meta.(*AWSClient).s3conn 416 417 var err error 418 _, err = s3conn.HeadBucket(&s3.HeadBucketInput{ 419 Bucket: aws.String(d.Id()), 420 }) 421 if err != nil { 422 if awsError, ok := err.(awserr.RequestFailure); ok && awsError.StatusCode() == 404 { 423 log.Printf("[WARN] S3 Bucket (%s) not found, error code (404)", d.Id()) 424 d.SetId("") 425 return nil 426 } else { 427 // some of the AWS SDK's errors can be empty strings, so let's add 428 // some additional context. 
429 return fmt.Errorf("error reading S3 bucket \"%s\": %s", d.Id(), err) 430 } 431 } 432 433 // In the import case, we won't have this 434 if _, ok := d.GetOk("bucket"); !ok { 435 d.Set("bucket", d.Id()) 436 } 437 438 // Read the policy 439 pol, err := s3conn.GetBucketPolicy(&s3.GetBucketPolicyInput{ 440 Bucket: aws.String(d.Id()), 441 }) 442 log.Printf("[DEBUG] S3 bucket: %s, read policy: %v", d.Id(), pol) 443 if err != nil { 444 if err := d.Set("policy", ""); err != nil { 445 return err 446 } 447 } else { 448 if v := pol.Policy; v == nil { 449 if err := d.Set("policy", ""); err != nil { 450 return err 451 } 452 } else if err := d.Set("policy", normalizeJson(*v)); err != nil { 453 return err 454 } 455 } 456 457 // Read the CORS 458 cors, err := s3conn.GetBucketCors(&s3.GetBucketCorsInput{ 459 Bucket: aws.String(d.Id()), 460 }) 461 log.Printf("[DEBUG] S3 bucket: %s, read CORS: %v", d.Id(), cors) 462 if err != nil { 463 rules := make([]map[string]interface{}, 0, len(cors.CORSRules)) 464 for _, ruleObject := range cors.CORSRules { 465 rule := make(map[string]interface{}) 466 rule["allowed_headers"] = ruleObject.AllowedHeaders 467 rule["allowed_methods"] = ruleObject.AllowedMethods 468 rule["allowed_origins"] = ruleObject.AllowedOrigins 469 rule["expose_headers"] = ruleObject.ExposeHeaders 470 rule["max_age_seconds"] = ruleObject.MaxAgeSeconds 471 rules = append(rules, rule) 472 } 473 if err := d.Set("cors_rule", rules); err != nil { 474 return fmt.Errorf("error reading S3 bucket \"%s\" CORS rules: %s", d.Id(), err) 475 } 476 } 477 478 // Read the website configuration 479 ws, err := s3conn.GetBucketWebsite(&s3.GetBucketWebsiteInput{ 480 Bucket: aws.String(d.Id()), 481 }) 482 var websites []map[string]interface{} 483 if err == nil { 484 w := make(map[string]interface{}) 485 486 if v := ws.IndexDocument; v != nil { 487 w["index_document"] = *v.Suffix 488 } 489 490 if v := ws.ErrorDocument; v != nil { 491 w["error_document"] = *v.Key 492 } 493 494 if v := 
ws.RedirectAllRequestsTo; v != nil { 495 if v.Protocol == nil { 496 w["redirect_all_requests_to"] = *v.HostName 497 } else { 498 var host string 499 var path string 500 parsedHostName, err := url.Parse(*v.HostName) 501 if err == nil { 502 host = parsedHostName.Host 503 path = parsedHostName.Path 504 } else { 505 host = *v.HostName 506 path = "" 507 } 508 509 w["redirect_all_requests_to"] = (&url.URL{ 510 Host: host, 511 Path: path, 512 Scheme: *v.Protocol, 513 }).String() 514 } 515 } 516 517 if v := ws.RoutingRules; v != nil { 518 rr, err := normalizeRoutingRules(v) 519 if err != nil { 520 return fmt.Errorf("Error while marshaling routing rules: %s", err) 521 } 522 w["routing_rules"] = rr 523 } 524 525 websites = append(websites, w) 526 } 527 if err := d.Set("website", websites); err != nil { 528 return err 529 } 530 531 // Read the versioning configuration 532 versioning, err := s3conn.GetBucketVersioning(&s3.GetBucketVersioningInput{ 533 Bucket: aws.String(d.Id()), 534 }) 535 if err != nil { 536 return err 537 } 538 log.Printf("[DEBUG] S3 Bucket: %s, versioning: %v", d.Id(), versioning) 539 if versioning.Status != nil && *versioning.Status == s3.BucketVersioningStatusEnabled { 540 vcl := make([]map[string]interface{}, 0, 1) 541 vc := make(map[string]interface{}) 542 if *versioning.Status == s3.BucketVersioningStatusEnabled { 543 vc["enabled"] = true 544 } else { 545 vc["enabled"] = false 546 } 547 vcl = append(vcl, vc) 548 if err := d.Set("versioning", vcl); err != nil { 549 return err 550 } 551 } 552 553 //read the acceleration status 554 accelerate, err := s3conn.GetBucketAccelerateConfiguration(&s3.GetBucketAccelerateConfigurationInput{ 555 Bucket: aws.String(d.Id()), 556 }) 557 log.Printf("[DEBUG] S3 bucket: %s, read Acceleration: %v", d.Id(), accelerate) 558 if err != nil { 559 // Amazon S3 Transfer Acceleration might not be supported in the 560 // given region, for example, China (Beijing) and the Government 561 // Cloud does not support this feature at the 
moment. 562 if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() != "UnsupportedArgument" { 563 return err 564 } 565 log.Printf("[WARN] S3 bucket: %s, the S3 Transfer Accelaration is not supported in the region: %s", d.Id(), meta.(*AWSClient).region) 566 } else { 567 log.Printf("[DEBUG] S3 bucket: %s, read Acceleration: %v", d.Id(), accelerate) 568 d.Set("acceleration_status", accelerate.Status) 569 } 570 571 // Read the logging configuration 572 logging, err := s3conn.GetBucketLogging(&s3.GetBucketLoggingInput{ 573 Bucket: aws.String(d.Id()), 574 }) 575 if err != nil { 576 return err 577 } 578 log.Printf("[DEBUG] S3 Bucket: %s, logging: %v", d.Id(), logging) 579 if v := logging.LoggingEnabled; v != nil { 580 lcl := make([]map[string]interface{}, 0, 1) 581 lc := make(map[string]interface{}) 582 if *v.TargetBucket != "" { 583 lc["target_bucket"] = *v.TargetBucket 584 } 585 if *v.TargetPrefix != "" { 586 lc["target_prefix"] = *v.TargetPrefix 587 } 588 lcl = append(lcl, lc) 589 if err := d.Set("logging", lcl); err != nil { 590 return err 591 } 592 } 593 594 // Read the lifecycle configuration 595 lifecycle, err := s3conn.GetBucketLifecycleConfiguration(&s3.GetBucketLifecycleConfigurationInput{ 596 Bucket: aws.String(d.Id()), 597 }) 598 if err != nil { 599 if awsError, ok := err.(awserr.RequestFailure); ok && awsError.StatusCode() != 404 { 600 return err 601 } 602 } 603 log.Printf("[DEBUG] S3 Bucket: %s, lifecycle: %v", d.Id(), lifecycle) 604 if len(lifecycle.Rules) > 0 { 605 rules := make([]map[string]interface{}, 0, len(lifecycle.Rules)) 606 607 for _, lifecycleRule := range lifecycle.Rules { 608 rule := make(map[string]interface{}) 609 610 // ID 611 if lifecycleRule.ID != nil && *lifecycleRule.ID != "" { 612 rule["id"] = *lifecycleRule.ID 613 } 614 // Prefix 615 if lifecycleRule.Prefix != nil && *lifecycleRule.Prefix != "" { 616 rule["prefix"] = *lifecycleRule.Prefix 617 } 618 // Enabled 619 if lifecycleRule.Status != nil { 620 if *lifecycleRule.Status == 
s3.ExpirationStatusEnabled { 621 rule["enabled"] = true 622 } else { 623 rule["enabled"] = false 624 } 625 } 626 627 // AbortIncompleteMultipartUploadDays 628 if lifecycleRule.AbortIncompleteMultipartUpload != nil { 629 if lifecycleRule.AbortIncompleteMultipartUpload.DaysAfterInitiation != nil { 630 rule["abort_incomplete_multipart_upload_days"] = int(*lifecycleRule.AbortIncompleteMultipartUpload.DaysAfterInitiation) 631 } 632 } 633 634 // expiration 635 if lifecycleRule.Expiration != nil { 636 e := make(map[string]interface{}) 637 if lifecycleRule.Expiration.Date != nil { 638 e["date"] = (*lifecycleRule.Expiration.Date).Format("2006-01-02") 639 } 640 if lifecycleRule.Expiration.Days != nil { 641 e["days"] = int(*lifecycleRule.Expiration.Days) 642 } 643 if lifecycleRule.Expiration.ExpiredObjectDeleteMarker != nil { 644 e["expired_object_delete_marker"] = *lifecycleRule.Expiration.ExpiredObjectDeleteMarker 645 } 646 rule["expiration"] = schema.NewSet(expirationHash, []interface{}{e}) 647 } 648 // noncurrent_version_expiration 649 if lifecycleRule.NoncurrentVersionExpiration != nil { 650 e := make(map[string]interface{}) 651 if lifecycleRule.NoncurrentVersionExpiration.NoncurrentDays != nil { 652 e["days"] = int(*lifecycleRule.NoncurrentVersionExpiration.NoncurrentDays) 653 } 654 rule["noncurrent_version_expiration"] = schema.NewSet(expirationHash, []interface{}{e}) 655 } 656 //// transition 657 if len(lifecycleRule.Transitions) > 0 { 658 transitions := make([]interface{}, 0, len(lifecycleRule.Transitions)) 659 for _, v := range lifecycleRule.Transitions { 660 t := make(map[string]interface{}) 661 if v.Date != nil { 662 t["date"] = (*v.Date).Format("2006-01-02") 663 } 664 if v.Days != nil { 665 t["days"] = int(*v.Days) 666 } 667 if v.StorageClass != nil { 668 t["storage_class"] = *v.StorageClass 669 } 670 transitions = append(transitions, t) 671 } 672 rule["transition"] = schema.NewSet(transitionHash, transitions) 673 } 674 // noncurrent_version_transition 675 if 
len(lifecycleRule.NoncurrentVersionTransitions) > 0 { 676 transitions := make([]interface{}, 0, len(lifecycleRule.NoncurrentVersionTransitions)) 677 for _, v := range lifecycleRule.NoncurrentVersionTransitions { 678 t := make(map[string]interface{}) 679 if v.NoncurrentDays != nil { 680 t["days"] = int(*v.NoncurrentDays) 681 } 682 if v.StorageClass != nil { 683 t["storage_class"] = *v.StorageClass 684 } 685 transitions = append(transitions, t) 686 } 687 rule["noncurrent_version_transition"] = schema.NewSet(transitionHash, transitions) 688 } 689 690 rules = append(rules, rule) 691 } 692 693 if err := d.Set("lifecycle_rule", rules); err != nil { 694 return err 695 } 696 } 697 698 // Add the region as an attribute 699 location, err := s3conn.GetBucketLocation( 700 &s3.GetBucketLocationInput{ 701 Bucket: aws.String(d.Id()), 702 }, 703 ) 704 if err != nil { 705 return err 706 } 707 var region string 708 if location.LocationConstraint != nil { 709 region = *location.LocationConstraint 710 } 711 region = normalizeRegion(region) 712 if err := d.Set("region", region); err != nil { 713 return err 714 } 715 716 // Add the hosted zone ID for this bucket's region as an attribute 717 hostedZoneID := HostedZoneIDForRegion(region) 718 if err := d.Set("hosted_zone_id", hostedZoneID); err != nil { 719 return err 720 } 721 722 // Add website_endpoint as an attribute 723 websiteEndpoint, err := websiteEndpoint(s3conn, d) 724 if err != nil { 725 return err 726 } 727 if websiteEndpoint != nil { 728 if err := d.Set("website_endpoint", websiteEndpoint.Endpoint); err != nil { 729 return err 730 } 731 if err := d.Set("website_domain", websiteEndpoint.Domain); err != nil { 732 return err 733 } 734 } 735 736 tagSet, err := getTagSetS3(s3conn, d.Id()) 737 if err != nil { 738 return err 739 } 740 741 if err := d.Set("tags", tagsToMapS3(tagSet)); err != nil { 742 return err 743 } 744 745 d.Set("arn", fmt.Sprint("arn:aws:s3:::", d.Id())) 746 747 return nil 748 } 749 750 func 
resourceAwsS3BucketDelete(d *schema.ResourceData, meta interface{}) error { 751 s3conn := meta.(*AWSClient).s3conn 752 753 log.Printf("[DEBUG] S3 Delete Bucket: %s", d.Id()) 754 _, err := s3conn.DeleteBucket(&s3.DeleteBucketInput{ 755 Bucket: aws.String(d.Id()), 756 }) 757 if err != nil { 758 ec2err, ok := err.(awserr.Error) 759 if ok && ec2err.Code() == "BucketNotEmpty" { 760 if d.Get("force_destroy").(bool) { 761 // bucket may have things delete them 762 log.Printf("[DEBUG] S3 Bucket attempting to forceDestroy %+v", err) 763 764 bucket := d.Get("bucket").(string) 765 resp, err := s3conn.ListObjectVersions( 766 &s3.ListObjectVersionsInput{ 767 Bucket: aws.String(bucket), 768 }, 769 ) 770 771 if err != nil { 772 return fmt.Errorf("Error S3 Bucket list Object Versions err: %s", err) 773 } 774 775 objectsToDelete := make([]*s3.ObjectIdentifier, 0) 776 777 if len(resp.DeleteMarkers) != 0 { 778 779 for _, v := range resp.DeleteMarkers { 780 objectsToDelete = append(objectsToDelete, &s3.ObjectIdentifier{ 781 Key: v.Key, 782 VersionId: v.VersionId, 783 }) 784 } 785 } 786 787 if len(resp.Versions) != 0 { 788 for _, v := range resp.Versions { 789 objectsToDelete = append(objectsToDelete, &s3.ObjectIdentifier{ 790 Key: v.Key, 791 VersionId: v.VersionId, 792 }) 793 } 794 } 795 796 params := &s3.DeleteObjectsInput{ 797 Bucket: aws.String(bucket), 798 Delete: &s3.Delete{ 799 Objects: objectsToDelete, 800 }, 801 } 802 803 _, err = s3conn.DeleteObjects(params) 804 805 if err != nil { 806 return fmt.Errorf("Error S3 Bucket force_destroy error deleting: %s", err) 807 } 808 809 // this line recurses until all objects are deleted or an error is returned 810 return resourceAwsS3BucketDelete(d, meta) 811 } 812 } 813 return fmt.Errorf("Error deleting S3 Bucket: %s", err) 814 } 815 return nil 816 } 817 818 func resourceAwsS3BucketPolicyUpdate(s3conn *s3.S3, d *schema.ResourceData) error { 819 bucket := d.Get("bucket").(string) 820 policy := d.Get("policy").(string) 821 822 if policy != 
"" { 823 log.Printf("[DEBUG] S3 bucket: %s, put policy: %s", bucket, policy) 824 825 params := &s3.PutBucketPolicyInput{ 826 Bucket: aws.String(bucket), 827 Policy: aws.String(policy), 828 } 829 830 err := resource.Retry(1*time.Minute, func() *resource.RetryError { 831 if _, err := s3conn.PutBucketPolicy(params); err != nil { 832 if awserr, ok := err.(awserr.Error); ok { 833 if awserr.Code() == "MalformedPolicy" { 834 return resource.RetryableError(awserr) 835 } 836 } 837 return resource.NonRetryableError(err) 838 } 839 return nil 840 }) 841 842 if err != nil { 843 return fmt.Errorf("Error putting S3 policy: %s", err) 844 } 845 } else { 846 log.Printf("[DEBUG] S3 bucket: %s, delete policy: %s", bucket, policy) 847 _, err := s3conn.DeleteBucketPolicy(&s3.DeleteBucketPolicyInput{ 848 Bucket: aws.String(bucket), 849 }) 850 851 if err != nil { 852 return fmt.Errorf("Error deleting S3 policy: %s", err) 853 } 854 } 855 856 return nil 857 } 858 859 func resourceAwsS3BucketCorsUpdate(s3conn *s3.S3, d *schema.ResourceData) error { 860 bucket := d.Get("bucket").(string) 861 rawCors := d.Get("cors_rule").([]interface{}) 862 863 if len(rawCors) == 0 { 864 // Delete CORS 865 log.Printf("[DEBUG] S3 bucket: %s, delete CORS", bucket) 866 _, err := s3conn.DeleteBucketCors(&s3.DeleteBucketCorsInput{ 867 Bucket: aws.String(bucket), 868 }) 869 if err != nil { 870 return fmt.Errorf("Error deleting S3 CORS: %s", err) 871 } 872 } else { 873 // Put CORS 874 rules := make([]*s3.CORSRule, 0, len(rawCors)) 875 for _, cors := range rawCors { 876 corsMap := cors.(map[string]interface{}) 877 r := &s3.CORSRule{} 878 for k, v := range corsMap { 879 log.Printf("[DEBUG] S3 bucket: %s, put CORS: %#v, %#v", bucket, k, v) 880 if k == "max_age_seconds" { 881 r.MaxAgeSeconds = aws.Int64(int64(v.(int))) 882 } else { 883 vMap := make([]*string, len(v.([]interface{}))) 884 for i, vv := range v.([]interface{}) { 885 str := vv.(string) 886 vMap[i] = aws.String(str) 887 } 888 switch k { 889 case 
"allowed_headers": 890 r.AllowedHeaders = vMap 891 case "allowed_methods": 892 r.AllowedMethods = vMap 893 case "allowed_origins": 894 r.AllowedOrigins = vMap 895 case "expose_headers": 896 r.ExposeHeaders = vMap 897 } 898 } 899 } 900 rules = append(rules, r) 901 } 902 corsInput := &s3.PutBucketCorsInput{ 903 Bucket: aws.String(bucket), 904 CORSConfiguration: &s3.CORSConfiguration{ 905 CORSRules: rules, 906 }, 907 } 908 log.Printf("[DEBUG] S3 bucket: %s, put CORS: %#v", bucket, corsInput) 909 _, err := s3conn.PutBucketCors(corsInput) 910 if err != nil { 911 return fmt.Errorf("Error putting S3 CORS: %s", err) 912 } 913 } 914 915 return nil 916 } 917 918 func resourceAwsS3BucketWebsiteUpdate(s3conn *s3.S3, d *schema.ResourceData) error { 919 ws := d.Get("website").([]interface{}) 920 921 if len(ws) == 1 { 922 var w map[string]interface{} 923 if ws[0] != nil { 924 w = ws[0].(map[string]interface{}) 925 } else { 926 w = make(map[string]interface{}) 927 } 928 return resourceAwsS3BucketWebsitePut(s3conn, d, w) 929 } else if len(ws) == 0 { 930 return resourceAwsS3BucketWebsiteDelete(s3conn, d) 931 } else { 932 return fmt.Errorf("Cannot specify more than one website.") 933 } 934 } 935 936 func resourceAwsS3BucketWebsitePut(s3conn *s3.S3, d *schema.ResourceData, website map[string]interface{}) error { 937 bucket := d.Get("bucket").(string) 938 939 var indexDocument, errorDocument, redirectAllRequestsTo, routingRules string 940 if v, ok := website["index_document"]; ok { 941 indexDocument = v.(string) 942 } 943 if v, ok := website["error_document"]; ok { 944 errorDocument = v.(string) 945 } 946 if v, ok := website["redirect_all_requests_to"]; ok { 947 redirectAllRequestsTo = v.(string) 948 } 949 if v, ok := website["routing_rules"]; ok { 950 routingRules = v.(string) 951 } 952 953 if indexDocument == "" && redirectAllRequestsTo == "" { 954 return fmt.Errorf("Must specify either index_document or redirect_all_requests_to.") 955 } 956 957 websiteConfiguration := 
&s3.WebsiteConfiguration{} 958 959 if indexDocument != "" { 960 websiteConfiguration.IndexDocument = &s3.IndexDocument{Suffix: aws.String(indexDocument)} 961 } 962 963 if errorDocument != "" { 964 websiteConfiguration.ErrorDocument = &s3.ErrorDocument{Key: aws.String(errorDocument)} 965 } 966 967 if redirectAllRequestsTo != "" { 968 redirect, err := url.Parse(redirectAllRequestsTo) 969 if err == nil && redirect.Scheme != "" { 970 var redirectHostBuf bytes.Buffer 971 redirectHostBuf.WriteString(redirect.Host) 972 if redirect.Path != "" { 973 redirectHostBuf.WriteString(redirect.Path) 974 } 975 websiteConfiguration.RedirectAllRequestsTo = &s3.RedirectAllRequestsTo{HostName: aws.String(redirectHostBuf.String()), Protocol: aws.String(redirect.Scheme)} 976 } else { 977 websiteConfiguration.RedirectAllRequestsTo = &s3.RedirectAllRequestsTo{HostName: aws.String(redirectAllRequestsTo)} 978 } 979 } 980 981 if routingRules != "" { 982 var unmarshaledRules []*s3.RoutingRule 983 if err := json.Unmarshal([]byte(routingRules), &unmarshaledRules); err != nil { 984 return err 985 } 986 websiteConfiguration.RoutingRules = unmarshaledRules 987 } 988 989 putInput := &s3.PutBucketWebsiteInput{ 990 Bucket: aws.String(bucket), 991 WebsiteConfiguration: websiteConfiguration, 992 } 993 994 log.Printf("[DEBUG] S3 put bucket website: %#v", putInput) 995 996 _, err := s3conn.PutBucketWebsite(putInput) 997 if err != nil { 998 return fmt.Errorf("Error putting S3 website: %s", err) 999 } 1000 1001 return nil 1002 } 1003 1004 func resourceAwsS3BucketWebsiteDelete(s3conn *s3.S3, d *schema.ResourceData) error { 1005 bucket := d.Get("bucket").(string) 1006 deleteInput := &s3.DeleteBucketWebsiteInput{Bucket: aws.String(bucket)} 1007 1008 log.Printf("[DEBUG] S3 delete bucket website: %#v", deleteInput) 1009 1010 _, err := s3conn.DeleteBucketWebsite(deleteInput) 1011 if err != nil { 1012 return fmt.Errorf("Error deleting S3 website: %s", err) 1013 } 1014 1015 d.Set("website_endpoint", "") 1016 
d.Set("website_domain", "") 1017 1018 return nil 1019 } 1020 1021 func websiteEndpoint(s3conn *s3.S3, d *schema.ResourceData) (*S3Website, error) { 1022 // If the bucket doesn't have a website configuration, return an empty 1023 // endpoint 1024 if _, ok := d.GetOk("website"); !ok { 1025 return nil, nil 1026 } 1027 1028 bucket := d.Get("bucket").(string) 1029 1030 // Lookup the region for this bucket 1031 location, err := s3conn.GetBucketLocation( 1032 &s3.GetBucketLocationInput{ 1033 Bucket: aws.String(bucket), 1034 }, 1035 ) 1036 if err != nil { 1037 return nil, err 1038 } 1039 var region string 1040 if location.LocationConstraint != nil { 1041 region = *location.LocationConstraint 1042 } 1043 1044 return WebsiteEndpoint(bucket, region), nil 1045 } 1046 1047 func WebsiteEndpoint(bucket string, region string) *S3Website { 1048 domain := WebsiteDomainUrl(region) 1049 return &S3Website{Endpoint: fmt.Sprintf("%s.%s", bucket, domain), Domain: domain} 1050 } 1051 1052 func WebsiteDomainUrl(region string) string { 1053 region = normalizeRegion(region) 1054 1055 // Frankfurt(and probably future) regions uses different syntax for website endpoints 1056 // http://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteEndpoints.html 1057 if region == "eu-central-1" || region == "ap-south-1" { 1058 return fmt.Sprintf("s3-website.%s.amazonaws.com", region) 1059 } 1060 1061 return fmt.Sprintf("s3-website-%s.amazonaws.com", region) 1062 } 1063 1064 func resourceAwsS3BucketAclUpdate(s3conn *s3.S3, d *schema.ResourceData) error { 1065 acl := d.Get("acl").(string) 1066 bucket := d.Get("bucket").(string) 1067 1068 i := &s3.PutBucketAclInput{ 1069 Bucket: aws.String(bucket), 1070 ACL: aws.String(acl), 1071 } 1072 log.Printf("[DEBUG] S3 put bucket ACL: %#v", i) 1073 1074 _, err := s3conn.PutBucketAcl(i) 1075 if err != nil { 1076 return fmt.Errorf("Error putting S3 ACL: %s", err) 1077 } 1078 1079 return nil 1080 } 1081 1082 func resourceAwsS3BucketVersioningUpdate(s3conn *s3.S3, d 
*schema.ResourceData) error {
	v := d.Get("versioning").(*schema.Set).List()
	bucket := d.Get("bucket").(string)
	vc := &s3.VersioningConfiguration{}

	if len(v) > 0 {
		c := v[0].(map[string]interface{})

		if c["enabled"].(bool) {
			vc.Status = aws.String(s3.BucketVersioningStatusEnabled)
		} else {
			vc.Status = aws.String(s3.BucketVersioningStatusSuspended)
		}
	} else {
		// An absent versioning block means versioning must be explicitly
		// suspended, not left in whatever state the bucket is in.
		vc.Status = aws.String(s3.BucketVersioningStatusSuspended)
	}

	i := &s3.PutBucketVersioningInput{
		Bucket:                  aws.String(bucket),
		VersioningConfiguration: vc,
	}
	log.Printf("[DEBUG] S3 put bucket versioning: %#v", i)

	if _, err := s3conn.PutBucketVersioning(i); err != nil {
		return fmt.Errorf("Error putting S3 versioning: %s", err)
	}

	return nil
}

// resourceAwsS3BucketLoggingUpdate pushes the resource's "logging" block to
// S3. When the block is absent an empty BucketLoggingStatus is sent, which
// disables access logging on the bucket.
func resourceAwsS3BucketLoggingUpdate(s3conn *s3.S3, d *schema.ResourceData) error {
	logging := d.Get("logging").(*schema.Set).List()
	bucket := d.Get("bucket").(string)
	loggingStatus := &s3.BucketLoggingStatus{}

	if len(logging) > 0 {
		c := logging[0].(map[string]interface{})

		loggingEnabled := &s3.LoggingEnabled{}
		if val, ok := c["target_bucket"]; ok {
			loggingEnabled.TargetBucket = aws.String(val.(string))
		}
		if val, ok := c["target_prefix"]; ok {
			loggingEnabled.TargetPrefix = aws.String(val.(string))
		}

		loggingStatus.LoggingEnabled = loggingEnabled
	}

	i := &s3.PutBucketLoggingInput{
		Bucket:              aws.String(bucket),
		BucketLoggingStatus: loggingStatus,
	}
	log.Printf("[DEBUG] S3 put bucket logging: %#v", i)

	if _, err := s3conn.PutBucketLogging(i); err != nil {
		return fmt.Errorf("Error putting S3 logging: %s", err)
	}

	return nil
}

// resourceAwsS3BucketAccelerationUpdate applies the "acceleration_status"
// attribute ("Enabled" or "Suspended", see
// validateS3BucketAccelerationStatus) to the bucket's transfer acceleration
// configuration.
func resourceAwsS3BucketAccelerationUpdate(s3conn *s3.S3, d *schema.ResourceData) error {
	bucket := d.Get("bucket").(string)
	enableAcceleration := d.Get("acceleration_status").(string)

	i := &s3.PutBucketAccelerateConfigurationInput{
		Bucket: aws.String(bucket),
		AccelerateConfiguration: &s3.AccelerateConfiguration{
			Status: aws.String(enableAcceleration),
		},
	}
	log.Printf("[DEBUG] S3 put bucket acceleration: %#v", i)

	if _, err := s3conn.PutBucketAccelerateConfiguration(i); err != nil {
		return fmt.Errorf("Error putting S3 acceleration: %s", err)
	}

	return nil
}

// resourceAwsS3BucketLifecycleUpdate rebuilds the bucket's lifecycle
// configuration from the "lifecycle_rule" list in state and PUTs it to S3.
func resourceAwsS3BucketLifecycleUpdate(s3conn *s3.S3, d *schema.ResourceData) error {
	bucket := d.Get("bucket").(string)

	lifecycleRules := d.Get("lifecycle_rule").([]interface{})

	rules := make([]*s3.LifecycleRule, 0, len(lifecycleRules))

	for i, lifecycleRule := range lifecycleRules {
		r := lifecycleRule.(map[string]interface{})

		rule := &s3.LifecycleRule{
			Prefix: aws.String(r["prefix"].(string)),
		}

		// ID: the S3 API requires one, so generate a unique ID when the
		// user did not supply one.
		if val, ok := r["id"].(string); ok && val != "" {
			rule.ID = aws.String(val)
		} else {
			rule.ID = aws.String(resource.PrefixedUniqueId("tf-s3-lifecycle-"))
		}

		// Enabled
		if val, ok := r["enabled"].(bool); ok && val {
			rule.Status = aws.String(s3.ExpirationStatusEnabled)
		} else {
			rule.Status = aws.String(s3.ExpirationStatusDisabled)
		}

		// AbortIncompleteMultipartUpload
		if val, ok := r["abort_incomplete_multipart_upload_days"].(int); ok && val > 0 {
			rule.AbortIncompleteMultipartUpload = &s3.AbortIncompleteMultipartUpload{
				DaysAfterInitiation: aws.Int64(int64(val)),
			}
		}

		// Expiration: at most one of date, days, or
		// expired_object_delete_marker is applied, in that order of
		// precedence.
		expiration := d.Get(fmt.Sprintf("lifecycle_rule.%d.expiration", i)).(*schema.Set).List()
		if len(expiration) > 0 {
			e := expiration[0].(map[string]interface{})
			i := &s3.LifecycleExpiration{}

			if val, ok := e["date"].(string); ok && val != "" {
				// The config carries a bare date; S3 expects an RFC3339
				// timestamp at midnight UTC.
				t, err := time.Parse(time.RFC3339, fmt.Sprintf("%sT00:00:00Z", val))
				if err != nil {
					return fmt.Errorf("Error Parsing AWS S3 Bucket Lifecycle Expiration Date: %s", err.Error())
				}
				i.Date = aws.Time(t)
			} else if val, ok := e["days"].(int); ok && val > 0 {
				i.Days = aws.Int64(int64(val))
			} else if val, ok := e["expired_object_delete_marker"].(bool); ok {
				i.ExpiredObjectDeleteMarker = aws.Bool(val)
			}
			rule.Expiration = i
		}

		// NoncurrentVersionExpiration
		nc_expiration := d.Get(fmt.Sprintf("lifecycle_rule.%d.noncurrent_version_expiration", i)).(*schema.Set).List()
		if len(nc_expiration) > 0 {
			e := nc_expiration[0].(map[string]interface{})

			if val, ok := e["days"].(int); ok && val > 0 {
				rule.NoncurrentVersionExpiration = &s3.NoncurrentVersionExpiration{
					NoncurrentDays: aws.Int64(int64(val)),
				}
			}
		}

		// Transitions
		transitions := d.Get(fmt.Sprintf("lifecycle_rule.%d.transition", i)).(*schema.Set).List()
		if len(transitions) > 0 {
			rule.Transitions = make([]*s3.Transition, 0, len(transitions))
			for _, transition := range transitions {
				transition := transition.(map[string]interface{})
				i := &s3.Transition{}
				if val, ok := transition["date"].(string); ok && val != "" {
					t, err := time.Parse(time.RFC3339, fmt.Sprintf("%sT00:00:00Z", val))
					if err != nil {
						// Fixed copy-paste bug: this parses the transition
						// date, not the expiration date.
						return fmt.Errorf("Error Parsing AWS S3 Bucket Lifecycle Transition Date: %s", err.Error())
					}
					i.Date = aws.Time(t)
				} else if val, ok := transition["days"].(int); ok && val > 0 {
					i.Days = aws.Int64(int64(val))
				}
				if val, ok := transition["storage_class"].(string); ok && val != "" {
					i.StorageClass = aws.String(val)
				}

				rule.Transitions = append(rule.Transitions, i)
			}
		}

		// NoncurrentVersionTransitions
		nc_transitions := d.Get(fmt.Sprintf("lifecycle_rule.%d.noncurrent_version_transition", i)).(*schema.Set).List()
		if len(nc_transitions) > 0 {
			rule.NoncurrentVersionTransitions = make([]*s3.NoncurrentVersionTransition, 0, len(nc_transitions))
			for _, transition := range nc_transitions {
				transition := transition.(map[string]interface{})
				i := &s3.NoncurrentVersionTransition{}
				if val, ok := transition["days"].(int); ok && val > 0 {
					i.NoncurrentDays = aws.Int64(int64(val))
				}
				if val, ok := transition["storage_class"].(string); ok && val != "" {
					i.StorageClass = aws.String(val)
				}

				rule.NoncurrentVersionTransitions = append(rule.NoncurrentVersionTransitions, i)
			}
		}

		rules = append(rules, rule)
	}

	i := &s3.PutBucketLifecycleConfigurationInput{
		Bucket: aws.String(bucket),
		LifecycleConfiguration: &s3.BucketLifecycleConfiguration{
			Rules: rules,
		},
	}

	err := resource.Retry(1*time.Minute, func() *resource.RetryError {
		// NOTE(review): every error is wrapped as non-retryable, so this
		// retry loop currently makes at most one attempt — confirm whether
		// transient S3 errors (e.g. NoSuchBucket right after create) should
		// be retryable here.
		if _, err := s3conn.PutBucketLifecycleConfiguration(i); err != nil {
			return resource.NonRetryableError(err)
		}
		return nil
	})
	if err != nil {
		return fmt.Errorf("Error putting S3 lifecycle: %s", err)
	}

	return nil
}

// normalizeRoutingRules serializes website routing rules to JSON with all
// null-valued keys removed, producing a stable string for state comparison.
func normalizeRoutingRules(w []*s3.RoutingRule) (string, error) {
	withNulls, err := json.Marshal(w)
	if err != nil {
		return "", err
	}

	var rules []map[string]interface{}
	// Fixed: the Unmarshal error was previously ignored, which would have
	// silently produced an empty rule set on malformed input.
	if err := json.Unmarshal(withNulls, &rules); err != nil {
		return "", err
	}

	var cleanRules []map[string]interface{}
	for _, rule := range rules {
		cleanRules = append(cleanRules, removeNil(rule))
	}

	withoutNulls, err := json.Marshal(cleanRules)
	if err != nil {
		return "", err
	}

	return string(withoutNulls), nil
}

// removeNil returns a copy of data with nil-valued keys dropped, recursing
// into nested maps.
func removeNil(data map[string]interface{}) map[string]interface{} {
	withoutNil := make(map[string]interface{})

	for k, v := range data {
		if v == nil {
			continue
		}

		switch v.(type) {
		case map[string]interface{}:
			withoutNil[k] = removeNil(v.(map[string]interface{}))
		default:
			withoutNil[k] = v
		}
	}

	return withoutNil
}

// normalizeJson re-serializes a JSON string into a canonical compact form so
// semantically equal policies compare equal in state. On a parse failure the
// error text is returned as the value (StateFunc cannot return an error).
func normalizeJson(jsonString interface{}) string {
	if jsonString == nil || jsonString == "" {
		return ""
	}
	var j interface{}
	err := json.Unmarshal([]byte(jsonString.(string)), &j)
	if err != nil {
		return fmt.Sprintf("Error parsing JSON: %s", err)
	}
	b, _ := json.Marshal(j)
	return string(b[:])
}

// normalizeRegion maps the empty LocationConstraint returned by S3 for
// US Standard buckets to its canonical region name.
func normalizeRegion(region string) string {
	// Default to us-east-1 if the bucket doesn't have a region:
	// http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html
	if region == "" {
		region = "us-east-1"
	}

	return region
}

// validateS3BucketAccelerationStatus rejects any acceleration_status value
// other than the two the S3 API accepts.
func validateS3BucketAccelerationStatus(v interface{}, k string) (ws []string, errors []error) {
	validTypes := map[string]struct{}{
		"Enabled":   struct{}{},
		"Suspended": struct{}{},
	}

	if _, ok := validTypes[v.(string)]; !ok {
		errors = append(errors, fmt.Errorf("S3 Bucket Acceleration Status %q is invalid, must be %q or %q", v.(string), "Enabled", "Suspended"))
	}
	return
}

// expirationHash computes the schema.Set hash for a lifecycle expiration
// block from its date/days/expired_object_delete_marker fields.
func expirationHash(v interface{}) int {
	var buf bytes.Buffer
	m := v.(map[string]interface{})
	if v, ok := m["date"]; ok {
		buf.WriteString(fmt.Sprintf("%s-", v.(string)))
	}
	if v, ok := m["days"]; ok {
		buf.WriteString(fmt.Sprintf("%d-", v.(int)))
	}
	if v, ok := m["expired_object_delete_marker"]; ok {
		buf.WriteString(fmt.Sprintf("%t-", v.(bool)))
	}
	return hashcode.String(buf.String())
}

// transitionHash computes the schema.Set hash for a lifecycle transition
// block from its date/days/storage_class fields.
func transitionHash(v interface{}) int {
	var buf bytes.Buffer
	m := v.(map[string]interface{})
	if v, ok := m["date"]; ok {
		buf.WriteString(fmt.Sprintf("%s-", v.(string)))
	}
	if v, ok := m["days"]; ok {
		buf.WriteString(fmt.Sprintf("%d-", v.(int)))
	}
	if v, ok := m["storage_class"]; ok {
		buf.WriteString(fmt.Sprintf("%s-", v.(string)))
	}
	return hashcode.String(buf.String())
}

// S3Website pairs a bucket website endpoint with its hosted-zone domain.
type S3Website struct {
	Endpoint, Domain string
}