github.com/rjeczalik/terraform@v0.6.7-0.20160812060014-e251d5c7bd39/builtin/providers/aws/resource_aws_s3_bucket.go (about)

     1  package aws
     2  
     3  import (
     4  	"bytes"
     5  	"encoding/json"
     6  	"fmt"
     7  	"log"
     8  	"net/url"
     9  	"time"
    10  
    11  	"github.com/hashicorp/terraform/helper/resource"
    12  	"github.com/hashicorp/terraform/helper/schema"
    13  
    14  	"github.com/aws/aws-sdk-go/aws"
    15  	"github.com/aws/aws-sdk-go/aws/awserr"
    16  	"github.com/aws/aws-sdk-go/service/s3"
    17  	"github.com/hashicorp/terraform/helper/hashcode"
    18  )
    19  
// resourceAwsS3Bucket returns the schema and CRUD handlers for the
// aws_s3_bucket resource. The bucket name forces replacement; every
// other attribute is updated in place via resourceAwsS3BucketUpdate.
func resourceAwsS3Bucket() *schema.Resource {
	return &schema.Resource{
		Create: resourceAwsS3BucketCreate,
		Read:   resourceAwsS3BucketRead,
		Update: resourceAwsS3BucketUpdate,
		Delete: resourceAwsS3BucketDelete,

		Schema: map[string]*schema.Schema{
			// Bucket name; changing it destroys and recreates the bucket.
			"bucket": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},

			// ARN of the bucket, computed during Read ("arn:aws:s3:::<name>").
			"arn": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
			},

			// Canned ACL applied at create time (and on change).
			"acl": &schema.Schema{
				Type:     schema.TypeString,
				Default:  "private",
				Optional: true,
			},

			// Bucket policy JSON; normalized so semantically-equal
			// documents don't produce spurious diffs.
			"policy": &schema.Schema{
				Type:      schema.TypeString,
				Optional:  true,
				StateFunc: normalizeJson,
			},

			// CORS configuration; one list entry per CORS rule.
			"cors_rule": &schema.Schema{
				Type:     schema.TypeList,
				Optional: true,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"allowed_headers": &schema.Schema{
							Type:     schema.TypeList,
							Optional: true,
							Elem:     &schema.Schema{Type: schema.TypeString},
						},
						"allowed_methods": &schema.Schema{
							Type:     schema.TypeList,
							Required: true,
							Elem:     &schema.Schema{Type: schema.TypeString},
						},
						"allowed_origins": &schema.Schema{
							Type:     schema.TypeList,
							Required: true,
							Elem:     &schema.Schema{Type: schema.TypeString},
						},
						"expose_headers": &schema.Schema{
							Type:     schema.TypeList,
							Optional: true,
							Elem:     &schema.Schema{Type: schema.TypeString},
						},
						"max_age_seconds": &schema.Schema{
							Type:     schema.TypeInt,
							Optional: true,
						},
					},
				},
			},

			// Static website hosting configuration (at most one entry used).
			"website": &schema.Schema{
				Type:     schema.TypeList,
				Optional: true,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"index_document": &schema.Schema{
							Type:     schema.TypeString,
							Optional: true,
						},

						"error_document": &schema.Schema{
							Type:     schema.TypeString,
							Optional: true,
						},

						// Mutually exclusive with the document/routing
						// settings above — S3 rejects both together.
						"redirect_all_requests_to": &schema.Schema{
							Type: schema.TypeString,
							ConflictsWith: []string{
								"website.0.index_document",
								"website.0.error_document",
								"website.0.routing_rules",
							},
							Optional: true,
						},

						// Routing rules as JSON; normalized to avoid diffs.
						"routing_rules": &schema.Schema{
							Type:      schema.TypeString,
							Optional:  true,
							StateFunc: normalizeJson,
						},
					},
				},
			},

			// Route53 hosted zone ID for this bucket's region (computed).
			"hosted_zone_id": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
			},

			// Region the bucket resides in (computed from GetBucketLocation).
			"region": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
			},
			// Website endpoint/domain, only set when website hosting is on.
			"website_endpoint": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
			},
			"website_domain": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
			},

			// Versioning toggle; modeled as a set of at most one element.
			"versioning": &schema.Schema{
				Type:     schema.TypeSet,
				Optional: true,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"enabled": &schema.Schema{
							Type:     schema.TypeBool,
							Optional: true,
							Default:  false,
						},
					},
				},
				// Hash on the single "enabled" flag.
				Set: func(v interface{}) int {
					var buf bytes.Buffer
					m := v.(map[string]interface{})
					buf.WriteString(fmt.Sprintf("%t-", m["enabled"].(bool)))

					return hashcode.String(buf.String())
				},
			},

			// Server access logging destination.
			"logging": &schema.Schema{
				Type:     schema.TypeSet,
				Optional: true,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"target_bucket": &schema.Schema{
							Type:     schema.TypeString,
							Required: true,
						},
						"target_prefix": &schema.Schema{
							Type:     schema.TypeString,
							Optional: true,
						},
					},
				},
				// Hash on target bucket + prefix.
				Set: func(v interface{}) int {
					var buf bytes.Buffer
					m := v.(map[string]interface{})
					buf.WriteString(fmt.Sprintf("%s-", m["target_bucket"]))
					buf.WriteString(fmt.Sprintf("%s-", m["target_prefix"]))
					return hashcode.String(buf.String())
				},
			},

			// Object lifecycle rules (expiration/transition schedules).
			"lifecycle_rule": &schema.Schema{
				Type:     schema.TypeList,
				Optional: true,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						// Rule ID; generated by AWS when omitted.
						"id": &schema.Schema{
							Type:         schema.TypeString,
							Optional:     true,
							Computed:     true,
							ValidateFunc: validateS3BucketLifecycleRuleId,
						},
						"prefix": &schema.Schema{
							Type:     schema.TypeString,
							Required: true,
						},
						"enabled": &schema.Schema{
							Type:     schema.TypeBool,
							Required: true,
						},
						"abort_incomplete_multipart_upload_days": &schema.Schema{
							Type:     schema.TypeInt,
							Optional: true,
						},
						// Current-version expiration: date XOR days,
						// or expired_object_delete_marker.
						"expiration": &schema.Schema{
							Type:     schema.TypeSet,
							Optional: true,
							Set:      expirationHash,
							Elem: &schema.Resource{
								Schema: map[string]*schema.Schema{
									"date": &schema.Schema{
										Type:         schema.TypeString,
										Optional:     true,
										ValidateFunc: validateS3BucketLifecycleTimestamp,
									},
									"days": &schema.Schema{
										Type:     schema.TypeInt,
										Optional: true,
									},
									"expired_object_delete_marker": &schema.Schema{
										Type:     schema.TypeBool,
										Optional: true,
									},
								},
							},
						},
						// Expiration for noncurrent (older) object versions.
						"noncurrent_version_expiration": &schema.Schema{
							Type:     schema.TypeSet,
							Optional: true,
							Set:      expirationHash,
							Elem: &schema.Resource{
								Schema: map[string]*schema.Schema{
									"days": &schema.Schema{
										Type:     schema.TypeInt,
										Optional: true,
									},
								},
							},
						},
						// Storage-class transition for current versions.
						"transition": &schema.Schema{
							Type:     schema.TypeSet,
							Optional: true,
							Set:      transitionHash,
							Elem: &schema.Resource{
								Schema: map[string]*schema.Schema{
									"date": &schema.Schema{
										Type:         schema.TypeString,
										Optional:     true,
										ValidateFunc: validateS3BucketLifecycleTimestamp,
									},
									"days": &schema.Schema{
										Type:     schema.TypeInt,
										Optional: true,
									},
									"storage_class": &schema.Schema{
										Type:         schema.TypeString,
										Required:     true,
										ValidateFunc: validateS3BucketLifecycleStorageClass,
									},
								},
							},
						},
						// Storage-class transition for noncurrent versions.
						"noncurrent_version_transition": &schema.Schema{
							Type:     schema.TypeSet,
							Optional: true,
							Set:      transitionHash,
							Elem: &schema.Resource{
								Schema: map[string]*schema.Schema{
									"days": &schema.Schema{
										Type:     schema.TypeInt,
										Optional: true,
									},
									"storage_class": &schema.Schema{
										Type:         schema.TypeString,
										Required:     true,
										ValidateFunc: validateS3BucketLifecycleStorageClass,
									},
								},
							},
						},
					},
				},
			},

			// When true, Delete empties the bucket (all versions) first.
			"force_destroy": &schema.Schema{
				Type:     schema.TypeBool,
				Optional: true,
				Default:  false,
			},

			// S3 Transfer Acceleration status (Enabled/Suspended).
			"acceleration_status": &schema.Schema{
				Type:         schema.TypeString,
				Optional:     true,
				Computed:     true,
				ValidateFunc: validateS3BucketAccelerationStatus,
			},

			// Request Payer configuration (BucketOwner/Requester).
			"request_payer": &schema.Schema{
				Type:         schema.TypeString,
				Optional:     true,
				Computed:     true,
				ValidateFunc: validateS3BucketRequestPayerType,
			},

			"tags": tagsSchema(),
		},
	}
}
   313  
   314  func resourceAwsS3BucketCreate(d *schema.ResourceData, meta interface{}) error {
   315  	s3conn := meta.(*AWSClient).s3conn
   316  	awsRegion := meta.(*AWSClient).region
   317  
   318  	// Get the bucket and acl
   319  	bucket := d.Get("bucket").(string)
   320  	acl := d.Get("acl").(string)
   321  
   322  	log.Printf("[DEBUG] S3 bucket create: %s, ACL: %s", bucket, acl)
   323  
   324  	req := &s3.CreateBucketInput{
   325  		Bucket: aws.String(bucket),
   326  		ACL:    aws.String(acl),
   327  	}
   328  
   329  	// Special case us-east-1 region and do not set the LocationConstraint.
   330  	// See "Request Elements: http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUT.html
   331  	if awsRegion != "us-east-1" {
   332  		req.CreateBucketConfiguration = &s3.CreateBucketConfiguration{
   333  			LocationConstraint: aws.String(awsRegion),
   334  		}
   335  	}
   336  
   337  	err := resource.Retry(5*time.Minute, func() *resource.RetryError {
   338  		log.Printf("[DEBUG] Trying to create new S3 bucket: %q", bucket)
   339  		_, err := s3conn.CreateBucket(req)
   340  		if awsErr, ok := err.(awserr.Error); ok {
   341  			if awsErr.Code() == "OperationAborted" {
   342  				log.Printf("[WARN] Got an error while trying to create S3 bucket %s: %s", bucket, err)
   343  				return resource.RetryableError(
   344  					fmt.Errorf("[WARN] Error creating S3 bucket %s, retrying: %s",
   345  						bucket, err))
   346  			}
   347  		}
   348  		if err != nil {
   349  			return resource.NonRetryableError(err)
   350  		}
   351  
   352  		return nil
   353  	})
   354  
   355  	if err != nil {
   356  		return fmt.Errorf("Error creating S3 bucket: %s", err)
   357  	}
   358  
   359  	// Assign the bucket name as the resource ID
   360  	d.SetId(bucket)
   361  
   362  	return resourceAwsS3BucketUpdate(d, meta)
   363  }
   364  
   365  func resourceAwsS3BucketUpdate(d *schema.ResourceData, meta interface{}) error {
   366  	s3conn := meta.(*AWSClient).s3conn
   367  	if err := setTagsS3(s3conn, d); err != nil {
   368  		return err
   369  	}
   370  
   371  	if d.HasChange("policy") {
   372  		if err := resourceAwsS3BucketPolicyUpdate(s3conn, d); err != nil {
   373  			return err
   374  		}
   375  	}
   376  
   377  	if d.HasChange("cors_rule") {
   378  		if err := resourceAwsS3BucketCorsUpdate(s3conn, d); err != nil {
   379  			return err
   380  		}
   381  	}
   382  
   383  	if d.HasChange("website") {
   384  		if err := resourceAwsS3BucketWebsiteUpdate(s3conn, d); err != nil {
   385  			return err
   386  		}
   387  	}
   388  
   389  	if d.HasChange("versioning") {
   390  		if err := resourceAwsS3BucketVersioningUpdate(s3conn, d); err != nil {
   391  			return err
   392  		}
   393  	}
   394  	if d.HasChange("acl") {
   395  		if err := resourceAwsS3BucketAclUpdate(s3conn, d); err != nil {
   396  			return err
   397  		}
   398  	}
   399  
   400  	if d.HasChange("logging") {
   401  		if err := resourceAwsS3BucketLoggingUpdate(s3conn, d); err != nil {
   402  			return err
   403  		}
   404  	}
   405  
   406  	if d.HasChange("lifecycle_rule") {
   407  		if err := resourceAwsS3BucketLifecycleUpdate(s3conn, d); err != nil {
   408  			return err
   409  		}
   410  	}
   411  
   412  	if d.HasChange("acceleration_status") {
   413  		if err := resourceAwsS3BucketAccelerationUpdate(s3conn, d); err != nil {
   414  			return err
   415  		}
   416  	}
   417  
   418  	if d.HasChange("request_payer") {
   419  		if err := resourceAwsS3BucketRequestPayerUpdate(s3conn, d); err != nil {
   420  			return err
   421  		}
   422  	}
   423  
   424  	return resourceAwsS3BucketRead(d, meta)
   425  }
   426  
   427  func resourceAwsS3BucketRead(d *schema.ResourceData, meta interface{}) error {
   428  	s3conn := meta.(*AWSClient).s3conn
   429  
   430  	var err error
   431  	_, err = s3conn.HeadBucket(&s3.HeadBucketInput{
   432  		Bucket: aws.String(d.Id()),
   433  	})
   434  	if err != nil {
   435  		if awsError, ok := err.(awserr.RequestFailure); ok && awsError.StatusCode() == 404 {
   436  			log.Printf("[WARN] S3 Bucket (%s) not found, error code (404)", d.Id())
   437  			d.SetId("")
   438  			return nil
   439  		} else {
   440  			// some of the AWS SDK's errors can be empty strings, so let's add
   441  			// some additional context.
   442  			return fmt.Errorf("error reading S3 bucket \"%s\": %s", d.Id(), err)
   443  		}
   444  	}
   445  
   446  	// In the import case, we won't have this
   447  	if _, ok := d.GetOk("bucket"); !ok {
   448  		d.Set("bucket", d.Id())
   449  	}
   450  
   451  	// Read the policy
   452  	pol, err := s3conn.GetBucketPolicy(&s3.GetBucketPolicyInput{
   453  		Bucket: aws.String(d.Id()),
   454  	})
   455  	log.Printf("[DEBUG] S3 bucket: %s, read policy: %v", d.Id(), pol)
   456  	if err != nil {
   457  		if err := d.Set("policy", ""); err != nil {
   458  			return err
   459  		}
   460  	} else {
   461  		if v := pol.Policy; v == nil {
   462  			if err := d.Set("policy", ""); err != nil {
   463  				return err
   464  			}
   465  		} else if err := d.Set("policy", normalizeJson(*v)); err != nil {
   466  			return err
   467  		}
   468  	}
   469  
   470  	// Read the CORS
   471  	cors, err := s3conn.GetBucketCors(&s3.GetBucketCorsInput{
   472  		Bucket: aws.String(d.Id()),
   473  	})
   474  	if err != nil {
   475  		// An S3 Bucket might not have CORS configuration set.
   476  		if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() != "NoSuchCORSConfiguration" {
   477  			return err
   478  		}
   479  		log.Printf("[WARN] S3 bucket: %s, no CORS configuration could be found.", d.Id())
   480  	}
   481  	log.Printf("[DEBUG] S3 bucket: %s, read CORS: %v", d.Id(), cors)
   482  	if cors.CORSRules != nil {
   483  		rules := make([]map[string]interface{}, 0, len(cors.CORSRules))
   484  		for _, ruleObject := range cors.CORSRules {
   485  			rule := make(map[string]interface{})
   486  			rule["allowed_headers"] = flattenStringList(ruleObject.AllowedHeaders)
   487  			rule["allowed_methods"] = flattenStringList(ruleObject.AllowedMethods)
   488  			rule["allowed_origins"] = flattenStringList(ruleObject.AllowedOrigins)
   489  			// Both the "ExposeHeaders" and "MaxAgeSeconds" might not be set.
   490  			if ruleObject.AllowedOrigins != nil {
   491  				rule["expose_headers"] = flattenStringList(ruleObject.ExposeHeaders)
   492  			}
   493  			if ruleObject.MaxAgeSeconds != nil {
   494  				rule["max_age_seconds"] = int(*ruleObject.MaxAgeSeconds)
   495  			}
   496  			rules = append(rules, rule)
   497  		}
   498  		if err := d.Set("cors_rule", rules); err != nil {
   499  			return err
   500  		}
   501  	}
   502  
   503  	// Read the website configuration
   504  	ws, err := s3conn.GetBucketWebsite(&s3.GetBucketWebsiteInput{
   505  		Bucket: aws.String(d.Id()),
   506  	})
   507  	var websites []map[string]interface{}
   508  	if err == nil {
   509  		w := make(map[string]interface{})
   510  
   511  		if v := ws.IndexDocument; v != nil {
   512  			w["index_document"] = *v.Suffix
   513  		}
   514  
   515  		if v := ws.ErrorDocument; v != nil {
   516  			w["error_document"] = *v.Key
   517  		}
   518  
   519  		if v := ws.RedirectAllRequestsTo; v != nil {
   520  			if v.Protocol == nil {
   521  				w["redirect_all_requests_to"] = *v.HostName
   522  			} else {
   523  				var host string
   524  				var path string
   525  				parsedHostName, err := url.Parse(*v.HostName)
   526  				if err == nil {
   527  					host = parsedHostName.Host
   528  					path = parsedHostName.Path
   529  				} else {
   530  					host = *v.HostName
   531  					path = ""
   532  				}
   533  
   534  				w["redirect_all_requests_to"] = (&url.URL{
   535  					Host:   host,
   536  					Path:   path,
   537  					Scheme: *v.Protocol,
   538  				}).String()
   539  			}
   540  		}
   541  
   542  		if v := ws.RoutingRules; v != nil {
   543  			rr, err := normalizeRoutingRules(v)
   544  			if err != nil {
   545  				return fmt.Errorf("Error while marshaling routing rules: %s", err)
   546  			}
   547  			w["routing_rules"] = rr
   548  		}
   549  
   550  		websites = append(websites, w)
   551  	}
   552  	if err := d.Set("website", websites); err != nil {
   553  		return err
   554  	}
   555  
   556  	// Read the versioning configuration
   557  	versioning, err := s3conn.GetBucketVersioning(&s3.GetBucketVersioningInput{
   558  		Bucket: aws.String(d.Id()),
   559  	})
   560  	if err != nil {
   561  		return err
   562  	}
   563  	log.Printf("[DEBUG] S3 Bucket: %s, versioning: %v", d.Id(), versioning)
   564  	if versioning.Status != nil && *versioning.Status == s3.BucketVersioningStatusEnabled {
   565  		vcl := make([]map[string]interface{}, 0, 1)
   566  		vc := make(map[string]interface{})
   567  		if *versioning.Status == s3.BucketVersioningStatusEnabled {
   568  			vc["enabled"] = true
   569  		} else {
   570  			vc["enabled"] = false
   571  		}
   572  		vcl = append(vcl, vc)
   573  		if err := d.Set("versioning", vcl); err != nil {
   574  			return err
   575  		}
   576  	}
   577  
   578  	//read the acceleration status
   579  	accelerate, err := s3conn.GetBucketAccelerateConfiguration(&s3.GetBucketAccelerateConfigurationInput{
   580  		Bucket: aws.String(d.Id()),
   581  	})
   582  	if err != nil {
   583  		// Amazon S3 Transfer Acceleration might not be supported in the
   584  		// given region, for example, China (Beijing) and the Government
   585  		// Cloud does not support this feature at the moment.
   586  		if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() != "UnsupportedArgument" {
   587  			return err
   588  		}
   589  		log.Printf("[WARN] S3 bucket: %s, the S3 Transfer Accelaration is not supported in the region: %s", d.Id(), meta.(*AWSClient).region)
   590  	} else {
   591  		log.Printf("[DEBUG] S3 bucket: %s, read Acceleration: %v", d.Id(), accelerate)
   592  		d.Set("acceleration_status", accelerate.Status)
   593  	}
   594  
   595  	// Read the request payer configuration.
   596  	payer, err := s3conn.GetBucketRequestPayment(&s3.GetBucketRequestPaymentInput{
   597  		Bucket: aws.String(d.Id()),
   598  	})
   599  	if err != nil {
   600  		return err
   601  	}
   602  	log.Printf("[DEBUG] S3 Bucket: %s, read request payer: %v", d.Id(), payer)
   603  	if payer.Payer != nil {
   604  		if err := d.Set("request_payer", *payer.Payer); err != nil {
   605  			return err
   606  		}
   607  	}
   608  
   609  	// Read the logging configuration
   610  	logging, err := s3conn.GetBucketLogging(&s3.GetBucketLoggingInput{
   611  		Bucket: aws.String(d.Id()),
   612  	})
   613  	if err != nil {
   614  		return err
   615  	}
   616  
   617  	log.Printf("[DEBUG] S3 Bucket: %s, logging: %v", d.Id(), logging)
   618  	if v := logging.LoggingEnabled; v != nil {
   619  		lcl := make([]map[string]interface{}, 0, 1)
   620  		lc := make(map[string]interface{})
   621  		if *v.TargetBucket != "" {
   622  			lc["target_bucket"] = *v.TargetBucket
   623  		}
   624  		if *v.TargetPrefix != "" {
   625  			lc["target_prefix"] = *v.TargetPrefix
   626  		}
   627  		lcl = append(lcl, lc)
   628  		if err := d.Set("logging", lcl); err != nil {
   629  			return err
   630  		}
   631  	}
   632  
   633  	// Read the lifecycle configuration
   634  	lifecycle, err := s3conn.GetBucketLifecycleConfiguration(&s3.GetBucketLifecycleConfigurationInput{
   635  		Bucket: aws.String(d.Id()),
   636  	})
   637  	if err != nil {
   638  		if awsError, ok := err.(awserr.RequestFailure); ok && awsError.StatusCode() != 404 {
   639  			return err
   640  		}
   641  	}
   642  	log.Printf("[DEBUG] S3 Bucket: %s, lifecycle: %v", d.Id(), lifecycle)
   643  	if len(lifecycle.Rules) > 0 {
   644  		rules := make([]map[string]interface{}, 0, len(lifecycle.Rules))
   645  
   646  		for _, lifecycleRule := range lifecycle.Rules {
   647  			rule := make(map[string]interface{})
   648  
   649  			// ID
   650  			if lifecycleRule.ID != nil && *lifecycleRule.ID != "" {
   651  				rule["id"] = *lifecycleRule.ID
   652  			}
   653  			// Prefix
   654  			if lifecycleRule.Prefix != nil && *lifecycleRule.Prefix != "" {
   655  				rule["prefix"] = *lifecycleRule.Prefix
   656  			}
   657  			// Enabled
   658  			if lifecycleRule.Status != nil {
   659  				if *lifecycleRule.Status == s3.ExpirationStatusEnabled {
   660  					rule["enabled"] = true
   661  				} else {
   662  					rule["enabled"] = false
   663  				}
   664  			}
   665  
   666  			// AbortIncompleteMultipartUploadDays
   667  			if lifecycleRule.AbortIncompleteMultipartUpload != nil {
   668  				if lifecycleRule.AbortIncompleteMultipartUpload.DaysAfterInitiation != nil {
   669  					rule["abort_incomplete_multipart_upload_days"] = int(*lifecycleRule.AbortIncompleteMultipartUpload.DaysAfterInitiation)
   670  				}
   671  			}
   672  
   673  			// expiration
   674  			if lifecycleRule.Expiration != nil {
   675  				e := make(map[string]interface{})
   676  				if lifecycleRule.Expiration.Date != nil {
   677  					e["date"] = (*lifecycleRule.Expiration.Date).Format("2006-01-02")
   678  				}
   679  				if lifecycleRule.Expiration.Days != nil {
   680  					e["days"] = int(*lifecycleRule.Expiration.Days)
   681  				}
   682  				if lifecycleRule.Expiration.ExpiredObjectDeleteMarker != nil {
   683  					e["expired_object_delete_marker"] = *lifecycleRule.Expiration.ExpiredObjectDeleteMarker
   684  				}
   685  				rule["expiration"] = schema.NewSet(expirationHash, []interface{}{e})
   686  			}
   687  			// noncurrent_version_expiration
   688  			if lifecycleRule.NoncurrentVersionExpiration != nil {
   689  				e := make(map[string]interface{})
   690  				if lifecycleRule.NoncurrentVersionExpiration.NoncurrentDays != nil {
   691  					e["days"] = int(*lifecycleRule.NoncurrentVersionExpiration.NoncurrentDays)
   692  				}
   693  				rule["noncurrent_version_expiration"] = schema.NewSet(expirationHash, []interface{}{e})
   694  			}
   695  			//// transition
   696  			if len(lifecycleRule.Transitions) > 0 {
   697  				transitions := make([]interface{}, 0, len(lifecycleRule.Transitions))
   698  				for _, v := range lifecycleRule.Transitions {
   699  					t := make(map[string]interface{})
   700  					if v.Date != nil {
   701  						t["date"] = (*v.Date).Format("2006-01-02")
   702  					}
   703  					if v.Days != nil {
   704  						t["days"] = int(*v.Days)
   705  					}
   706  					if v.StorageClass != nil {
   707  						t["storage_class"] = *v.StorageClass
   708  					}
   709  					transitions = append(transitions, t)
   710  				}
   711  				rule["transition"] = schema.NewSet(transitionHash, transitions)
   712  			}
   713  			// noncurrent_version_transition
   714  			if len(lifecycleRule.NoncurrentVersionTransitions) > 0 {
   715  				transitions := make([]interface{}, 0, len(lifecycleRule.NoncurrentVersionTransitions))
   716  				for _, v := range lifecycleRule.NoncurrentVersionTransitions {
   717  					t := make(map[string]interface{})
   718  					if v.NoncurrentDays != nil {
   719  						t["days"] = int(*v.NoncurrentDays)
   720  					}
   721  					if v.StorageClass != nil {
   722  						t["storage_class"] = *v.StorageClass
   723  					}
   724  					transitions = append(transitions, t)
   725  				}
   726  				rule["noncurrent_version_transition"] = schema.NewSet(transitionHash, transitions)
   727  			}
   728  
   729  			rules = append(rules, rule)
   730  		}
   731  
   732  		if err := d.Set("lifecycle_rule", rules); err != nil {
   733  			return err
   734  		}
   735  	}
   736  
   737  	// Add the region as an attribute
   738  	location, err := s3conn.GetBucketLocation(
   739  		&s3.GetBucketLocationInput{
   740  			Bucket: aws.String(d.Id()),
   741  		},
   742  	)
   743  	if err != nil {
   744  		return err
   745  	}
   746  	var region string
   747  	if location.LocationConstraint != nil {
   748  		region = *location.LocationConstraint
   749  	}
   750  	region = normalizeRegion(region)
   751  	if err := d.Set("region", region); err != nil {
   752  		return err
   753  	}
   754  
   755  	// Add the hosted zone ID for this bucket's region as an attribute
   756  	hostedZoneID := HostedZoneIDForRegion(region)
   757  	if err := d.Set("hosted_zone_id", hostedZoneID); err != nil {
   758  		return err
   759  	}
   760  
   761  	// Add website_endpoint as an attribute
   762  	websiteEndpoint, err := websiteEndpoint(s3conn, d)
   763  	if err != nil {
   764  		return err
   765  	}
   766  	if websiteEndpoint != nil {
   767  		if err := d.Set("website_endpoint", websiteEndpoint.Endpoint); err != nil {
   768  			return err
   769  		}
   770  		if err := d.Set("website_domain", websiteEndpoint.Domain); err != nil {
   771  			return err
   772  		}
   773  	}
   774  
   775  	tagSet, err := getTagSetS3(s3conn, d.Id())
   776  	if err != nil {
   777  		return err
   778  	}
   779  
   780  	if err := d.Set("tags", tagsToMapS3(tagSet)); err != nil {
   781  		return err
   782  	}
   783  
   784  	d.Set("arn", fmt.Sprint("arn:aws:s3:::", d.Id()))
   785  
   786  	return nil
   787  }
   788  
   789  func resourceAwsS3BucketDelete(d *schema.ResourceData, meta interface{}) error {
   790  	s3conn := meta.(*AWSClient).s3conn
   791  
   792  	log.Printf("[DEBUG] S3 Delete Bucket: %s", d.Id())
   793  	_, err := s3conn.DeleteBucket(&s3.DeleteBucketInput{
   794  		Bucket: aws.String(d.Id()),
   795  	})
   796  	if err != nil {
   797  		ec2err, ok := err.(awserr.Error)
   798  		if ok && ec2err.Code() == "BucketNotEmpty" {
   799  			if d.Get("force_destroy").(bool) {
   800  				// bucket may have things delete them
   801  				log.Printf("[DEBUG] S3 Bucket attempting to forceDestroy %+v", err)
   802  
   803  				bucket := d.Get("bucket").(string)
   804  				resp, err := s3conn.ListObjectVersions(
   805  					&s3.ListObjectVersionsInput{
   806  						Bucket: aws.String(bucket),
   807  					},
   808  				)
   809  
   810  				if err != nil {
   811  					return fmt.Errorf("Error S3 Bucket list Object Versions err: %s", err)
   812  				}
   813  
   814  				objectsToDelete := make([]*s3.ObjectIdentifier, 0)
   815  
   816  				if len(resp.DeleteMarkers) != 0 {
   817  
   818  					for _, v := range resp.DeleteMarkers {
   819  						objectsToDelete = append(objectsToDelete, &s3.ObjectIdentifier{
   820  							Key:       v.Key,
   821  							VersionId: v.VersionId,
   822  						})
   823  					}
   824  				}
   825  
   826  				if len(resp.Versions) != 0 {
   827  					for _, v := range resp.Versions {
   828  						objectsToDelete = append(objectsToDelete, &s3.ObjectIdentifier{
   829  							Key:       v.Key,
   830  							VersionId: v.VersionId,
   831  						})
   832  					}
   833  				}
   834  
   835  				params := &s3.DeleteObjectsInput{
   836  					Bucket: aws.String(bucket),
   837  					Delete: &s3.Delete{
   838  						Objects: objectsToDelete,
   839  					},
   840  				}
   841  
   842  				_, err = s3conn.DeleteObjects(params)
   843  
   844  				if err != nil {
   845  					return fmt.Errorf("Error S3 Bucket force_destroy error deleting: %s", err)
   846  				}
   847  
   848  				// this line recurses until all objects are deleted or an error is returned
   849  				return resourceAwsS3BucketDelete(d, meta)
   850  			}
   851  		}
   852  		return fmt.Errorf("Error deleting S3 Bucket: %s", err)
   853  	}
   854  	return nil
   855  }
   856  
   857  func resourceAwsS3BucketPolicyUpdate(s3conn *s3.S3, d *schema.ResourceData) error {
   858  	bucket := d.Get("bucket").(string)
   859  	policy := d.Get("policy").(string)
   860  
   861  	if policy != "" {
   862  		log.Printf("[DEBUG] S3 bucket: %s, put policy: %s", bucket, policy)
   863  
   864  		params := &s3.PutBucketPolicyInput{
   865  			Bucket: aws.String(bucket),
   866  			Policy: aws.String(policy),
   867  		}
   868  
   869  		err := resource.Retry(1*time.Minute, func() *resource.RetryError {
   870  			if _, err := s3conn.PutBucketPolicy(params); err != nil {
   871  				if awserr, ok := err.(awserr.Error); ok {
   872  					if awserr.Code() == "MalformedPolicy" {
   873  						return resource.RetryableError(awserr)
   874  					}
   875  				}
   876  				return resource.NonRetryableError(err)
   877  			}
   878  			return nil
   879  		})
   880  
   881  		if err != nil {
   882  			return fmt.Errorf("Error putting S3 policy: %s", err)
   883  		}
   884  	} else {
   885  		log.Printf("[DEBUG] S3 bucket: %s, delete policy: %s", bucket, policy)
   886  		_, err := s3conn.DeleteBucketPolicy(&s3.DeleteBucketPolicyInput{
   887  			Bucket: aws.String(bucket),
   888  		})
   889  
   890  		if err != nil {
   891  			return fmt.Errorf("Error deleting S3 policy: %s", err)
   892  		}
   893  	}
   894  
   895  	return nil
   896  }
   897  
   898  func resourceAwsS3BucketCorsUpdate(s3conn *s3.S3, d *schema.ResourceData) error {
   899  	bucket := d.Get("bucket").(string)
   900  	rawCors := d.Get("cors_rule").([]interface{})
   901  
   902  	if len(rawCors) == 0 {
   903  		// Delete CORS
   904  		log.Printf("[DEBUG] S3 bucket: %s, delete CORS", bucket)
   905  		_, err := s3conn.DeleteBucketCors(&s3.DeleteBucketCorsInput{
   906  			Bucket: aws.String(bucket),
   907  		})
   908  		if err != nil {
   909  			return fmt.Errorf("Error deleting S3 CORS: %s", err)
   910  		}
   911  	} else {
   912  		// Put CORS
   913  		rules := make([]*s3.CORSRule, 0, len(rawCors))
   914  		for _, cors := range rawCors {
   915  			corsMap := cors.(map[string]interface{})
   916  			r := &s3.CORSRule{}
   917  			for k, v := range corsMap {
   918  				log.Printf("[DEBUG] S3 bucket: %s, put CORS: %#v, %#v", bucket, k, v)
   919  				if k == "max_age_seconds" {
   920  					r.MaxAgeSeconds = aws.Int64(int64(v.(int)))
   921  				} else {
   922  					vMap := make([]*string, len(v.([]interface{})))
   923  					for i, vv := range v.([]interface{}) {
   924  						str := vv.(string)
   925  						vMap[i] = aws.String(str)
   926  					}
   927  					switch k {
   928  					case "allowed_headers":
   929  						r.AllowedHeaders = vMap
   930  					case "allowed_methods":
   931  						r.AllowedMethods = vMap
   932  					case "allowed_origins":
   933  						r.AllowedOrigins = vMap
   934  					case "expose_headers":
   935  						r.ExposeHeaders = vMap
   936  					}
   937  				}
   938  			}
   939  			rules = append(rules, r)
   940  		}
   941  		corsInput := &s3.PutBucketCorsInput{
   942  			Bucket: aws.String(bucket),
   943  			CORSConfiguration: &s3.CORSConfiguration{
   944  				CORSRules: rules,
   945  			},
   946  		}
   947  		log.Printf("[DEBUG] S3 bucket: %s, put CORS: %#v", bucket, corsInput)
   948  		_, err := s3conn.PutBucketCors(corsInput)
   949  		if err != nil {
   950  			return fmt.Errorf("Error putting S3 CORS: %s", err)
   951  		}
   952  	}
   953  
   954  	return nil
   955  }
   956  
   957  func resourceAwsS3BucketWebsiteUpdate(s3conn *s3.S3, d *schema.ResourceData) error {
   958  	ws := d.Get("website").([]interface{})
   959  
   960  	if len(ws) == 1 {
   961  		var w map[string]interface{}
   962  		if ws[0] != nil {
   963  			w = ws[0].(map[string]interface{})
   964  		} else {
   965  			w = make(map[string]interface{})
   966  		}
   967  		return resourceAwsS3BucketWebsitePut(s3conn, d, w)
   968  	} else if len(ws) == 0 {
   969  		return resourceAwsS3BucketWebsiteDelete(s3conn, d)
   970  	} else {
   971  		return fmt.Errorf("Cannot specify more than one website.")
   972  	}
   973  }
   974  
   975  func resourceAwsS3BucketWebsitePut(s3conn *s3.S3, d *schema.ResourceData, website map[string]interface{}) error {
   976  	bucket := d.Get("bucket").(string)
   977  
   978  	var indexDocument, errorDocument, redirectAllRequestsTo, routingRules string
   979  	if v, ok := website["index_document"]; ok {
   980  		indexDocument = v.(string)
   981  	}
   982  	if v, ok := website["error_document"]; ok {
   983  		errorDocument = v.(string)
   984  	}
   985  	if v, ok := website["redirect_all_requests_to"]; ok {
   986  		redirectAllRequestsTo = v.(string)
   987  	}
   988  	if v, ok := website["routing_rules"]; ok {
   989  		routingRules = v.(string)
   990  	}
   991  
   992  	if indexDocument == "" && redirectAllRequestsTo == "" {
   993  		return fmt.Errorf("Must specify either index_document or redirect_all_requests_to.")
   994  	}
   995  
   996  	websiteConfiguration := &s3.WebsiteConfiguration{}
   997  
   998  	if indexDocument != "" {
   999  		websiteConfiguration.IndexDocument = &s3.IndexDocument{Suffix: aws.String(indexDocument)}
  1000  	}
  1001  
  1002  	if errorDocument != "" {
  1003  		websiteConfiguration.ErrorDocument = &s3.ErrorDocument{Key: aws.String(errorDocument)}
  1004  	}
  1005  
  1006  	if redirectAllRequestsTo != "" {
  1007  		redirect, err := url.Parse(redirectAllRequestsTo)
  1008  		if err == nil && redirect.Scheme != "" {
  1009  			var redirectHostBuf bytes.Buffer
  1010  			redirectHostBuf.WriteString(redirect.Host)
  1011  			if redirect.Path != "" {
  1012  				redirectHostBuf.WriteString(redirect.Path)
  1013  			}
  1014  			websiteConfiguration.RedirectAllRequestsTo = &s3.RedirectAllRequestsTo{HostName: aws.String(redirectHostBuf.String()), Protocol: aws.String(redirect.Scheme)}
  1015  		} else {
  1016  			websiteConfiguration.RedirectAllRequestsTo = &s3.RedirectAllRequestsTo{HostName: aws.String(redirectAllRequestsTo)}
  1017  		}
  1018  	}
  1019  
  1020  	if routingRules != "" {
  1021  		var unmarshaledRules []*s3.RoutingRule
  1022  		if err := json.Unmarshal([]byte(routingRules), &unmarshaledRules); err != nil {
  1023  			return err
  1024  		}
  1025  		websiteConfiguration.RoutingRules = unmarshaledRules
  1026  	}
  1027  
  1028  	putInput := &s3.PutBucketWebsiteInput{
  1029  		Bucket:               aws.String(bucket),
  1030  		WebsiteConfiguration: websiteConfiguration,
  1031  	}
  1032  
  1033  	log.Printf("[DEBUG] S3 put bucket website: %#v", putInput)
  1034  
  1035  	_, err := s3conn.PutBucketWebsite(putInput)
  1036  	if err != nil {
  1037  		return fmt.Errorf("Error putting S3 website: %s", err)
  1038  	}
  1039  
  1040  	return nil
  1041  }
  1042  
  1043  func resourceAwsS3BucketWebsiteDelete(s3conn *s3.S3, d *schema.ResourceData) error {
  1044  	bucket := d.Get("bucket").(string)
  1045  	deleteInput := &s3.DeleteBucketWebsiteInput{Bucket: aws.String(bucket)}
  1046  
  1047  	log.Printf("[DEBUG] S3 delete bucket website: %#v", deleteInput)
  1048  
  1049  	_, err := s3conn.DeleteBucketWebsite(deleteInput)
  1050  	if err != nil {
  1051  		return fmt.Errorf("Error deleting S3 website: %s", err)
  1052  	}
  1053  
  1054  	d.Set("website_endpoint", "")
  1055  	d.Set("website_domain", "")
  1056  
  1057  	return nil
  1058  }
  1059  
  1060  func websiteEndpoint(s3conn *s3.S3, d *schema.ResourceData) (*S3Website, error) {
  1061  	// If the bucket doesn't have a website configuration, return an empty
  1062  	// endpoint
  1063  	if _, ok := d.GetOk("website"); !ok {
  1064  		return nil, nil
  1065  	}
  1066  
  1067  	bucket := d.Get("bucket").(string)
  1068  
  1069  	// Lookup the region for this bucket
  1070  	location, err := s3conn.GetBucketLocation(
  1071  		&s3.GetBucketLocationInput{
  1072  			Bucket: aws.String(bucket),
  1073  		},
  1074  	)
  1075  	if err != nil {
  1076  		return nil, err
  1077  	}
  1078  	var region string
  1079  	if location.LocationConstraint != nil {
  1080  		region = *location.LocationConstraint
  1081  	}
  1082  
  1083  	return WebsiteEndpoint(bucket, region), nil
  1084  }
  1085  
  1086  func WebsiteEndpoint(bucket string, region string) *S3Website {
  1087  	domain := WebsiteDomainUrl(region)
  1088  	return &S3Website{Endpoint: fmt.Sprintf("%s.%s", bucket, domain), Domain: domain}
  1089  }
  1090  
  1091  func WebsiteDomainUrl(region string) string {
  1092  	region = normalizeRegion(region)
  1093  
  1094  	// Frankfurt(and probably future) regions uses different syntax for website endpoints
  1095  	// http://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteEndpoints.html
  1096  	if region == "eu-central-1" || region == "ap-south-1" {
  1097  		return fmt.Sprintf("s3-website.%s.amazonaws.com", region)
  1098  	}
  1099  
  1100  	return fmt.Sprintf("s3-website-%s.amazonaws.com", region)
  1101  }
  1102  
  1103  func resourceAwsS3BucketAclUpdate(s3conn *s3.S3, d *schema.ResourceData) error {
  1104  	acl := d.Get("acl").(string)
  1105  	bucket := d.Get("bucket").(string)
  1106  
  1107  	i := &s3.PutBucketAclInput{
  1108  		Bucket: aws.String(bucket),
  1109  		ACL:    aws.String(acl),
  1110  	}
  1111  	log.Printf("[DEBUG] S3 put bucket ACL: %#v", i)
  1112  
  1113  	_, err := s3conn.PutBucketAcl(i)
  1114  	if err != nil {
  1115  		return fmt.Errorf("Error putting S3 ACL: %s", err)
  1116  	}
  1117  
  1118  	return nil
  1119  }
  1120  
  1121  func resourceAwsS3BucketVersioningUpdate(s3conn *s3.S3, d *schema.ResourceData) error {
  1122  	v := d.Get("versioning").(*schema.Set).List()
  1123  	bucket := d.Get("bucket").(string)
  1124  	vc := &s3.VersioningConfiguration{}
  1125  
  1126  	if len(v) > 0 {
  1127  		c := v[0].(map[string]interface{})
  1128  
  1129  		if c["enabled"].(bool) {
  1130  			vc.Status = aws.String(s3.BucketVersioningStatusEnabled)
  1131  		} else {
  1132  			vc.Status = aws.String(s3.BucketVersioningStatusSuspended)
  1133  		}
  1134  	} else {
  1135  		vc.Status = aws.String(s3.BucketVersioningStatusSuspended)
  1136  	}
  1137  
  1138  	i := &s3.PutBucketVersioningInput{
  1139  		Bucket:                  aws.String(bucket),
  1140  		VersioningConfiguration: vc,
  1141  	}
  1142  	log.Printf("[DEBUG] S3 put bucket versioning: %#v", i)
  1143  
  1144  	_, err := s3conn.PutBucketVersioning(i)
  1145  	if err != nil {
  1146  		return fmt.Errorf("Error putting S3 versioning: %s", err)
  1147  	}
  1148  
  1149  	return nil
  1150  }
  1151  
  1152  func resourceAwsS3BucketLoggingUpdate(s3conn *s3.S3, d *schema.ResourceData) error {
  1153  	logging := d.Get("logging").(*schema.Set).List()
  1154  	bucket := d.Get("bucket").(string)
  1155  	loggingStatus := &s3.BucketLoggingStatus{}
  1156  
  1157  	if len(logging) > 0 {
  1158  		c := logging[0].(map[string]interface{})
  1159  
  1160  		loggingEnabled := &s3.LoggingEnabled{}
  1161  		if val, ok := c["target_bucket"]; ok {
  1162  			loggingEnabled.TargetBucket = aws.String(val.(string))
  1163  		}
  1164  		if val, ok := c["target_prefix"]; ok {
  1165  			loggingEnabled.TargetPrefix = aws.String(val.(string))
  1166  		}
  1167  
  1168  		loggingStatus.LoggingEnabled = loggingEnabled
  1169  	}
  1170  
  1171  	i := &s3.PutBucketLoggingInput{
  1172  		Bucket:              aws.String(bucket),
  1173  		BucketLoggingStatus: loggingStatus,
  1174  	}
  1175  	log.Printf("[DEBUG] S3 put bucket logging: %#v", i)
  1176  
  1177  	_, err := s3conn.PutBucketLogging(i)
  1178  	if err != nil {
  1179  		return fmt.Errorf("Error putting S3 logging: %s", err)
  1180  	}
  1181  
  1182  	return nil
  1183  }
  1184  
  1185  func resourceAwsS3BucketAccelerationUpdate(s3conn *s3.S3, d *schema.ResourceData) error {
  1186  	bucket := d.Get("bucket").(string)
  1187  	enableAcceleration := d.Get("acceleration_status").(string)
  1188  
  1189  	i := &s3.PutBucketAccelerateConfigurationInput{
  1190  		Bucket: aws.String(bucket),
  1191  		AccelerateConfiguration: &s3.AccelerateConfiguration{
  1192  			Status: aws.String(enableAcceleration),
  1193  		},
  1194  	}
  1195  	log.Printf("[DEBUG] S3 put bucket acceleration: %#v", i)
  1196  
  1197  	_, err := s3conn.PutBucketAccelerateConfiguration(i)
  1198  	if err != nil {
  1199  		return fmt.Errorf("Error putting S3 acceleration: %s", err)
  1200  	}
  1201  
  1202  	return nil
  1203  }
  1204  
  1205  func resourceAwsS3BucketRequestPayerUpdate(s3conn *s3.S3, d *schema.ResourceData) error {
  1206  	bucket := d.Get("bucket").(string)
  1207  	payer := d.Get("request_payer").(string)
  1208  
  1209  	i := &s3.PutBucketRequestPaymentInput{
  1210  		Bucket: aws.String(bucket),
  1211  		RequestPaymentConfiguration: &s3.RequestPaymentConfiguration{
  1212  			Payer: aws.String(payer),
  1213  		},
  1214  	}
  1215  	log.Printf("[DEBUG] S3 put bucket request payer: %#v", i)
  1216  
  1217  	_, err := s3conn.PutBucketRequestPayment(i)
  1218  	if err != nil {
  1219  		return fmt.Errorf("Error putting S3 request payer: %s", err)
  1220  	}
  1221  
  1222  	return nil
  1223  }
  1224  
  1225  func resourceAwsS3BucketLifecycleUpdate(s3conn *s3.S3, d *schema.ResourceData) error {
  1226  	bucket := d.Get("bucket").(string)
  1227  
  1228  	lifecycleRules := d.Get("lifecycle_rule").([]interface{})
  1229  
  1230  	rules := make([]*s3.LifecycleRule, 0, len(lifecycleRules))
  1231  
  1232  	for i, lifecycleRule := range lifecycleRules {
  1233  		r := lifecycleRule.(map[string]interface{})
  1234  
  1235  		rule := &s3.LifecycleRule{
  1236  			Prefix: aws.String(r["prefix"].(string)),
  1237  		}
  1238  
  1239  		// ID
  1240  		if val, ok := r["id"].(string); ok && val != "" {
  1241  			rule.ID = aws.String(val)
  1242  		} else {
  1243  			rule.ID = aws.String(resource.PrefixedUniqueId("tf-s3-lifecycle-"))
  1244  		}
  1245  
  1246  		// Enabled
  1247  		if val, ok := r["enabled"].(bool); ok && val {
  1248  			rule.Status = aws.String(s3.ExpirationStatusEnabled)
  1249  		} else {
  1250  			rule.Status = aws.String(s3.ExpirationStatusDisabled)
  1251  		}
  1252  
  1253  		// AbortIncompleteMultipartUpload
  1254  		if val, ok := r["abort_incomplete_multipart_upload_days"].(int); ok && val > 0 {
  1255  			rule.AbortIncompleteMultipartUpload = &s3.AbortIncompleteMultipartUpload{
  1256  				DaysAfterInitiation: aws.Int64(int64(val)),
  1257  			}
  1258  		}
  1259  
  1260  		// Expiration
  1261  		expiration := d.Get(fmt.Sprintf("lifecycle_rule.%d.expiration", i)).(*schema.Set).List()
  1262  		if len(expiration) > 0 {
  1263  			e := expiration[0].(map[string]interface{})
  1264  			i := &s3.LifecycleExpiration{}
  1265  
  1266  			if val, ok := e["date"].(string); ok && val != "" {
  1267  				t, err := time.Parse(time.RFC3339, fmt.Sprintf("%sT00:00:00Z", val))
  1268  				if err != nil {
  1269  					return fmt.Errorf("Error Parsing AWS S3 Bucket Lifecycle Expiration Date: %s", err.Error())
  1270  				}
  1271  				i.Date = aws.Time(t)
  1272  			} else if val, ok := e["days"].(int); ok && val > 0 {
  1273  				i.Days = aws.Int64(int64(val))
  1274  			} else if val, ok := e["expired_object_delete_marker"].(bool); ok {
  1275  				i.ExpiredObjectDeleteMarker = aws.Bool(val)
  1276  			}
  1277  			rule.Expiration = i
  1278  		}
  1279  
  1280  		// NoncurrentVersionExpiration
  1281  		nc_expiration := d.Get(fmt.Sprintf("lifecycle_rule.%d.noncurrent_version_expiration", i)).(*schema.Set).List()
  1282  		if len(nc_expiration) > 0 {
  1283  			e := nc_expiration[0].(map[string]interface{})
  1284  
  1285  			if val, ok := e["days"].(int); ok && val > 0 {
  1286  				rule.NoncurrentVersionExpiration = &s3.NoncurrentVersionExpiration{
  1287  					NoncurrentDays: aws.Int64(int64(val)),
  1288  				}
  1289  			}
  1290  		}
  1291  
  1292  		// Transitions
  1293  		transitions := d.Get(fmt.Sprintf("lifecycle_rule.%d.transition", i)).(*schema.Set).List()
  1294  		if len(transitions) > 0 {
  1295  			rule.Transitions = make([]*s3.Transition, 0, len(transitions))
  1296  			for _, transition := range transitions {
  1297  				transition := transition.(map[string]interface{})
  1298  				i := &s3.Transition{}
  1299  				if val, ok := transition["date"].(string); ok && val != "" {
  1300  					t, err := time.Parse(time.RFC3339, fmt.Sprintf("%sT00:00:00Z", val))
  1301  					if err != nil {
  1302  						return fmt.Errorf("Error Parsing AWS S3 Bucket Lifecycle Expiration Date: %s", err.Error())
  1303  					}
  1304  					i.Date = aws.Time(t)
  1305  				} else if val, ok := transition["days"].(int); ok && val > 0 {
  1306  					i.Days = aws.Int64(int64(val))
  1307  				}
  1308  				if val, ok := transition["storage_class"].(string); ok && val != "" {
  1309  					i.StorageClass = aws.String(val)
  1310  				}
  1311  
  1312  				rule.Transitions = append(rule.Transitions, i)
  1313  			}
  1314  		}
  1315  		// NoncurrentVersionTransitions
  1316  		nc_transitions := d.Get(fmt.Sprintf("lifecycle_rule.%d.noncurrent_version_transition", i)).(*schema.Set).List()
  1317  		if len(nc_transitions) > 0 {
  1318  			rule.NoncurrentVersionTransitions = make([]*s3.NoncurrentVersionTransition, 0, len(nc_transitions))
  1319  			for _, transition := range nc_transitions {
  1320  				transition := transition.(map[string]interface{})
  1321  				i := &s3.NoncurrentVersionTransition{}
  1322  				if val, ok := transition["days"].(int); ok && val > 0 {
  1323  					i.NoncurrentDays = aws.Int64(int64(val))
  1324  				}
  1325  				if val, ok := transition["storage_class"].(string); ok && val != "" {
  1326  					i.StorageClass = aws.String(val)
  1327  				}
  1328  
  1329  				rule.NoncurrentVersionTransitions = append(rule.NoncurrentVersionTransitions, i)
  1330  			}
  1331  		}
  1332  
  1333  		rules = append(rules, rule)
  1334  	}
  1335  
  1336  	i := &s3.PutBucketLifecycleConfigurationInput{
  1337  		Bucket: aws.String(bucket),
  1338  		LifecycleConfiguration: &s3.BucketLifecycleConfiguration{
  1339  			Rules: rules,
  1340  		},
  1341  	}
  1342  
  1343  	err := resource.Retry(1*time.Minute, func() *resource.RetryError {
  1344  		if _, err := s3conn.PutBucketLifecycleConfiguration(i); err != nil {
  1345  			return resource.NonRetryableError(err)
  1346  		}
  1347  		return nil
  1348  	})
  1349  	if err != nil {
  1350  		return fmt.Errorf("Error putting S3 lifecycle: %s", err)
  1351  	}
  1352  
  1353  	return nil
  1354  }
  1355  
  1356  func normalizeRoutingRules(w []*s3.RoutingRule) (string, error) {
  1357  	withNulls, err := json.Marshal(w)
  1358  	if err != nil {
  1359  		return "", err
  1360  	}
  1361  
  1362  	var rules []map[string]interface{}
  1363  	json.Unmarshal(withNulls, &rules)
  1364  
  1365  	var cleanRules []map[string]interface{}
  1366  	for _, rule := range rules {
  1367  		cleanRules = append(cleanRules, removeNil(rule))
  1368  	}
  1369  
  1370  	withoutNulls, err := json.Marshal(cleanRules)
  1371  	if err != nil {
  1372  		return "", err
  1373  	}
  1374  
  1375  	return string(withoutNulls), nil
  1376  }
  1377  
  1378  func removeNil(data map[string]interface{}) map[string]interface{} {
  1379  	withoutNil := make(map[string]interface{})
  1380  
  1381  	for k, v := range data {
  1382  		if v == nil {
  1383  			continue
  1384  		}
  1385  
  1386  		switch v.(type) {
  1387  		case map[string]interface{}:
  1388  			withoutNil[k] = removeNil(v.(map[string]interface{}))
  1389  		default:
  1390  			withoutNil[k] = v
  1391  		}
  1392  	}
  1393  
  1394  	return withoutNil
  1395  }
  1396  
  1397  func normalizeJson(jsonString interface{}) string {
  1398  	if jsonString == nil || jsonString == "" {
  1399  		return ""
  1400  	}
  1401  	var j interface{}
  1402  	err := json.Unmarshal([]byte(jsonString.(string)), &j)
  1403  	if err != nil {
  1404  		return fmt.Sprintf("Error parsing JSON: %s", err)
  1405  	}
  1406  	b, _ := json.Marshal(j)
  1407  	return string(b[:])
  1408  }
  1409  
  1410  func normalizeRegion(region string) string {
  1411  	// Default to us-east-1 if the bucket doesn't have a region:
  1412  	// http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html
  1413  	if region == "" {
  1414  		region = "us-east-1"
  1415  	}
  1416  
  1417  	return region
  1418  }
  1419  
  1420  func validateS3BucketAccelerationStatus(v interface{}, k string) (ws []string, errors []error) {
  1421  	validTypes := map[string]struct{}{
  1422  		"Enabled":   struct{}{},
  1423  		"Suspended": struct{}{},
  1424  	}
  1425  
  1426  	if _, ok := validTypes[v.(string)]; !ok {
  1427  		errors = append(errors, fmt.Errorf("S3 Bucket Acceleration Status %q is invalid, must be %q or %q", v.(string), "Enabled", "Suspended"))
  1428  	}
  1429  	return
  1430  }
  1431  
  1432  func validateS3BucketRequestPayerType(v interface{}, k string) (ws []string, errors []error) {
  1433  	value := v.(string)
  1434  	if value != s3.PayerRequester && value != s3.PayerBucketOwner {
  1435  		errors = append(errors, fmt.Errorf(
  1436  			"%q contains an invalid Request Payer type %q. Valid types are either %q or %q",
  1437  			k, value, s3.PayerRequester, s3.PayerBucketOwner))
  1438  	}
  1439  	return
  1440  }
  1441  
  1442  func expirationHash(v interface{}) int {
  1443  	var buf bytes.Buffer
  1444  	m := v.(map[string]interface{})
  1445  	if v, ok := m["date"]; ok {
  1446  		buf.WriteString(fmt.Sprintf("%s-", v.(string)))
  1447  	}
  1448  	if v, ok := m["days"]; ok {
  1449  		buf.WriteString(fmt.Sprintf("%d-", v.(int)))
  1450  	}
  1451  	if v, ok := m["expired_object_delete_marker"]; ok {
  1452  		buf.WriteString(fmt.Sprintf("%t-", v.(bool)))
  1453  	}
  1454  	return hashcode.String(buf.String())
  1455  }
  1456  
  1457  func transitionHash(v interface{}) int {
  1458  	var buf bytes.Buffer
  1459  	m := v.(map[string]interface{})
  1460  	if v, ok := m["date"]; ok {
  1461  		buf.WriteString(fmt.Sprintf("%s-", v.(string)))
  1462  	}
  1463  	if v, ok := m["days"]; ok {
  1464  		buf.WriteString(fmt.Sprintf("%d-", v.(int)))
  1465  	}
  1466  	if v, ok := m["storage_class"]; ok {
  1467  		buf.WriteString(fmt.Sprintf("%s-", v.(string)))
  1468  	}
  1469  	return hashcode.String(buf.String())
  1470  }
  1471  
// S3Website describes the static-website address of a bucket: the full
// bucket endpoint and the regional website domain it is served from.
type S3Website struct {
	Endpoint, Domain string
}