github.com/recobe182/terraform@v0.8.5-0.20170117231232-49ab22a935b7/builtin/providers/aws/resource_aws_s3_bucket.go (about)

     1  package aws
     2  
     3  import (
     4  	"bytes"
     5  	"encoding/json"
     6  	"fmt"
     7  	"log"
     8  	"net/url"
     9  	"regexp"
    10  	"strings"
    11  	"time"
    12  
    13  	"github.com/aws/aws-sdk-go/aws"
    14  	"github.com/aws/aws-sdk-go/aws/awserr"
    15  	"github.com/aws/aws-sdk-go/service/s3"
    16  	"github.com/hashicorp/errwrap"
    17  	"github.com/hashicorp/terraform/helper/hashcode"
    18  	"github.com/hashicorp/terraform/helper/resource"
    19  	"github.com/hashicorp/terraform/helper/schema"
    20  )
    21  
// resourceAwsS3Bucket returns the Terraform resource definition for
// aws_s3_bucket: its CRUD handlers, import support, and the full attribute
// schema (ACL/policy, CORS, website hosting, versioning, logging, lifecycle
// rules, transfer acceleration, request-payer, and cross-region replication).
func resourceAwsS3Bucket() *schema.Resource {
	return &schema.Resource{
		Create: resourceAwsS3BucketCreate,
		Read:   resourceAwsS3BucketRead,
		Update: resourceAwsS3BucketUpdate,
		Delete: resourceAwsS3BucketDelete,
		Importer: &schema.ResourceImporter{
			State: resourceAwsS3BucketImportState,
		},

		Schema: map[string]*schema.Schema{
			// Bucket name. Buckets cannot be renamed, so a change forces
			// destroy/create.
			"bucket": {
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},

			// Bucket ARN, derived from the name in Read.
			"arn": {
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
			},

			// Canned ACL applied at create time and on change.
			"acl": {
				Type:     schema.TypeString,
				Default:  "private",
				Optional: true,
			},

			// Bucket policy document (JSON). Semantically-equivalent policy
			// JSON is treated as unchanged via the diff-suppress func.
			"policy": {
				Type:             schema.TypeString,
				Optional:         true,
				ValidateFunc:     validateJsonString,
				DiffSuppressFunc: suppressEquivalentAwsPolicyDiffs,
			},

			// CORS configuration; each list entry maps to one CORSRule.
			"cors_rule": {
				Type:     schema.TypeList,
				Optional: true,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"allowed_headers": {
							Type:     schema.TypeList,
							Optional: true,
							Elem:     &schema.Schema{Type: schema.TypeString},
						},
						"allowed_methods": {
							Type:     schema.TypeList,
							Required: true,
							Elem:     &schema.Schema{Type: schema.TypeString},
						},
						"allowed_origins": {
							Type:     schema.TypeList,
							Required: true,
							Elem:     &schema.Schema{Type: schema.TypeString},
						},
						"expose_headers": {
							Type:     schema.TypeList,
							Optional: true,
							Elem:     &schema.Schema{Type: schema.TypeString},
						},
						"max_age_seconds": {
							Type:     schema.TypeInt,
							Optional: true,
						},
					},
				},
			},

			// Static-website hosting configuration. redirect_all_requests_to
			// is mutually exclusive with the document/routing settings.
			"website": {
				Type:     schema.TypeList,
				Optional: true,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"index_document": {
							Type:     schema.TypeString,
							Optional: true,
						},

						"error_document": {
							Type:     schema.TypeString,
							Optional: true,
						},

						"redirect_all_requests_to": {
							Type: schema.TypeString,
							ConflictsWith: []string{
								"website.0.index_document",
								"website.0.error_document",
								"website.0.routing_rules",
							},
							Optional: true,
						},

						// Raw JSON routing rules; normalized so formatting
						// differences don't produce spurious diffs.
						"routing_rules": {
							Type:         schema.TypeString,
							Optional:     true,
							ValidateFunc: validateJsonString,
							StateFunc: func(v interface{}) string {
								json, _ := normalizeJsonString(v)
								return json
							},
						},
					},
				},
			},

			// Route53 hosted zone ID for the bucket's region (computed).
			"hosted_zone_id": {
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
			},

			// Region the bucket lives in; defaults to the provider region.
			"region": {
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
			},
			"website_endpoint": {
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
			},
			"website_domain": {
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
			},

			// Versioning state. mfa_delete is read-only in practice since
			// enabling it requires the MFA device serial at request time.
			"versioning": {
				Type:     schema.TypeList,
				Optional: true,
				Computed: true,
				MaxItems: 1,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"enabled": {
							Type:     schema.TypeBool,
							Optional: true,
							Default:  false,
						},
						"mfa_delete": {
							Type:     schema.TypeBool,
							Optional: true,
							Default:  false,
						},
					},
				},
			},

			// Server access logging target. Hashed on bucket+prefix so the
			// set has one element per distinct target.
			"logging": {
				Type:     schema.TypeSet,
				Optional: true,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"target_bucket": {
							Type:     schema.TypeString,
							Required: true,
						},
						"target_prefix": {
							Type:     schema.TypeString,
							Optional: true,
						},
					},
				},
				Set: func(v interface{}) int {
					var buf bytes.Buffer
					m := v.(map[string]interface{})
					buf.WriteString(fmt.Sprintf("%s-", m["target_bucket"]))
					buf.WriteString(fmt.Sprintf("%s-", m["target_prefix"]))
					return hashcode.String(buf.String())
				},
			},

			// Object lifecycle rules: expiration and storage-class
			// transitions for both current and noncurrent versions.
			"lifecycle_rule": {
				Type:     schema.TypeList,
				Optional: true,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"id": {
							Type:         schema.TypeString,
							Optional:     true,
							Computed:     true,
							ValidateFunc: validateS3BucketLifecycleRuleId,
						},
						"prefix": {
							Type:     schema.TypeString,
							Required: true,
						},
						"enabled": {
							Type:     schema.TypeBool,
							Required: true,
						},
						"abort_incomplete_multipart_upload_days": {
							Type:     schema.TypeInt,
							Optional: true,
						},
						"expiration": {
							Type:     schema.TypeSet,
							Optional: true,
							Set:      expirationHash,
							Elem: &schema.Resource{
								Schema: map[string]*schema.Schema{
									"date": {
										Type:         schema.TypeString,
										Optional:     true,
										ValidateFunc: validateS3BucketLifecycleTimestamp,
									},
									"days": {
										Type:     schema.TypeInt,
										Optional: true,
									},
									"expired_object_delete_marker": {
										Type:     schema.TypeBool,
										Optional: true,
									},
								},
							},
						},
						"noncurrent_version_expiration": {
							Type:     schema.TypeSet,
							Optional: true,
							Set:      expirationHash,
							Elem: &schema.Resource{
								Schema: map[string]*schema.Schema{
									"days": {
										Type:     schema.TypeInt,
										Optional: true,
									},
								},
							},
						},
						"transition": {
							Type:     schema.TypeSet,
							Optional: true,
							Set:      transitionHash,
							Elem: &schema.Resource{
								Schema: map[string]*schema.Schema{
									"date": {
										Type:         schema.TypeString,
										Optional:     true,
										ValidateFunc: validateS3BucketLifecycleTimestamp,
									},
									"days": {
										Type:     schema.TypeInt,
										Optional: true,
									},
									"storage_class": {
										Type:         schema.TypeString,
										Required:     true,
										ValidateFunc: validateS3BucketLifecycleStorageClass,
									},
								},
							},
						},
						"noncurrent_version_transition": {
							Type:     schema.TypeSet,
							Optional: true,
							Set:      transitionHash,
							Elem: &schema.Resource{
								Schema: map[string]*schema.Schema{
									"days": {
										Type:     schema.TypeInt,
										Optional: true,
									},
									"storage_class": {
										Type:         schema.TypeString,
										Required:     true,
										ValidateFunc: validateS3BucketLifecycleStorageClass,
									},
								},
							},
						},
					},
				},
			},

			// When true, Delete empties the bucket (all versions and delete
			// markers) before removing it.
			"force_destroy": {
				Type:     schema.TypeBool,
				Optional: true,
				Default:  false,
			},

			// Transfer Acceleration state ("Enabled"/"Suspended"); not
			// available in every region.
			"acceleration_status": {
				Type:         schema.TypeString,
				Optional:     true,
				Computed:     true,
				ValidateFunc: validateS3BucketAccelerationStatus,
			},

			// Who pays for request/transfer costs ("BucketOwner"/"Requester").
			"request_payer": {
				Type:         schema.TypeString,
				Optional:     true,
				Computed:     true,
				ValidateFunc: validateS3BucketRequestPayerType,
			},

			// Cross-region replication: IAM role plus a set of rules, each
			// with a single destination bucket (by ARN).
			"replication_configuration": {
				Type:     schema.TypeList,
				Optional: true,
				MaxItems: 1,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"role": {
							Type:     schema.TypeString,
							Required: true,
						},
						"rules": {
							Type:     schema.TypeSet,
							Required: true,
							Set:      rulesHash,
							Elem: &schema.Resource{
								Schema: map[string]*schema.Schema{
									"id": {
										Type:         schema.TypeString,
										Optional:     true,
										ValidateFunc: validateS3BucketReplicationRuleId,
									},
									"destination": {
										Type:     schema.TypeSet,
										MaxItems: 1,
										MinItems: 1,
										Required: true,
										Set:      destinationHash,
										Elem: &schema.Resource{
											Schema: map[string]*schema.Schema{
												"bucket": {
													Type:         schema.TypeString,
													Required:     true,
													ValidateFunc: validateArn,
												},
												"storage_class": {
													Type:         schema.TypeString,
													Optional:     true,
													ValidateFunc: validateS3BucketReplicationDestinationStorageClass,
												},
											},
										},
									},
									"prefix": {
										Type:         schema.TypeString,
										Required:     true,
										ValidateFunc: validateS3BucketReplicationRulePrefix,
									},
									"status": {
										Type:         schema.TypeString,
										Required:     true,
										ValidateFunc: validateS3BucketReplicationRuleStatus,
									},
								},
							},
						},
					},
				},
			},

			"tags": tagsSchema(),
		},
	}
}
   382  
   383  func resourceAwsS3BucketCreate(d *schema.ResourceData, meta interface{}) error {
   384  	s3conn := meta.(*AWSClient).s3conn
   385  
   386  	// Get the bucket and acl
   387  	bucket := d.Get("bucket").(string)
   388  	acl := d.Get("acl").(string)
   389  
   390  	log.Printf("[DEBUG] S3 bucket create: %s, ACL: %s", bucket, acl)
   391  
   392  	req := &s3.CreateBucketInput{
   393  		Bucket: aws.String(bucket),
   394  		ACL:    aws.String(acl),
   395  	}
   396  
   397  	var awsRegion string
   398  	if region, ok := d.GetOk("region"); ok {
   399  		awsRegion = region.(string)
   400  	} else {
   401  		awsRegion = meta.(*AWSClient).region
   402  	}
   403  	log.Printf("[DEBUG] S3 bucket create: %s, using region: %s", bucket, awsRegion)
   404  
   405  	// Special case us-east-1 region and do not set the LocationConstraint.
   406  	// See "Request Elements: http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUT.html
   407  	if awsRegion != "us-east-1" {
   408  		req.CreateBucketConfiguration = &s3.CreateBucketConfiguration{
   409  			LocationConstraint: aws.String(awsRegion),
   410  		}
   411  	}
   412  
   413  	if err := validateS3BucketName(bucket, awsRegion); err != nil {
   414  		return fmt.Errorf("Error validating S3 bucket name: %s", err)
   415  	}
   416  
   417  	err := resource.Retry(5*time.Minute, func() *resource.RetryError {
   418  		log.Printf("[DEBUG] Trying to create new S3 bucket: %q", bucket)
   419  		_, err := s3conn.CreateBucket(req)
   420  		if awsErr, ok := err.(awserr.Error); ok {
   421  			if awsErr.Code() == "OperationAborted" {
   422  				log.Printf("[WARN] Got an error while trying to create S3 bucket %s: %s", bucket, err)
   423  				return resource.RetryableError(
   424  					fmt.Errorf("[WARN] Error creating S3 bucket %s, retrying: %s",
   425  						bucket, err))
   426  			}
   427  		}
   428  		if err != nil {
   429  			return resource.NonRetryableError(err)
   430  		}
   431  
   432  		return nil
   433  	})
   434  
   435  	if err != nil {
   436  		return fmt.Errorf("Error creating S3 bucket: %s", err)
   437  	}
   438  
   439  	// Assign the bucket name as the resource ID
   440  	d.SetId(bucket)
   441  
   442  	return resourceAwsS3BucketUpdate(d, meta)
   443  }
   444  
   445  func resourceAwsS3BucketUpdate(d *schema.ResourceData, meta interface{}) error {
   446  	s3conn := meta.(*AWSClient).s3conn
   447  	if err := setTagsS3(s3conn, d); err != nil {
   448  		return err
   449  	}
   450  
   451  	if d.HasChange("policy") {
   452  		if err := resourceAwsS3BucketPolicyUpdate(s3conn, d); err != nil {
   453  			return err
   454  		}
   455  	}
   456  
   457  	if d.HasChange("cors_rule") {
   458  		if err := resourceAwsS3BucketCorsUpdate(s3conn, d); err != nil {
   459  			return err
   460  		}
   461  	}
   462  
   463  	if d.HasChange("website") {
   464  		if err := resourceAwsS3BucketWebsiteUpdate(s3conn, d); err != nil {
   465  			return err
   466  		}
   467  	}
   468  
   469  	if d.HasChange("versioning") {
   470  		if err := resourceAwsS3BucketVersioningUpdate(s3conn, d); err != nil {
   471  			return err
   472  		}
   473  	}
   474  	if d.HasChange("acl") {
   475  		if err := resourceAwsS3BucketAclUpdate(s3conn, d); err != nil {
   476  			return err
   477  		}
   478  	}
   479  
   480  	if d.HasChange("logging") {
   481  		if err := resourceAwsS3BucketLoggingUpdate(s3conn, d); err != nil {
   482  			return err
   483  		}
   484  	}
   485  
   486  	if d.HasChange("lifecycle_rule") {
   487  		if err := resourceAwsS3BucketLifecycleUpdate(s3conn, d); err != nil {
   488  			return err
   489  		}
   490  	}
   491  
   492  	if d.HasChange("acceleration_status") {
   493  		if err := resourceAwsS3BucketAccelerationUpdate(s3conn, d); err != nil {
   494  			return err
   495  		}
   496  	}
   497  
   498  	if d.HasChange("request_payer") {
   499  		if err := resourceAwsS3BucketRequestPayerUpdate(s3conn, d); err != nil {
   500  			return err
   501  		}
   502  	}
   503  
   504  	if d.HasChange("replication_configuration") {
   505  		if err := resourceAwsS3BucketReplicationConfigurationUpdate(s3conn, d); err != nil {
   506  			return err
   507  		}
   508  	}
   509  
   510  	return resourceAwsS3BucketRead(d, meta)
   511  }
   512  
   513  func resourceAwsS3BucketRead(d *schema.ResourceData, meta interface{}) error {
   514  	s3conn := meta.(*AWSClient).s3conn
   515  
   516  	var err error
   517  	_, err = s3conn.HeadBucket(&s3.HeadBucketInput{
   518  		Bucket: aws.String(d.Id()),
   519  	})
   520  	if err != nil {
   521  		if awsError, ok := err.(awserr.RequestFailure); ok && awsError.StatusCode() == 404 {
   522  			log.Printf("[WARN] S3 Bucket (%s) not found, error code (404)", d.Id())
   523  			d.SetId("")
   524  			return nil
   525  		} else {
   526  			// some of the AWS SDK's errors can be empty strings, so let's add
   527  			// some additional context.
   528  			return fmt.Errorf("error reading S3 bucket \"%s\": %s", d.Id(), err)
   529  		}
   530  	}
   531  
   532  	// In the import case, we won't have this
   533  	if _, ok := d.GetOk("bucket"); !ok {
   534  		d.Set("bucket", d.Id())
   535  	}
   536  
   537  	// Read the policy
   538  	if _, ok := d.GetOk("policy"); ok {
   539  		pol, err := s3conn.GetBucketPolicy(&s3.GetBucketPolicyInput{
   540  			Bucket: aws.String(d.Id()),
   541  		})
   542  		log.Printf("[DEBUG] S3 bucket: %s, read policy: %v", d.Id(), pol)
   543  		if err != nil {
   544  			if err := d.Set("policy", ""); err != nil {
   545  				return err
   546  			}
   547  		} else {
   548  			if v := pol.Policy; v == nil {
   549  				if err := d.Set("policy", ""); err != nil {
   550  					return err
   551  				}
   552  			} else {
   553  				policy, err := normalizeJsonString(*v)
   554  				if err != nil {
   555  					return errwrap.Wrapf("policy contains an invalid JSON: {{err}}", err)
   556  				}
   557  				d.Set("policy", policy)
   558  			}
   559  		}
   560  	}
   561  
   562  	// Read the CORS
   563  	cors, err := s3conn.GetBucketCors(&s3.GetBucketCorsInput{
   564  		Bucket: aws.String(d.Id()),
   565  	})
   566  	if err != nil {
   567  		// An S3 Bucket might not have CORS configuration set.
   568  		if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() != "NoSuchCORSConfiguration" {
   569  			return err
   570  		}
   571  		log.Printf("[WARN] S3 bucket: %s, no CORS configuration could be found.", d.Id())
   572  	}
   573  	log.Printf("[DEBUG] S3 bucket: %s, read CORS: %v", d.Id(), cors)
   574  	if cors.CORSRules != nil {
   575  		rules := make([]map[string]interface{}, 0, len(cors.CORSRules))
   576  		for _, ruleObject := range cors.CORSRules {
   577  			rule := make(map[string]interface{})
   578  			rule["allowed_headers"] = flattenStringList(ruleObject.AllowedHeaders)
   579  			rule["allowed_methods"] = flattenStringList(ruleObject.AllowedMethods)
   580  			rule["allowed_origins"] = flattenStringList(ruleObject.AllowedOrigins)
   581  			// Both the "ExposeHeaders" and "MaxAgeSeconds" might not be set.
   582  			if ruleObject.AllowedOrigins != nil {
   583  				rule["expose_headers"] = flattenStringList(ruleObject.ExposeHeaders)
   584  			}
   585  			if ruleObject.MaxAgeSeconds != nil {
   586  				rule["max_age_seconds"] = int(*ruleObject.MaxAgeSeconds)
   587  			}
   588  			rules = append(rules, rule)
   589  		}
   590  		if err := d.Set("cors_rule", rules); err != nil {
   591  			return err
   592  		}
   593  	}
   594  
   595  	// Read the website configuration
   596  	ws, err := s3conn.GetBucketWebsite(&s3.GetBucketWebsiteInput{
   597  		Bucket: aws.String(d.Id()),
   598  	})
   599  	var websites []map[string]interface{}
   600  	if err == nil {
   601  		w := make(map[string]interface{})
   602  
   603  		if v := ws.IndexDocument; v != nil {
   604  			w["index_document"] = *v.Suffix
   605  		}
   606  
   607  		if v := ws.ErrorDocument; v != nil {
   608  			w["error_document"] = *v.Key
   609  		}
   610  
   611  		if v := ws.RedirectAllRequestsTo; v != nil {
   612  			if v.Protocol == nil {
   613  				w["redirect_all_requests_to"] = *v.HostName
   614  			} else {
   615  				var host string
   616  				var path string
   617  				parsedHostName, err := url.Parse(*v.HostName)
   618  				if err == nil {
   619  					host = parsedHostName.Host
   620  					path = parsedHostName.Path
   621  				} else {
   622  					host = *v.HostName
   623  					path = ""
   624  				}
   625  
   626  				w["redirect_all_requests_to"] = (&url.URL{
   627  					Host:   host,
   628  					Path:   path,
   629  					Scheme: *v.Protocol,
   630  				}).String()
   631  			}
   632  		}
   633  
   634  		if v := ws.RoutingRules; v != nil {
   635  			rr, err := normalizeRoutingRules(v)
   636  			if err != nil {
   637  				return fmt.Errorf("Error while marshaling routing rules: %s", err)
   638  			}
   639  			w["routing_rules"] = rr
   640  		}
   641  
   642  		websites = append(websites, w)
   643  	}
   644  	if err := d.Set("website", websites); err != nil {
   645  		return err
   646  	}
   647  
   648  	// Read the versioning configuration
   649  	versioning, err := s3conn.GetBucketVersioning(&s3.GetBucketVersioningInput{
   650  		Bucket: aws.String(d.Id()),
   651  	})
   652  	if err != nil {
   653  		return err
   654  	}
   655  	log.Printf("[DEBUG] S3 Bucket: %s, versioning: %v", d.Id(), versioning)
   656  	if versioning != nil {
   657  		vcl := make([]map[string]interface{}, 0, 1)
   658  		vc := make(map[string]interface{})
   659  		if versioning.Status != nil && *versioning.Status == s3.BucketVersioningStatusEnabled {
   660  			vc["enabled"] = true
   661  		} else {
   662  			vc["enabled"] = false
   663  		}
   664  
   665  		if versioning.MFADelete != nil && *versioning.MFADelete == s3.MFADeleteEnabled {
   666  			vc["mfa_delete"] = true
   667  		} else {
   668  			vc["mfa_delete"] = false
   669  		}
   670  		vcl = append(vcl, vc)
   671  		if err := d.Set("versioning", vcl); err != nil {
   672  			return err
   673  		}
   674  	}
   675  
   676  	// Read the acceleration status
   677  	accelerate, err := s3conn.GetBucketAccelerateConfiguration(&s3.GetBucketAccelerateConfigurationInput{
   678  		Bucket: aws.String(d.Id()),
   679  	})
   680  	if err != nil {
   681  		// Amazon S3 Transfer Acceleration might not be supported in the
   682  		// given region, for example, China (Beijing) and the Government
   683  		// Cloud does not support this feature at the moment.
   684  		if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() != "UnsupportedArgument" {
   685  			return err
   686  		}
   687  
   688  		var awsRegion string
   689  		if region, ok := d.GetOk("region"); ok {
   690  			awsRegion = region.(string)
   691  		} else {
   692  			awsRegion = meta.(*AWSClient).region
   693  		}
   694  
   695  		log.Printf("[WARN] S3 bucket: %s, the S3 Transfer Acceleration is not supported in the region: %s", d.Id(), awsRegion)
   696  	} else {
   697  		log.Printf("[DEBUG] S3 bucket: %s, read Acceleration: %v", d.Id(), accelerate)
   698  		d.Set("acceleration_status", accelerate.Status)
   699  	}
   700  
   701  	// Read the request payer configuration.
   702  	payer, err := s3conn.GetBucketRequestPayment(&s3.GetBucketRequestPaymentInput{
   703  		Bucket: aws.String(d.Id()),
   704  	})
   705  	if err != nil {
   706  		return err
   707  	}
   708  	log.Printf("[DEBUG] S3 Bucket: %s, read request payer: %v", d.Id(), payer)
   709  	if payer.Payer != nil {
   710  		if err := d.Set("request_payer", *payer.Payer); err != nil {
   711  			return err
   712  		}
   713  	}
   714  
   715  	// Read the logging configuration
   716  	logging, err := s3conn.GetBucketLogging(&s3.GetBucketLoggingInput{
   717  		Bucket: aws.String(d.Id()),
   718  	})
   719  	if err != nil {
   720  		return err
   721  	}
   722  
   723  	log.Printf("[DEBUG] S3 Bucket: %s, logging: %v", d.Id(), logging)
   724  	if v := logging.LoggingEnabled; v != nil {
   725  		lcl := make([]map[string]interface{}, 0, 1)
   726  		lc := make(map[string]interface{})
   727  		if *v.TargetBucket != "" {
   728  			lc["target_bucket"] = *v.TargetBucket
   729  		}
   730  		if *v.TargetPrefix != "" {
   731  			lc["target_prefix"] = *v.TargetPrefix
   732  		}
   733  		lcl = append(lcl, lc)
   734  		if err := d.Set("logging", lcl); err != nil {
   735  			return err
   736  		}
   737  	}
   738  
   739  	// Read the lifecycle configuration
   740  	lifecycle, err := s3conn.GetBucketLifecycleConfiguration(&s3.GetBucketLifecycleConfigurationInput{
   741  		Bucket: aws.String(d.Id()),
   742  	})
   743  	if err != nil {
   744  		if awsError, ok := err.(awserr.RequestFailure); ok && awsError.StatusCode() != 404 {
   745  			return err
   746  		}
   747  	}
   748  	log.Printf("[DEBUG] S3 Bucket: %s, lifecycle: %v", d.Id(), lifecycle)
   749  	if len(lifecycle.Rules) > 0 {
   750  		rules := make([]map[string]interface{}, 0, len(lifecycle.Rules))
   751  
   752  		for _, lifecycleRule := range lifecycle.Rules {
   753  			rule := make(map[string]interface{})
   754  
   755  			// ID
   756  			if lifecycleRule.ID != nil && *lifecycleRule.ID != "" {
   757  				rule["id"] = *lifecycleRule.ID
   758  			}
   759  			// Prefix
   760  			if lifecycleRule.Prefix != nil && *lifecycleRule.Prefix != "" {
   761  				rule["prefix"] = *lifecycleRule.Prefix
   762  			}
   763  			// Enabled
   764  			if lifecycleRule.Status != nil {
   765  				if *lifecycleRule.Status == s3.ExpirationStatusEnabled {
   766  					rule["enabled"] = true
   767  				} else {
   768  					rule["enabled"] = false
   769  				}
   770  			}
   771  
   772  			// AbortIncompleteMultipartUploadDays
   773  			if lifecycleRule.AbortIncompleteMultipartUpload != nil {
   774  				if lifecycleRule.AbortIncompleteMultipartUpload.DaysAfterInitiation != nil {
   775  					rule["abort_incomplete_multipart_upload_days"] = int(*lifecycleRule.AbortIncompleteMultipartUpload.DaysAfterInitiation)
   776  				}
   777  			}
   778  
   779  			// expiration
   780  			if lifecycleRule.Expiration != nil {
   781  				e := make(map[string]interface{})
   782  				if lifecycleRule.Expiration.Date != nil {
   783  					e["date"] = (*lifecycleRule.Expiration.Date).Format("2006-01-02")
   784  				}
   785  				if lifecycleRule.Expiration.Days != nil {
   786  					e["days"] = int(*lifecycleRule.Expiration.Days)
   787  				}
   788  				if lifecycleRule.Expiration.ExpiredObjectDeleteMarker != nil {
   789  					e["expired_object_delete_marker"] = *lifecycleRule.Expiration.ExpiredObjectDeleteMarker
   790  				}
   791  				rule["expiration"] = schema.NewSet(expirationHash, []interface{}{e})
   792  			}
   793  			// noncurrent_version_expiration
   794  			if lifecycleRule.NoncurrentVersionExpiration != nil {
   795  				e := make(map[string]interface{})
   796  				if lifecycleRule.NoncurrentVersionExpiration.NoncurrentDays != nil {
   797  					e["days"] = int(*lifecycleRule.NoncurrentVersionExpiration.NoncurrentDays)
   798  				}
   799  				rule["noncurrent_version_expiration"] = schema.NewSet(expirationHash, []interface{}{e})
   800  			}
   801  			//// transition
   802  			if len(lifecycleRule.Transitions) > 0 {
   803  				transitions := make([]interface{}, 0, len(lifecycleRule.Transitions))
   804  				for _, v := range lifecycleRule.Transitions {
   805  					t := make(map[string]interface{})
   806  					if v.Date != nil {
   807  						t["date"] = (*v.Date).Format("2006-01-02")
   808  					}
   809  					if v.Days != nil {
   810  						t["days"] = int(*v.Days)
   811  					}
   812  					if v.StorageClass != nil {
   813  						t["storage_class"] = *v.StorageClass
   814  					}
   815  					transitions = append(transitions, t)
   816  				}
   817  				rule["transition"] = schema.NewSet(transitionHash, transitions)
   818  			}
   819  			// noncurrent_version_transition
   820  			if len(lifecycleRule.NoncurrentVersionTransitions) > 0 {
   821  				transitions := make([]interface{}, 0, len(lifecycleRule.NoncurrentVersionTransitions))
   822  				for _, v := range lifecycleRule.NoncurrentVersionTransitions {
   823  					t := make(map[string]interface{})
   824  					if v.NoncurrentDays != nil {
   825  						t["days"] = int(*v.NoncurrentDays)
   826  					}
   827  					if v.StorageClass != nil {
   828  						t["storage_class"] = *v.StorageClass
   829  					}
   830  					transitions = append(transitions, t)
   831  				}
   832  				rule["noncurrent_version_transition"] = schema.NewSet(transitionHash, transitions)
   833  			}
   834  
   835  			rules = append(rules, rule)
   836  		}
   837  
   838  		if err := d.Set("lifecycle_rule", rules); err != nil {
   839  			return err
   840  		}
   841  	}
   842  
   843  	// Read the bucket replication configuration
   844  	replication, err := s3conn.GetBucketReplication(&s3.GetBucketReplicationInput{
   845  		Bucket: aws.String(d.Id()),
   846  	})
   847  	if err != nil {
   848  		if awsError, ok := err.(awserr.RequestFailure); ok && awsError.StatusCode() != 404 {
   849  			return err
   850  		}
   851  	}
   852  
   853  	log.Printf("[DEBUG] S3 Bucket: %s, read replication configuration: %v", d.Id(), replication)
   854  	if r := replication.ReplicationConfiguration; r != nil {
   855  		if err := d.Set("replication_configuration", flattenAwsS3BucketReplicationConfiguration(replication.ReplicationConfiguration)); err != nil {
   856  			log.Printf("[DEBUG] Error setting replication configuration: %s", err)
   857  			return err
   858  		}
   859  	}
   860  
   861  	// Add the region as an attribute
   862  	location, err := s3conn.GetBucketLocation(
   863  		&s3.GetBucketLocationInput{
   864  			Bucket: aws.String(d.Id()),
   865  		},
   866  	)
   867  	if err != nil {
   868  		return err
   869  	}
   870  	var region string
   871  	if location.LocationConstraint != nil {
   872  		region = *location.LocationConstraint
   873  	}
   874  	region = normalizeRegion(region)
   875  	if err := d.Set("region", region); err != nil {
   876  		return err
   877  	}
   878  
   879  	// Add the hosted zone ID for this bucket's region as an attribute
   880  	hostedZoneID := HostedZoneIDForRegion(region)
   881  	if err := d.Set("hosted_zone_id", hostedZoneID); err != nil {
   882  		return err
   883  	}
   884  
   885  	// Add website_endpoint as an attribute
   886  	websiteEndpoint, err := websiteEndpoint(s3conn, d)
   887  	if err != nil {
   888  		return err
   889  	}
   890  	if websiteEndpoint != nil {
   891  		if err := d.Set("website_endpoint", websiteEndpoint.Endpoint); err != nil {
   892  			return err
   893  		}
   894  		if err := d.Set("website_domain", websiteEndpoint.Domain); err != nil {
   895  			return err
   896  		}
   897  	}
   898  
   899  	tagSet, err := getTagSetS3(s3conn, d.Id())
   900  	if err != nil {
   901  		return err
   902  	}
   903  
   904  	if err := d.Set("tags", tagsToMapS3(tagSet)); err != nil {
   905  		return err
   906  	}
   907  
   908  	d.Set("arn", fmt.Sprint("arn:aws:s3:::", d.Id()))
   909  
   910  	return nil
   911  }
   912  
// resourceAwsS3BucketDelete deletes the S3 bucket. If the bucket is not empty
// and force_destroy is set, one page of object versions and delete markers is
// removed and the delete is retried recursively until the bucket empties or an
// error occurs.
func resourceAwsS3BucketDelete(d *schema.ResourceData, meta interface{}) error {
	s3conn := meta.(*AWSClient).s3conn

	log.Printf("[DEBUG] S3 Delete Bucket: %s", d.Id())
	_, err := s3conn.DeleteBucket(&s3.DeleteBucketInput{
		Bucket: aws.String(d.Id()),
	})
	if err != nil {
		ec2err, ok := err.(awserr.Error)
		if ok && ec2err.Code() == "BucketNotEmpty" {
			if d.Get("force_destroy").(bool) {
				// bucket may have things delete them
				log.Printf("[DEBUG] S3 Bucket attempting to forceDestroy %+v", err)

				bucket := d.Get("bucket").(string)
				// NOTE(review): a single ListObjectVersions call returns at
				// most one page of results; the recursion at the bottom of
				// this branch is what drains the remainder.
				resp, err := s3conn.ListObjectVersions(
					&s3.ListObjectVersionsInput{
						Bucket: aws.String(bucket),
					},
				)

				if err != nil {
					return fmt.Errorf("Error S3 Bucket list Object Versions err: %s", err)
				}

				objectsToDelete := make([]*s3.ObjectIdentifier, 0)

				// Queue delete markers for deletion...
				if len(resp.DeleteMarkers) != 0 {

					for _, v := range resp.DeleteMarkers {
						objectsToDelete = append(objectsToDelete, &s3.ObjectIdentifier{
							Key:       v.Key,
							VersionId: v.VersionId,
						})
					}
				}

				// ...and every object version, so versioned buckets empty fully.
				if len(resp.Versions) != 0 {
					for _, v := range resp.Versions {
						objectsToDelete = append(objectsToDelete, &s3.ObjectIdentifier{
							Key:       v.Key,
							VersionId: v.VersionId,
						})
					}
				}

				params := &s3.DeleteObjectsInput{
					Bucket: aws.String(bucket),
					Delete: &s3.Delete{
						Objects: objectsToDelete,
					},
				}

				_, err = s3conn.DeleteObjects(params)

				if err != nil {
					return fmt.Errorf("Error S3 Bucket force_destroy error deleting: %s", err)
				}

				// this line recurses until all objects are deleted or an error is returned
				return resourceAwsS3BucketDelete(d, meta)
			}
		}
		return fmt.Errorf("Error deleting S3 Bucket: %s", err)
	}
	return nil
}
   980  
   981  func resourceAwsS3BucketPolicyUpdate(s3conn *s3.S3, d *schema.ResourceData) error {
   982  	bucket := d.Get("bucket").(string)
   983  	policy := d.Get("policy").(string)
   984  
   985  	if policy != "" {
   986  		log.Printf("[DEBUG] S3 bucket: %s, put policy: %s", bucket, policy)
   987  
   988  		params := &s3.PutBucketPolicyInput{
   989  			Bucket: aws.String(bucket),
   990  			Policy: aws.String(policy),
   991  		}
   992  
   993  		err := resource.Retry(1*time.Minute, func() *resource.RetryError {
   994  			if _, err := s3conn.PutBucketPolicy(params); err != nil {
   995  				if awserr, ok := err.(awserr.Error); ok {
   996  					if awserr.Code() == "MalformedPolicy" {
   997  						return resource.RetryableError(awserr)
   998  					}
   999  				}
  1000  				return resource.NonRetryableError(err)
  1001  			}
  1002  			return nil
  1003  		})
  1004  
  1005  		if err != nil {
  1006  			return fmt.Errorf("Error putting S3 policy: %s", err)
  1007  		}
  1008  	} else {
  1009  		log.Printf("[DEBUG] S3 bucket: %s, delete policy: %s", bucket, policy)
  1010  		_, err := s3conn.DeleteBucketPolicy(&s3.DeleteBucketPolicyInput{
  1011  			Bucket: aws.String(bucket),
  1012  		})
  1013  
  1014  		if err != nil {
  1015  			return fmt.Errorf("Error deleting S3 policy: %s", err)
  1016  		}
  1017  	}
  1018  
  1019  	return nil
  1020  }
  1021  
  1022  func resourceAwsS3BucketCorsUpdate(s3conn *s3.S3, d *schema.ResourceData) error {
  1023  	bucket := d.Get("bucket").(string)
  1024  	rawCors := d.Get("cors_rule").([]interface{})
  1025  
  1026  	if len(rawCors) == 0 {
  1027  		// Delete CORS
  1028  		log.Printf("[DEBUG] S3 bucket: %s, delete CORS", bucket)
  1029  		_, err := s3conn.DeleteBucketCors(&s3.DeleteBucketCorsInput{
  1030  			Bucket: aws.String(bucket),
  1031  		})
  1032  		if err != nil {
  1033  			return fmt.Errorf("Error deleting S3 CORS: %s", err)
  1034  		}
  1035  	} else {
  1036  		// Put CORS
  1037  		rules := make([]*s3.CORSRule, 0, len(rawCors))
  1038  		for _, cors := range rawCors {
  1039  			corsMap := cors.(map[string]interface{})
  1040  			r := &s3.CORSRule{}
  1041  			for k, v := range corsMap {
  1042  				log.Printf("[DEBUG] S3 bucket: %s, put CORS: %#v, %#v", bucket, k, v)
  1043  				if k == "max_age_seconds" {
  1044  					r.MaxAgeSeconds = aws.Int64(int64(v.(int)))
  1045  				} else {
  1046  					vMap := make([]*string, len(v.([]interface{})))
  1047  					for i, vv := range v.([]interface{}) {
  1048  						str := vv.(string)
  1049  						vMap[i] = aws.String(str)
  1050  					}
  1051  					switch k {
  1052  					case "allowed_headers":
  1053  						r.AllowedHeaders = vMap
  1054  					case "allowed_methods":
  1055  						r.AllowedMethods = vMap
  1056  					case "allowed_origins":
  1057  						r.AllowedOrigins = vMap
  1058  					case "expose_headers":
  1059  						r.ExposeHeaders = vMap
  1060  					}
  1061  				}
  1062  			}
  1063  			rules = append(rules, r)
  1064  		}
  1065  		corsInput := &s3.PutBucketCorsInput{
  1066  			Bucket: aws.String(bucket),
  1067  			CORSConfiguration: &s3.CORSConfiguration{
  1068  				CORSRules: rules,
  1069  			},
  1070  		}
  1071  		log.Printf("[DEBUG] S3 bucket: %s, put CORS: %#v", bucket, corsInput)
  1072  		_, err := s3conn.PutBucketCors(corsInput)
  1073  		if err != nil {
  1074  			return fmt.Errorf("Error putting S3 CORS: %s", err)
  1075  		}
  1076  	}
  1077  
  1078  	return nil
  1079  }
  1080  
  1081  func resourceAwsS3BucketWebsiteUpdate(s3conn *s3.S3, d *schema.ResourceData) error {
  1082  	ws := d.Get("website").([]interface{})
  1083  
  1084  	if len(ws) == 1 {
  1085  		var w map[string]interface{}
  1086  		if ws[0] != nil {
  1087  			w = ws[0].(map[string]interface{})
  1088  		} else {
  1089  			w = make(map[string]interface{})
  1090  		}
  1091  		return resourceAwsS3BucketWebsitePut(s3conn, d, w)
  1092  	} else if len(ws) == 0 {
  1093  		return resourceAwsS3BucketWebsiteDelete(s3conn, d)
  1094  	} else {
  1095  		return fmt.Errorf("Cannot specify more than one website.")
  1096  	}
  1097  }
  1098  
// resourceAwsS3BucketWebsitePut writes the bucket website configuration built
// from the given "website" block attributes. Either index_document or
// redirect_all_requests_to must be present.
func resourceAwsS3BucketWebsitePut(s3conn *s3.S3, d *schema.ResourceData, website map[string]interface{}) error {
	bucket := d.Get("bucket").(string)

	var indexDocument, errorDocument, redirectAllRequestsTo, routingRules string
	if v, ok := website["index_document"]; ok {
		indexDocument = v.(string)
	}
	if v, ok := website["error_document"]; ok {
		errorDocument = v.(string)
	}
	if v, ok := website["redirect_all_requests_to"]; ok {
		redirectAllRequestsTo = v.(string)
	}
	if v, ok := website["routing_rules"]; ok {
		routingRules = v.(string)
	}

	if indexDocument == "" && redirectAllRequestsTo == "" {
		return fmt.Errorf("Must specify either index_document or redirect_all_requests_to.")
	}

	websiteConfiguration := &s3.WebsiteConfiguration{}

	if indexDocument != "" {
		websiteConfiguration.IndexDocument = &s3.IndexDocument{Suffix: aws.String(indexDocument)}
	}

	if errorDocument != "" {
		websiteConfiguration.ErrorDocument = &s3.ErrorDocument{Key: aws.String(errorDocument)}
	}

	if redirectAllRequestsTo != "" {
		// When the redirect target parses as a URL with an explicit scheme,
		// split it into host(+path) and protocol; otherwise pass the raw
		// string through unchanged as the host name.
		redirect, err := url.Parse(redirectAllRequestsTo)
		if err == nil && redirect.Scheme != "" {
			var redirectHostBuf bytes.Buffer
			redirectHostBuf.WriteString(redirect.Host)
			if redirect.Path != "" {
				redirectHostBuf.WriteString(redirect.Path)
			}
			websiteConfiguration.RedirectAllRequestsTo = &s3.RedirectAllRequestsTo{HostName: aws.String(redirectHostBuf.String()), Protocol: aws.String(redirect.Scheme)}
		} else {
			websiteConfiguration.RedirectAllRequestsTo = &s3.RedirectAllRequestsTo{HostName: aws.String(redirectAllRequestsTo)}
		}
	}

	if routingRules != "" {
		// routing_rules is user-supplied JSON; decode it straight into the
		// SDK rule structs.
		var unmarshaledRules []*s3.RoutingRule
		if err := json.Unmarshal([]byte(routingRules), &unmarshaledRules); err != nil {
			return err
		}
		websiteConfiguration.RoutingRules = unmarshaledRules
	}

	putInput := &s3.PutBucketWebsiteInput{
		Bucket:               aws.String(bucket),
		WebsiteConfiguration: websiteConfiguration,
	}

	log.Printf("[DEBUG] S3 put bucket website: %#v", putInput)

	_, err := s3conn.PutBucketWebsite(putInput)
	if err != nil {
		return fmt.Errorf("Error putting S3 website: %s", err)
	}

	return nil
}
  1166  
  1167  func resourceAwsS3BucketWebsiteDelete(s3conn *s3.S3, d *schema.ResourceData) error {
  1168  	bucket := d.Get("bucket").(string)
  1169  	deleteInput := &s3.DeleteBucketWebsiteInput{Bucket: aws.String(bucket)}
  1170  
  1171  	log.Printf("[DEBUG] S3 delete bucket website: %#v", deleteInput)
  1172  
  1173  	_, err := s3conn.DeleteBucketWebsite(deleteInput)
  1174  	if err != nil {
  1175  		return fmt.Errorf("Error deleting S3 website: %s", err)
  1176  	}
  1177  
  1178  	d.Set("website_endpoint", "")
  1179  	d.Set("website_domain", "")
  1180  
  1181  	return nil
  1182  }
  1183  
  1184  func websiteEndpoint(s3conn *s3.S3, d *schema.ResourceData) (*S3Website, error) {
  1185  	// If the bucket doesn't have a website configuration, return an empty
  1186  	// endpoint
  1187  	if _, ok := d.GetOk("website"); !ok {
  1188  		return nil, nil
  1189  	}
  1190  
  1191  	bucket := d.Get("bucket").(string)
  1192  
  1193  	// Lookup the region for this bucket
  1194  	location, err := s3conn.GetBucketLocation(
  1195  		&s3.GetBucketLocationInput{
  1196  			Bucket: aws.String(bucket),
  1197  		},
  1198  	)
  1199  	if err != nil {
  1200  		return nil, err
  1201  	}
  1202  	var region string
  1203  	if location.LocationConstraint != nil {
  1204  		region = *location.LocationConstraint
  1205  	}
  1206  
  1207  	return WebsiteEndpoint(bucket, region), nil
  1208  }
  1209  
  1210  func WebsiteEndpoint(bucket string, region string) *S3Website {
  1211  	domain := WebsiteDomainUrl(region)
  1212  	return &S3Website{Endpoint: fmt.Sprintf("%s.%s", bucket, domain), Domain: domain}
  1213  }
  1214  
  1215  func WebsiteDomainUrl(region string) string {
  1216  	region = normalizeRegion(region)
  1217  
  1218  	// New regions uses different syntax for website endpoints
  1219  	// http://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteEndpoints.html
  1220  	if isOldRegion(region) {
  1221  		return fmt.Sprintf("s3-website-%s.amazonaws.com", region)
  1222  	}
  1223  	return fmt.Sprintf("s3-website.%s.amazonaws.com", region)
  1224  }
  1225  
  1226  func isOldRegion(region string) bool {
  1227  	oldRegions := []string{
  1228  		"ap-northeast-1",
  1229  		"ap-southeast-1",
  1230  		"ap-southeast-2",
  1231  		"eu-west-1",
  1232  		"sa-east-1",
  1233  		"us-east-1",
  1234  		"us-gov-west-1",
  1235  		"us-west-1",
  1236  		"us-west-2",
  1237  	}
  1238  	for _, r := range oldRegions {
  1239  		if region == r {
  1240  			return true
  1241  		}
  1242  	}
  1243  	return false
  1244  }
  1245  
  1246  func resourceAwsS3BucketAclUpdate(s3conn *s3.S3, d *schema.ResourceData) error {
  1247  	acl := d.Get("acl").(string)
  1248  	bucket := d.Get("bucket").(string)
  1249  
  1250  	i := &s3.PutBucketAclInput{
  1251  		Bucket: aws.String(bucket),
  1252  		ACL:    aws.String(acl),
  1253  	}
  1254  	log.Printf("[DEBUG] S3 put bucket ACL: %#v", i)
  1255  
  1256  	_, err := s3conn.PutBucketAcl(i)
  1257  	if err != nil {
  1258  		return fmt.Errorf("Error putting S3 ACL: %s", err)
  1259  	}
  1260  
  1261  	return nil
  1262  }
  1263  
  1264  func resourceAwsS3BucketVersioningUpdate(s3conn *s3.S3, d *schema.ResourceData) error {
  1265  	v := d.Get("versioning").([]interface{})
  1266  	bucket := d.Get("bucket").(string)
  1267  	vc := &s3.VersioningConfiguration{}
  1268  
  1269  	if len(v) > 0 {
  1270  		c := v[0].(map[string]interface{})
  1271  
  1272  		if c["enabled"].(bool) {
  1273  			vc.Status = aws.String(s3.BucketVersioningStatusEnabled)
  1274  		} else {
  1275  			vc.Status = aws.String(s3.BucketVersioningStatusSuspended)
  1276  		}
  1277  
  1278  		if c["mfa_delete"].(bool) {
  1279  			vc.MFADelete = aws.String(s3.MFADeleteEnabled)
  1280  		} else {
  1281  			vc.MFADelete = aws.String(s3.MFADeleteDisabled)
  1282  		}
  1283  
  1284  	} else {
  1285  		vc.Status = aws.String(s3.BucketVersioningStatusSuspended)
  1286  	}
  1287  
  1288  	i := &s3.PutBucketVersioningInput{
  1289  		Bucket:                  aws.String(bucket),
  1290  		VersioningConfiguration: vc,
  1291  	}
  1292  	log.Printf("[DEBUG] S3 put bucket versioning: %#v", i)
  1293  
  1294  	_, err := s3conn.PutBucketVersioning(i)
  1295  	if err != nil {
  1296  		return fmt.Errorf("Error putting S3 versioning: %s", err)
  1297  	}
  1298  
  1299  	return nil
  1300  }
  1301  
  1302  func resourceAwsS3BucketLoggingUpdate(s3conn *s3.S3, d *schema.ResourceData) error {
  1303  	logging := d.Get("logging").(*schema.Set).List()
  1304  	bucket := d.Get("bucket").(string)
  1305  	loggingStatus := &s3.BucketLoggingStatus{}
  1306  
  1307  	if len(logging) > 0 {
  1308  		c := logging[0].(map[string]interface{})
  1309  
  1310  		loggingEnabled := &s3.LoggingEnabled{}
  1311  		if val, ok := c["target_bucket"]; ok {
  1312  			loggingEnabled.TargetBucket = aws.String(val.(string))
  1313  		}
  1314  		if val, ok := c["target_prefix"]; ok {
  1315  			loggingEnabled.TargetPrefix = aws.String(val.(string))
  1316  		}
  1317  
  1318  		loggingStatus.LoggingEnabled = loggingEnabled
  1319  	}
  1320  
  1321  	i := &s3.PutBucketLoggingInput{
  1322  		Bucket:              aws.String(bucket),
  1323  		BucketLoggingStatus: loggingStatus,
  1324  	}
  1325  	log.Printf("[DEBUG] S3 put bucket logging: %#v", i)
  1326  
  1327  	_, err := s3conn.PutBucketLogging(i)
  1328  	if err != nil {
  1329  		return fmt.Errorf("Error putting S3 logging: %s", err)
  1330  	}
  1331  
  1332  	return nil
  1333  }
  1334  
  1335  func resourceAwsS3BucketAccelerationUpdate(s3conn *s3.S3, d *schema.ResourceData) error {
  1336  	bucket := d.Get("bucket").(string)
  1337  	enableAcceleration := d.Get("acceleration_status").(string)
  1338  
  1339  	i := &s3.PutBucketAccelerateConfigurationInput{
  1340  		Bucket: aws.String(bucket),
  1341  		AccelerateConfiguration: &s3.AccelerateConfiguration{
  1342  			Status: aws.String(enableAcceleration),
  1343  		},
  1344  	}
  1345  	log.Printf("[DEBUG] S3 put bucket acceleration: %#v", i)
  1346  
  1347  	_, err := s3conn.PutBucketAccelerateConfiguration(i)
  1348  	if err != nil {
  1349  		return fmt.Errorf("Error putting S3 acceleration: %s", err)
  1350  	}
  1351  
  1352  	return nil
  1353  }
  1354  
  1355  func resourceAwsS3BucketRequestPayerUpdate(s3conn *s3.S3, d *schema.ResourceData) error {
  1356  	bucket := d.Get("bucket").(string)
  1357  	payer := d.Get("request_payer").(string)
  1358  
  1359  	i := &s3.PutBucketRequestPaymentInput{
  1360  		Bucket: aws.String(bucket),
  1361  		RequestPaymentConfiguration: &s3.RequestPaymentConfiguration{
  1362  			Payer: aws.String(payer),
  1363  		},
  1364  	}
  1365  	log.Printf("[DEBUG] S3 put bucket request payer: %#v", i)
  1366  
  1367  	_, err := s3conn.PutBucketRequestPayment(i)
  1368  	if err != nil {
  1369  		return fmt.Errorf("Error putting S3 request payer: %s", err)
  1370  	}
  1371  
  1372  	return nil
  1373  }
  1374  
// resourceAwsS3BucketReplicationConfigurationUpdate applies (or removes) the
// bucket's replication configuration. Replication requires versioning to be
// enabled on the source bucket, which is validated here.
func resourceAwsS3BucketReplicationConfigurationUpdate(s3conn *s3.S3, d *schema.ResourceData) error {
	bucket := d.Get("bucket").(string)
	replicationConfiguration := d.Get("replication_configuration").([]interface{})

	// No replication block configured: delete any existing configuration.
	if len(replicationConfiguration) == 0 {
		i := &s3.DeleteBucketReplicationInput{
			Bucket: aws.String(bucket),
		}

		err := resource.Retry(1*time.Minute, func() *resource.RetryError {
			if _, err := s3conn.DeleteBucketReplication(i); err != nil {
				return resource.NonRetryableError(err)
			}
			return nil
		})
		if err != nil {
			return fmt.Errorf("Error removing S3 bucket replication: %s", err)
		}
		return nil
	}

	hasVersioning := false
	// Validate that bucket versioning is enabled
	if versioning, ok := d.GetOk("versioning"); ok {
		v := versioning.([]interface{})

		if v[0].(map[string]interface{})["enabled"].(bool) {
			hasVersioning = true
		}
	}

	if !hasVersioning {
		return fmt.Errorf("versioning must be enabled to allow S3 bucket replication")
	}

	c := replicationConfiguration[0].(map[string]interface{})

	rc := &s3.ReplicationConfiguration{}
	if val, ok := c["role"]; ok {
		rc.Role = aws.String(val.(string))
	}

	// Build the SDK rule list from the configured rule set.
	rcRules := c["rules"].(*schema.Set).List()
	rules := []*s3.ReplicationRule{}
	for _, v := range rcRules {
		rr := v.(map[string]interface{})
		rcRule := &s3.ReplicationRule{
			Prefix: aws.String(rr["prefix"].(string)),
			Status: aws.String(rr["status"].(string)),
		}

		if rrid, ok := rr["id"]; ok {
			rcRule.ID = aws.String(rrid.(string))
		}

		ruleDestination := &s3.Destination{}
		if destination, ok := rr["destination"]; ok {
			dest := destination.(*schema.Set).List()

			bd := dest[0].(map[string]interface{})
			ruleDestination.Bucket = aws.String(bd["bucket"].(string))

			// storage_class is optional; only set it when non-empty.
			if storageClass, ok := bd["storage_class"]; ok && storageClass != "" {
				ruleDestination.StorageClass = aws.String(storageClass.(string))
			}
		}
		rcRule.Destination = ruleDestination
		rules = append(rules, rcRule)
	}

	rc.Rules = rules
	i := &s3.PutBucketReplicationInput{
		Bucket: aws.String(bucket),
		ReplicationConfiguration: rc,
	}
	log.Printf("[DEBUG] S3 put bucket replication configuration: %#v", i)

	_, err := s3conn.PutBucketReplication(i)
	if err != nil {
		return fmt.Errorf("Error putting S3 replication configuration: %s", err)
	}

	return nil
}
  1459  
// resourceAwsS3BucketLifecycleUpdate applies (or removes) the bucket's
// lifecycle configuration from the "lifecycle_rule" list.
func resourceAwsS3BucketLifecycleUpdate(s3conn *s3.S3, d *schema.ResourceData) error {
	bucket := d.Get("bucket").(string)

	lifecycleRules := d.Get("lifecycle_rule").([]interface{})

	// No rules configured: delete any existing lifecycle configuration.
	if len(lifecycleRules) == 0 {
		i := &s3.DeleteBucketLifecycleInput{
			Bucket: aws.String(bucket),
		}

		err := resource.Retry(1*time.Minute, func() *resource.RetryError {
			if _, err := s3conn.DeleteBucketLifecycle(i); err != nil {
				return resource.NonRetryableError(err)
			}
			return nil
		})
		if err != nil {
			return fmt.Errorf("Error removing S3 lifecycle: %s", err)
		}
		return nil
	}

	rules := make([]*s3.LifecycleRule, 0, len(lifecycleRules))

	for i, lifecycleRule := range lifecycleRules {
		r := lifecycleRule.(map[string]interface{})

		rule := &s3.LifecycleRule{
			Prefix: aws.String(r["prefix"].(string)),
		}

		// ID
		if val, ok := r["id"].(string); ok && val != "" {
			rule.ID = aws.String(val)
		} else {
			// Generate a unique rule ID when none was configured.
			rule.ID = aws.String(resource.PrefixedUniqueId("tf-s3-lifecycle-"))
		}

		// Enabled
		if val, ok := r["enabled"].(bool); ok && val {
			rule.Status = aws.String(s3.ExpirationStatusEnabled)
		} else {
			rule.Status = aws.String(s3.ExpirationStatusDisabled)
		}

		// AbortIncompleteMultipartUpload
		if val, ok := r["abort_incomplete_multipart_upload_days"].(int); ok && val > 0 {
			rule.AbortIncompleteMultipartUpload = &s3.AbortIncompleteMultipartUpload{
				DaysAfterInitiation: aws.Int64(int64(val)),
			}
		}

		// Expiration
		expiration := d.Get(fmt.Sprintf("lifecycle_rule.%d.expiration", i)).(*schema.Set).List()
		if len(expiration) > 0 {
			e := expiration[0].(map[string]interface{})
			// NOTE: this `i` shadows the outer loop index for the rest of
			// this if-block.
			i := &s3.LifecycleExpiration{}

			if val, ok := e["date"].(string); ok && val != "" {
				// "date" is a bare YYYY-MM-DD; pad it to RFC3339 for parsing.
				t, err := time.Parse(time.RFC3339, fmt.Sprintf("%sT00:00:00Z", val))
				if err != nil {
					return fmt.Errorf("Error Parsing AWS S3 Bucket Lifecycle Expiration Date: %s", err.Error())
				}
				i.Date = aws.Time(t)
			} else if val, ok := e["days"].(int); ok && val > 0 {
				i.Days = aws.Int64(int64(val))
			} else if val, ok := e["expired_object_delete_marker"].(bool); ok {
				i.ExpiredObjectDeleteMarker = aws.Bool(val)
			}
			rule.Expiration = i
		}

		// NoncurrentVersionExpiration
		nc_expiration := d.Get(fmt.Sprintf("lifecycle_rule.%d.noncurrent_version_expiration", i)).(*schema.Set).List()
		if len(nc_expiration) > 0 {
			e := nc_expiration[0].(map[string]interface{})

			if val, ok := e["days"].(int); ok && val > 0 {
				rule.NoncurrentVersionExpiration = &s3.NoncurrentVersionExpiration{
					NoncurrentDays: aws.Int64(int64(val)),
				}
			}
		}

		// Transitions
		transitions := d.Get(fmt.Sprintf("lifecycle_rule.%d.transition", i)).(*schema.Set).List()
		if len(transitions) > 0 {
			rule.Transitions = make([]*s3.Transition, 0, len(transitions))
			for _, transition := range transitions {
				transition := transition.(map[string]interface{})
				// NOTE: this `i` shadows the outer loop index inside this
				// inner loop only.
				i := &s3.Transition{}
				if val, ok := transition["date"].(string); ok && val != "" {
					t, err := time.Parse(time.RFC3339, fmt.Sprintf("%sT00:00:00Z", val))
					if err != nil {
						return fmt.Errorf("Error Parsing AWS S3 Bucket Lifecycle Expiration Date: %s", err.Error())
					}
					i.Date = aws.Time(t)
				} else if val, ok := transition["days"].(int); ok && val > 0 {
					i.Days = aws.Int64(int64(val))
				}
				if val, ok := transition["storage_class"].(string); ok && val != "" {
					i.StorageClass = aws.String(val)
				}

				rule.Transitions = append(rule.Transitions, i)
			}
		}
		// NoncurrentVersionTransitions
		nc_transitions := d.Get(fmt.Sprintf("lifecycle_rule.%d.noncurrent_version_transition", i)).(*schema.Set).List()
		if len(nc_transitions) > 0 {
			rule.NoncurrentVersionTransitions = make([]*s3.NoncurrentVersionTransition, 0, len(nc_transitions))
			for _, transition := range nc_transitions {
				transition := transition.(map[string]interface{})
				i := &s3.NoncurrentVersionTransition{}
				if val, ok := transition["days"].(int); ok && val > 0 {
					i.NoncurrentDays = aws.Int64(int64(val))
				}
				if val, ok := transition["storage_class"].(string); ok && val != "" {
					i.StorageClass = aws.String(val)
				}

				rule.NoncurrentVersionTransitions = append(rule.NoncurrentVersionTransitions, i)
			}
		}

		rules = append(rules, rule)
	}

	i := &s3.PutBucketLifecycleConfigurationInput{
		Bucket: aws.String(bucket),
		LifecycleConfiguration: &s3.BucketLifecycleConfiguration{
			Rules: rules,
		},
	}

	err := resource.Retry(1*time.Minute, func() *resource.RetryError {
		if _, err := s3conn.PutBucketLifecycleConfiguration(i); err != nil {
			return resource.NonRetryableError(err)
		}
		return nil
	})
	if err != nil {
		return fmt.Errorf("Error putting S3 lifecycle: %s", err)
	}

	return nil
}
  1607  
// flattenAwsS3BucketReplicationConfiguration converts the SDK replication
// configuration into the list-of-maps shape used by the
// "replication_configuration" attribute in state.
func flattenAwsS3BucketReplicationConfiguration(r *s3.ReplicationConfiguration) []map[string]interface{} {
	replication_configuration := make([]map[string]interface{}, 0, 1)
	m := make(map[string]interface{})

	if r.Role != nil && *r.Role != "" {
		m["role"] = *r.Role
	}

	// Flatten each rule; optional pointer fields are copied only when set.
	rules := make([]interface{}, 0, len(r.Rules))
	for _, v := range r.Rules {
		t := make(map[string]interface{})
		if v.Destination != nil {
			rd := make(map[string]interface{})
			if v.Destination.Bucket != nil {
				rd["bucket"] = *v.Destination.Bucket
			}
			if v.Destination.StorageClass != nil {
				rd["storage_class"] = *v.Destination.StorageClass
			}
			t["destination"] = schema.NewSet(destinationHash, []interface{}{rd})
		}

		if v.ID != nil {
			t["id"] = *v.ID
		}
		if v.Prefix != nil {
			t["prefix"] = *v.Prefix
		}
		if v.Status != nil {
			t["status"] = *v.Status
		}
		rules = append(rules, t)
	}
	m["rules"] = schema.NewSet(rulesHash, rules)

	replication_configuration = append(replication_configuration, m)

	return replication_configuration
}
  1647  
  1648  func normalizeRoutingRules(w []*s3.RoutingRule) (string, error) {
  1649  	withNulls, err := json.Marshal(w)
  1650  	if err != nil {
  1651  		return "", err
  1652  	}
  1653  
  1654  	var rules []map[string]interface{}
  1655  	if err := json.Unmarshal(withNulls, &rules); err != nil {
  1656  		return "", err
  1657  	}
  1658  
  1659  	var cleanRules []map[string]interface{}
  1660  	for _, rule := range rules {
  1661  		cleanRules = append(cleanRules, removeNil(rule))
  1662  	}
  1663  
  1664  	withoutNulls, err := json.Marshal(cleanRules)
  1665  	if err != nil {
  1666  		return "", err
  1667  	}
  1668  
  1669  	return string(withoutNulls), nil
  1670  }
  1671  
  1672  func removeNil(data map[string]interface{}) map[string]interface{} {
  1673  	withoutNil := make(map[string]interface{})
  1674  
  1675  	for k, v := range data {
  1676  		if v == nil {
  1677  			continue
  1678  		}
  1679  
  1680  		switch v.(type) {
  1681  		case map[string]interface{}:
  1682  			withoutNil[k] = removeNil(v.(map[string]interface{}))
  1683  		default:
  1684  			withoutNil[k] = v
  1685  		}
  1686  	}
  1687  
  1688  	return withoutNil
  1689  }
  1690  
  1691  // DEPRECATED. Please consider using `normalizeJsonString` function instead.
  1692  func normalizeJson(jsonString interface{}) string {
  1693  	if jsonString == nil || jsonString == "" {
  1694  		return ""
  1695  	}
  1696  	var j interface{}
  1697  	err := json.Unmarshal([]byte(jsonString.(string)), &j)
  1698  	if err != nil {
  1699  		return fmt.Sprintf("Error parsing JSON: %s", err)
  1700  	}
  1701  	b, _ := json.Marshal(j)
  1702  	return string(b[:])
  1703  }
  1704  
  1705  func normalizeRegion(region string) string {
  1706  	// Default to us-east-1 if the bucket doesn't have a region:
  1707  	// http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html
  1708  	if region == "" {
  1709  		region = "us-east-1"
  1710  	}
  1711  
  1712  	return region
  1713  }
  1714  
  1715  func validateS3BucketAccelerationStatus(v interface{}, k string) (ws []string, errors []error) {
  1716  	validTypes := map[string]struct{}{
  1717  		"Enabled":   struct{}{},
  1718  		"Suspended": struct{}{},
  1719  	}
  1720  
  1721  	if _, ok := validTypes[v.(string)]; !ok {
  1722  		errors = append(errors, fmt.Errorf("S3 Bucket Acceleration Status %q is invalid, must be %q or %q", v.(string), "Enabled", "Suspended"))
  1723  	}
  1724  	return
  1725  }
  1726  
  1727  func validateS3BucketRequestPayerType(v interface{}, k string) (ws []string, errors []error) {
  1728  	value := v.(string)
  1729  	if value != s3.PayerRequester && value != s3.PayerBucketOwner {
  1730  		errors = append(errors, fmt.Errorf(
  1731  			"%q contains an invalid Request Payer type %q. Valid types are either %q or %q",
  1732  			k, value, s3.PayerRequester, s3.PayerBucketOwner))
  1733  	}
  1734  	return
  1735  }
  1736  
  1737  // validateS3BucketName validates any S3 bucket name that is not inside the us-east-1 region.
  1738  // Buckets outside of this region have to be DNS-compliant. After the same restrictions are
  1739  // applied to buckets in the us-east-1 region, this function can be refactored as a SchemaValidateFunc
  1740  func validateS3BucketName(value string, region string) error {
  1741  	if region != "us-east-1" {
  1742  		if (len(value) < 3) || (len(value) > 63) {
  1743  			return fmt.Errorf("%q must contain from 3 to 63 characters", value)
  1744  		}
  1745  		if !regexp.MustCompile(`^[0-9a-z-.]+$`).MatchString(value) {
  1746  			return fmt.Errorf("only lowercase alphanumeric characters and hyphens allowed in %q", value)
  1747  		}
  1748  		if regexp.MustCompile(`^(?:[0-9]{1,3}\.){3}[0-9]{1,3}$`).MatchString(value) {
  1749  			return fmt.Errorf("%q must not be formatted as an IP address", value)
  1750  		}
  1751  		if strings.HasPrefix(value, `.`) {
  1752  			return fmt.Errorf("%q cannot start with a period", value)
  1753  		}
  1754  		if strings.HasSuffix(value, `.`) {
  1755  			return fmt.Errorf("%q cannot end with a period", value)
  1756  		}
  1757  		if strings.Contains(value, `..`) {
  1758  			return fmt.Errorf("%q can be only one period between labels", value)
  1759  		}
  1760  	} else {
  1761  		if len(value) > 255 {
  1762  			return fmt.Errorf("%q must contain less than 256 characters", value)
  1763  		}
  1764  		if !regexp.MustCompile(`^[0-9a-zA-Z-._]+$`).MatchString(value) {
  1765  			return fmt.Errorf("only alphanumeric characters, hyphens, periods, and underscores allowed in %q", value)
  1766  		}
  1767  	}
  1768  	return nil
  1769  }
  1770  
  1771  func expirationHash(v interface{}) int {
  1772  	var buf bytes.Buffer
  1773  	m := v.(map[string]interface{})
  1774  	if v, ok := m["date"]; ok {
  1775  		buf.WriteString(fmt.Sprintf("%s-", v.(string)))
  1776  	}
  1777  	if v, ok := m["days"]; ok {
  1778  		buf.WriteString(fmt.Sprintf("%d-", v.(int)))
  1779  	}
  1780  	if v, ok := m["expired_object_delete_marker"]; ok {
  1781  		buf.WriteString(fmt.Sprintf("%t-", v.(bool)))
  1782  	}
  1783  	return hashcode.String(buf.String())
  1784  }
  1785  
  1786  func transitionHash(v interface{}) int {
  1787  	var buf bytes.Buffer
  1788  	m := v.(map[string]interface{})
  1789  	if v, ok := m["date"]; ok {
  1790  		buf.WriteString(fmt.Sprintf("%s-", v.(string)))
  1791  	}
  1792  	if v, ok := m["days"]; ok {
  1793  		buf.WriteString(fmt.Sprintf("%d-", v.(int)))
  1794  	}
  1795  	if v, ok := m["storage_class"]; ok {
  1796  		buf.WriteString(fmt.Sprintf("%s-", v.(string)))
  1797  	}
  1798  	return hashcode.String(buf.String())
  1799  }
  1800  
  1801  func rulesHash(v interface{}) int {
  1802  	var buf bytes.Buffer
  1803  	m := v.(map[string]interface{})
  1804  
  1805  	if v, ok := m["id"]; ok {
  1806  		buf.WriteString(fmt.Sprintf("%s-", v.(string)))
  1807  	}
  1808  	if v, ok := m["prefix"]; ok {
  1809  		buf.WriteString(fmt.Sprintf("%s-", v.(string)))
  1810  	}
  1811  	if v, ok := m["status"]; ok {
  1812  		buf.WriteString(fmt.Sprintf("%s-", v.(string)))
  1813  	}
  1814  	return hashcode.String(buf.String())
  1815  }
  1816  
  1817  func destinationHash(v interface{}) int {
  1818  	var buf bytes.Buffer
  1819  	m := v.(map[string]interface{})
  1820  
  1821  	if v, ok := m["bucket"]; ok {
  1822  		buf.WriteString(fmt.Sprintf("%s-", v.(string)))
  1823  	}
  1824  	if v, ok := m["storage_class"]; ok {
  1825  		buf.WriteString(fmt.Sprintf("%s-", v.(string)))
  1826  	}
  1827  	return hashcode.String(buf.String())
  1828  }
  1829  
// S3Website holds the computed static-website attributes for a bucket:
// Endpoint is the full "<bucket>.<domain>" hostname and Domain is the
// regional website domain alone.
type S3Website struct {
	Endpoint, Domain string
}