github.com/minamijoyo/terraform@v0.7.8-0.20161029001309-18b3736ba44b/builtin/providers/aws/resource_aws_s3_bucket.go (about)

     1  package aws
     2  
     3  import (
     4  	"bytes"
     5  	"encoding/json"
     6  	"fmt"
     7  	"log"
     8  	"net/url"
     9  	"time"
    10  
    11  	"github.com/aws/aws-sdk-go/aws"
    12  	"github.com/aws/aws-sdk-go/aws/awserr"
    13  	"github.com/aws/aws-sdk-go/service/s3"
    14  	"github.com/hashicorp/errwrap"
    15  	"github.com/hashicorp/terraform/helper/hashcode"
    16  	"github.com/hashicorp/terraform/helper/resource"
    17  	"github.com/hashicorp/terraform/helper/schema"
    18  )
    19  
    20  func resourceAwsS3Bucket() *schema.Resource {
    21  	return &schema.Resource{
    22  		Create: resourceAwsS3BucketCreate,
    23  		Read:   resourceAwsS3BucketRead,
    24  		Update: resourceAwsS3BucketUpdate,
    25  		Delete: resourceAwsS3BucketDelete,
    26  		Importer: &schema.ResourceImporter{
    27  			State: resourceAwsS3BucketImportState,
    28  		},
    29  
    30  		Schema: map[string]*schema.Schema{
    31  			"bucket": &schema.Schema{
    32  				Type:     schema.TypeString,
    33  				Required: true,
    34  				ForceNew: true,
    35  			},
    36  
    37  			"arn": &schema.Schema{
    38  				Type:     schema.TypeString,
    39  				Optional: true,
    40  				Computed: true,
    41  			},
    42  
    43  			"acl": &schema.Schema{
    44  				Type:     schema.TypeString,
    45  				Default:  "private",
    46  				Optional: true,
    47  			},
    48  
    49  			"policy": &schema.Schema{
    50  				Type:             schema.TypeString,
    51  				Optional:         true,
    52  				ValidateFunc:     validateJsonString,
    53  				DiffSuppressFunc: suppressEquivalentAwsPolicyDiffs,
    54  			},
    55  
    56  			"cors_rule": &schema.Schema{
    57  				Type:     schema.TypeList,
    58  				Optional: true,
    59  				Elem: &schema.Resource{
    60  					Schema: map[string]*schema.Schema{
    61  						"allowed_headers": &schema.Schema{
    62  							Type:     schema.TypeList,
    63  							Optional: true,
    64  							Elem:     &schema.Schema{Type: schema.TypeString},
    65  						},
    66  						"allowed_methods": &schema.Schema{
    67  							Type:     schema.TypeList,
    68  							Required: true,
    69  							Elem:     &schema.Schema{Type: schema.TypeString},
    70  						},
    71  						"allowed_origins": &schema.Schema{
    72  							Type:     schema.TypeList,
    73  							Required: true,
    74  							Elem:     &schema.Schema{Type: schema.TypeString},
    75  						},
    76  						"expose_headers": &schema.Schema{
    77  							Type:     schema.TypeList,
    78  							Optional: true,
    79  							Elem:     &schema.Schema{Type: schema.TypeString},
    80  						},
    81  						"max_age_seconds": &schema.Schema{
    82  							Type:     schema.TypeInt,
    83  							Optional: true,
    84  						},
    85  					},
    86  				},
    87  			},
    88  
    89  			"website": &schema.Schema{
    90  				Type:     schema.TypeList,
    91  				Optional: true,
    92  				Elem: &schema.Resource{
    93  					Schema: map[string]*schema.Schema{
    94  						"index_document": &schema.Schema{
    95  							Type:     schema.TypeString,
    96  							Optional: true,
    97  						},
    98  
    99  						"error_document": &schema.Schema{
   100  							Type:     schema.TypeString,
   101  							Optional: true,
   102  						},
   103  
   104  						"redirect_all_requests_to": &schema.Schema{
   105  							Type: schema.TypeString,
   106  							ConflictsWith: []string{
   107  								"website.0.index_document",
   108  								"website.0.error_document",
   109  								"website.0.routing_rules",
   110  							},
   111  							Optional: true,
   112  						},
   113  
   114  						"routing_rules": &schema.Schema{
   115  							Type:         schema.TypeString,
   116  							Optional:     true,
   117  							ValidateFunc: validateJsonString,
   118  							StateFunc: func(v interface{}) string {
   119  								json, _ := normalizeJsonString(v)
   120  								return json
   121  							},
   122  						},
   123  					},
   124  				},
   125  			},
   126  
   127  			"hosted_zone_id": &schema.Schema{
   128  				Type:     schema.TypeString,
   129  				Optional: true,
   130  				Computed: true,
   131  			},
   132  
   133  			"region": &schema.Schema{
   134  				Type:     schema.TypeString,
   135  				Optional: true,
   136  				Computed: true,
   137  			},
   138  			"website_endpoint": &schema.Schema{
   139  				Type:     schema.TypeString,
   140  				Optional: true,
   141  				Computed: true,
   142  			},
   143  			"website_domain": &schema.Schema{
   144  				Type:     schema.TypeString,
   145  				Optional: true,
   146  				Computed: true,
   147  			},
   148  
   149  			"versioning": &schema.Schema{
   150  				Type:     schema.TypeSet,
   151  				Optional: true,
   152  				Elem: &schema.Resource{
   153  					Schema: map[string]*schema.Schema{
   154  						"enabled": &schema.Schema{
   155  							Type:     schema.TypeBool,
   156  							Optional: true,
   157  							Default:  false,
   158  						},
   159  					},
   160  				},
   161  				Set: func(v interface{}) int {
   162  					var buf bytes.Buffer
   163  					m := v.(map[string]interface{})
   164  					buf.WriteString(fmt.Sprintf("%t-", m["enabled"].(bool)))
   165  
   166  					return hashcode.String(buf.String())
   167  				},
   168  			},
   169  
   170  			"logging": &schema.Schema{
   171  				Type:     schema.TypeSet,
   172  				Optional: true,
   173  				Elem: &schema.Resource{
   174  					Schema: map[string]*schema.Schema{
   175  						"target_bucket": &schema.Schema{
   176  							Type:     schema.TypeString,
   177  							Required: true,
   178  						},
   179  						"target_prefix": &schema.Schema{
   180  							Type:     schema.TypeString,
   181  							Optional: true,
   182  						},
   183  					},
   184  				},
   185  				Set: func(v interface{}) int {
   186  					var buf bytes.Buffer
   187  					m := v.(map[string]interface{})
   188  					buf.WriteString(fmt.Sprintf("%s-", m["target_bucket"]))
   189  					buf.WriteString(fmt.Sprintf("%s-", m["target_prefix"]))
   190  					return hashcode.String(buf.String())
   191  				},
   192  			},
   193  
   194  			"lifecycle_rule": &schema.Schema{
   195  				Type:     schema.TypeList,
   196  				Optional: true,
   197  				Elem: &schema.Resource{
   198  					Schema: map[string]*schema.Schema{
   199  						"id": &schema.Schema{
   200  							Type:         schema.TypeString,
   201  							Optional:     true,
   202  							Computed:     true,
   203  							ValidateFunc: validateS3BucketLifecycleRuleId,
   204  						},
   205  						"prefix": &schema.Schema{
   206  							Type:     schema.TypeString,
   207  							Required: true,
   208  						},
   209  						"enabled": &schema.Schema{
   210  							Type:     schema.TypeBool,
   211  							Required: true,
   212  						},
   213  						"abort_incomplete_multipart_upload_days": &schema.Schema{
   214  							Type:     schema.TypeInt,
   215  							Optional: true,
   216  						},
   217  						"expiration": &schema.Schema{
   218  							Type:     schema.TypeSet,
   219  							Optional: true,
   220  							Set:      expirationHash,
   221  							Elem: &schema.Resource{
   222  								Schema: map[string]*schema.Schema{
   223  									"date": &schema.Schema{
   224  										Type:         schema.TypeString,
   225  										Optional:     true,
   226  										ValidateFunc: validateS3BucketLifecycleTimestamp,
   227  									},
   228  									"days": &schema.Schema{
   229  										Type:     schema.TypeInt,
   230  										Optional: true,
   231  									},
   232  									"expired_object_delete_marker": &schema.Schema{
   233  										Type:     schema.TypeBool,
   234  										Optional: true,
   235  									},
   236  								},
   237  							},
   238  						},
   239  						"noncurrent_version_expiration": &schema.Schema{
   240  							Type:     schema.TypeSet,
   241  							Optional: true,
   242  							Set:      expirationHash,
   243  							Elem: &schema.Resource{
   244  								Schema: map[string]*schema.Schema{
   245  									"days": &schema.Schema{
   246  										Type:     schema.TypeInt,
   247  										Optional: true,
   248  									},
   249  								},
   250  							},
   251  						},
   252  						"transition": &schema.Schema{
   253  							Type:     schema.TypeSet,
   254  							Optional: true,
   255  							Set:      transitionHash,
   256  							Elem: &schema.Resource{
   257  								Schema: map[string]*schema.Schema{
   258  									"date": &schema.Schema{
   259  										Type:         schema.TypeString,
   260  										Optional:     true,
   261  										ValidateFunc: validateS3BucketLifecycleTimestamp,
   262  									},
   263  									"days": &schema.Schema{
   264  										Type:     schema.TypeInt,
   265  										Optional: true,
   266  									},
   267  									"storage_class": &schema.Schema{
   268  										Type:         schema.TypeString,
   269  										Required:     true,
   270  										ValidateFunc: validateS3BucketLifecycleStorageClass,
   271  									},
   272  								},
   273  							},
   274  						},
   275  						"noncurrent_version_transition": &schema.Schema{
   276  							Type:     schema.TypeSet,
   277  							Optional: true,
   278  							Set:      transitionHash,
   279  							Elem: &schema.Resource{
   280  								Schema: map[string]*schema.Schema{
   281  									"days": &schema.Schema{
   282  										Type:     schema.TypeInt,
   283  										Optional: true,
   284  									},
   285  									"storage_class": &schema.Schema{
   286  										Type:         schema.TypeString,
   287  										Required:     true,
   288  										ValidateFunc: validateS3BucketLifecycleStorageClass,
   289  									},
   290  								},
   291  							},
   292  						},
   293  					},
   294  				},
   295  			},
   296  
   297  			"force_destroy": &schema.Schema{
   298  				Type:     schema.TypeBool,
   299  				Optional: true,
   300  				Default:  false,
   301  			},
   302  
   303  			"acceleration_status": &schema.Schema{
   304  				Type:         schema.TypeString,
   305  				Optional:     true,
   306  				Computed:     true,
   307  				ValidateFunc: validateS3BucketAccelerationStatus,
   308  			},
   309  
   310  			"request_payer": &schema.Schema{
   311  				Type:         schema.TypeString,
   312  				Optional:     true,
   313  				Computed:     true,
   314  				ValidateFunc: validateS3BucketRequestPayerType,
   315  			},
   316  
   317  			"tags": tagsSchema(),
   318  		},
   319  	}
   320  }
   321  
   322  func resourceAwsS3BucketCreate(d *schema.ResourceData, meta interface{}) error {
   323  	s3conn := meta.(*AWSClient).s3conn
   324  	awsRegion := meta.(*AWSClient).region
   325  
   326  	// Get the bucket and acl
   327  	bucket := d.Get("bucket").(string)
   328  	acl := d.Get("acl").(string)
   329  
   330  	log.Printf("[DEBUG] S3 bucket create: %s, ACL: %s", bucket, acl)
   331  
   332  	req := &s3.CreateBucketInput{
   333  		Bucket: aws.String(bucket),
   334  		ACL:    aws.String(acl),
   335  	}
   336  
   337  	// Special case us-east-1 region and do not set the LocationConstraint.
   338  	// See "Request Elements: http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUT.html
   339  	if awsRegion != "us-east-1" {
   340  		req.CreateBucketConfiguration = &s3.CreateBucketConfiguration{
   341  			LocationConstraint: aws.String(awsRegion),
   342  		}
   343  	}
   344  
   345  	err := resource.Retry(5*time.Minute, func() *resource.RetryError {
   346  		log.Printf("[DEBUG] Trying to create new S3 bucket: %q", bucket)
   347  		_, err := s3conn.CreateBucket(req)
   348  		if awsErr, ok := err.(awserr.Error); ok {
   349  			if awsErr.Code() == "OperationAborted" {
   350  				log.Printf("[WARN] Got an error while trying to create S3 bucket %s: %s", bucket, err)
   351  				return resource.RetryableError(
   352  					fmt.Errorf("[WARN] Error creating S3 bucket %s, retrying: %s",
   353  						bucket, err))
   354  			}
   355  		}
   356  		if err != nil {
   357  			return resource.NonRetryableError(err)
   358  		}
   359  
   360  		return nil
   361  	})
   362  
   363  	if err != nil {
   364  		return fmt.Errorf("Error creating S3 bucket: %s", err)
   365  	}
   366  
   367  	// Assign the bucket name as the resource ID
   368  	d.SetId(bucket)
   369  
   370  	return resourceAwsS3BucketUpdate(d, meta)
   371  }
   372  
   373  func resourceAwsS3BucketUpdate(d *schema.ResourceData, meta interface{}) error {
   374  	s3conn := meta.(*AWSClient).s3conn
   375  	if err := setTagsS3(s3conn, d); err != nil {
   376  		return err
   377  	}
   378  
   379  	if d.HasChange("policy") {
   380  		if err := resourceAwsS3BucketPolicyUpdate(s3conn, d); err != nil {
   381  			return err
   382  		}
   383  	}
   384  
   385  	if d.HasChange("cors_rule") {
   386  		if err := resourceAwsS3BucketCorsUpdate(s3conn, d); err != nil {
   387  			return err
   388  		}
   389  	}
   390  
   391  	if d.HasChange("website") {
   392  		if err := resourceAwsS3BucketWebsiteUpdate(s3conn, d); err != nil {
   393  			return err
   394  		}
   395  	}
   396  
   397  	if d.HasChange("versioning") {
   398  		if err := resourceAwsS3BucketVersioningUpdate(s3conn, d); err != nil {
   399  			return err
   400  		}
   401  	}
   402  	if d.HasChange("acl") {
   403  		if err := resourceAwsS3BucketAclUpdate(s3conn, d); err != nil {
   404  			return err
   405  		}
   406  	}
   407  
   408  	if d.HasChange("logging") {
   409  		if err := resourceAwsS3BucketLoggingUpdate(s3conn, d); err != nil {
   410  			return err
   411  		}
   412  	}
   413  
   414  	if d.HasChange("lifecycle_rule") {
   415  		if err := resourceAwsS3BucketLifecycleUpdate(s3conn, d); err != nil {
   416  			return err
   417  		}
   418  	}
   419  
   420  	if d.HasChange("acceleration_status") {
   421  		if err := resourceAwsS3BucketAccelerationUpdate(s3conn, d); err != nil {
   422  			return err
   423  		}
   424  	}
   425  
   426  	if d.HasChange("request_payer") {
   427  		if err := resourceAwsS3BucketRequestPayerUpdate(s3conn, d); err != nil {
   428  			return err
   429  		}
   430  	}
   431  
   432  	return resourceAwsS3BucketRead(d, meta)
   433  }
   434  
// resourceAwsS3BucketRead refreshes state from the live bucket. It first
// verifies the bucket still exists (clearing the ID on a 404 so the
// resource is scheduled for re-creation), then reads each
// sub-configuration in turn: policy, CORS, website, versioning,
// acceleration status, request payer, logging, lifecycle rules, region,
// hosted zone ID, website endpoint/domain, tags and the ARN.
func resourceAwsS3BucketRead(d *schema.ResourceData, meta interface{}) error {
	s3conn := meta.(*AWSClient).s3conn

	// Existence check: a 404 from HeadBucket means the bucket is gone.
	var err error
	_, err = s3conn.HeadBucket(&s3.HeadBucketInput{
		Bucket: aws.String(d.Id()),
	})
	if err != nil {
		if awsError, ok := err.(awserr.RequestFailure); ok && awsError.StatusCode() == 404 {
			log.Printf("[WARN] S3 Bucket (%s) not found, error code (404)", d.Id())
			d.SetId("")
			return nil
		} else {
			// some of the AWS SDK's errors can be empty strings, so let's add
			// some additional context.
			return fmt.Errorf("error reading S3 bucket \"%s\": %s", d.Id(), err)
		}
	}

	// In the import case, we won't have this
	if _, ok := d.GetOk("bucket"); !ok {
		d.Set("bucket", d.Id())
	}

	// Read the policy. Only attempted when the config declares one; any
	// error fetching it resets the attribute to "" instead of failing.
	if _, ok := d.GetOk("policy"); ok {
		pol, err := s3conn.GetBucketPolicy(&s3.GetBucketPolicyInput{
			Bucket: aws.String(d.Id()),
		})
		log.Printf("[DEBUG] S3 bucket: %s, read policy: %v", d.Id(), pol)
		if err != nil {
			if err := d.Set("policy", ""); err != nil {
				return err
			}
		} else {
			if v := pol.Policy; v == nil {
				if err := d.Set("policy", ""); err != nil {
					return err
				}
			} else {
				// Normalize so semantically-equal JSON shows no diff.
				policy, err := normalizeJsonString(*v)
				if err != nil {
					return errwrap.Wrapf("policy contains an invalid JSON: {{err}}", err)
				}
				d.Set("policy", policy)
			}
		}
	}

	// Read the CORS
	cors, err := s3conn.GetBucketCors(&s3.GetBucketCorsInput{
		Bucket: aws.String(d.Id()),
	})
	if err != nil {
		// An S3 Bucket might not have CORS configuration set.
		if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() != "NoSuchCORSConfiguration" {
			return err
		}
		log.Printf("[WARN] S3 bucket: %s, no CORS configuration could be found.", d.Id())
	}
	// NOTE(review): on the "no CORS configuration" path above, cors is
	// still dereferenced below — this relies on the SDK returning a
	// non-nil (empty) output struct alongside the error; confirm against
	// aws-sdk-go.
	log.Printf("[DEBUG] S3 bucket: %s, read CORS: %v", d.Id(), cors)
	if cors.CORSRules != nil {
		rules := make([]map[string]interface{}, 0, len(cors.CORSRules))
		for _, ruleObject := range cors.CORSRules {
			rule := make(map[string]interface{})
			rule["allowed_headers"] = flattenStringList(ruleObject.AllowedHeaders)
			rule["allowed_methods"] = flattenStringList(ruleObject.AllowedMethods)
			rule["allowed_origins"] = flattenStringList(ruleObject.AllowedOrigins)
			// Both the "ExposeHeaders" and "MaxAgeSeconds" might not be set.
			// NOTE(review): this guard checks AllowedOrigins rather than
			// ExposeHeaders — looks like a copy/paste slip; verify intent.
			if ruleObject.AllowedOrigins != nil {
				rule["expose_headers"] = flattenStringList(ruleObject.ExposeHeaders)
			}
			if ruleObject.MaxAgeSeconds != nil {
				rule["max_age_seconds"] = int(*ruleObject.MaxAgeSeconds)
			}
			rules = append(rules, rule)
		}
		if err := d.Set("cors_rule", rules); err != nil {
			return err
		}
	}

	// Read the website configuration. An error here leaves websites nil,
	// which clears the "website" attribute below.
	ws, err := s3conn.GetBucketWebsite(&s3.GetBucketWebsiteInput{
		Bucket: aws.String(d.Id()),
	})
	var websites []map[string]interface{}
	if err == nil {
		w := make(map[string]interface{})

		if v := ws.IndexDocument; v != nil {
			w["index_document"] = *v.Suffix
		}

		if v := ws.ErrorDocument; v != nil {
			w["error_document"] = *v.Key
		}

		if v := ws.RedirectAllRequestsTo; v != nil {
			if v.Protocol == nil {
				w["redirect_all_requests_to"] = *v.HostName
			} else {
				// Re-assemble "scheme://host/path"; fall back to the raw
				// host name when it does not parse as a URL.
				var host string
				var path string
				parsedHostName, err := url.Parse(*v.HostName)
				if err == nil {
					host = parsedHostName.Host
					path = parsedHostName.Path
				} else {
					host = *v.HostName
					path = ""
				}

				w["redirect_all_requests_to"] = (&url.URL{
					Host:   host,
					Path:   path,
					Scheme: *v.Protocol,
				}).String()
			}
		}

		if v := ws.RoutingRules; v != nil {
			rr, err := normalizeRoutingRules(v)
			if err != nil {
				return fmt.Errorf("Error while marshaling routing rules: %s", err)
			}
			w["routing_rules"] = rr
		}

		websites = append(websites, w)
	}
	if err := d.Set("website", websites); err != nil {
		return err
	}

	// Read the versioning configuration
	versioning, err := s3conn.GetBucketVersioning(&s3.GetBucketVersioningInput{
		Bucket: aws.String(d.Id()),
	})
	if err != nil {
		return err
	}
	log.Printf("[DEBUG] S3 Bucket: %s, versioning: %v", d.Id(), versioning)
	// NOTE(review): the outer condition already requires the status to be
	// "Enabled", so the inner else branch can never execute and a
	// "Suspended" status is never written back to state — confirm whether
	// that is intentional.
	if versioning.Status != nil && *versioning.Status == s3.BucketVersioningStatusEnabled {
		vcl := make([]map[string]interface{}, 0, 1)
		vc := make(map[string]interface{})
		if *versioning.Status == s3.BucketVersioningStatusEnabled {
			vc["enabled"] = true
		} else {
			vc["enabled"] = false
		}
		vcl = append(vcl, vc)
		if err := d.Set("versioning", vcl); err != nil {
			return err
		}
	}

	//read the acceleration status
	accelerate, err := s3conn.GetBucketAccelerateConfiguration(&s3.GetBucketAccelerateConfigurationInput{
		Bucket: aws.String(d.Id()),
	})
	if err != nil {
		// Amazon S3 Transfer Acceleration might not be supported in the
		// given region, for example, China (Beijing) and the Government
		// Cloud does not support this feature at the moment.
		if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() != "UnsupportedArgument" {
			return err
		}
		log.Printf("[WARN] S3 bucket: %s, the S3 Transfer Acceleration is not supported in the region: %s", d.Id(), meta.(*AWSClient).region)
	} else {
		log.Printf("[DEBUG] S3 bucket: %s, read Acceleration: %v", d.Id(), accelerate)
		d.Set("acceleration_status", accelerate.Status)
	}

	// Read the request payer configuration.
	payer, err := s3conn.GetBucketRequestPayment(&s3.GetBucketRequestPaymentInput{
		Bucket: aws.String(d.Id()),
	})
	if err != nil {
		return err
	}
	log.Printf("[DEBUG] S3 Bucket: %s, read request payer: %v", d.Id(), payer)
	if payer.Payer != nil {
		if err := d.Set("request_payer", *payer.Payer); err != nil {
			return err
		}
	}

	// Read the logging configuration
	logging, err := s3conn.GetBucketLogging(&s3.GetBucketLoggingInput{
		Bucket: aws.String(d.Id()),
	})
	if err != nil {
		return err
	}

	log.Printf("[DEBUG] S3 Bucket: %s, logging: %v", d.Id(), logging)
	if v := logging.LoggingEnabled; v != nil {
		lcl := make([]map[string]interface{}, 0, 1)
		lc := make(map[string]interface{})
		// NOTE(review): TargetBucket/TargetPrefix are dereferenced without
		// nil checks — assumes S3 always populates both whenever
		// LoggingEnabled is present; confirm against the S3 API.
		if *v.TargetBucket != "" {
			lc["target_bucket"] = *v.TargetBucket
		}
		if *v.TargetPrefix != "" {
			lc["target_prefix"] = *v.TargetPrefix
		}
		lcl = append(lcl, lc)
		if err := d.Set("logging", lcl); err != nil {
			return err
		}
	}

	// Read the lifecycle configuration. A 404 is tolerated (no lifecycle
	// configured); any other failure aborts the read.
	lifecycle, err := s3conn.GetBucketLifecycleConfiguration(&s3.GetBucketLifecycleConfigurationInput{
		Bucket: aws.String(d.Id()),
	})
	if err != nil {
		if awsError, ok := err.(awserr.RequestFailure); ok && awsError.StatusCode() != 404 {
			return err
		}
	}
	log.Printf("[DEBUG] S3 Bucket: %s, lifecycle: %v", d.Id(), lifecycle)
	if len(lifecycle.Rules) > 0 {
		rules := make([]map[string]interface{}, 0, len(lifecycle.Rules))

		for _, lifecycleRule := range lifecycle.Rules {
			rule := make(map[string]interface{})

			// ID
			if lifecycleRule.ID != nil && *lifecycleRule.ID != "" {
				rule["id"] = *lifecycleRule.ID
			}
			// Prefix
			if lifecycleRule.Prefix != nil && *lifecycleRule.Prefix != "" {
				rule["prefix"] = *lifecycleRule.Prefix
			}
			// Enabled
			if lifecycleRule.Status != nil {
				if *lifecycleRule.Status == s3.ExpirationStatusEnabled {
					rule["enabled"] = true
				} else {
					rule["enabled"] = false
				}
			}

			// AbortIncompleteMultipartUploadDays
			if lifecycleRule.AbortIncompleteMultipartUpload != nil {
				if lifecycleRule.AbortIncompleteMultipartUpload.DaysAfterInitiation != nil {
					rule["abort_incomplete_multipart_upload_days"] = int(*lifecycleRule.AbortIncompleteMultipartUpload.DaysAfterInitiation)
				}
			}

			// expiration
			if lifecycleRule.Expiration != nil {
				e := make(map[string]interface{})
				if lifecycleRule.Expiration.Date != nil {
					e["date"] = (*lifecycleRule.Expiration.Date).Format("2006-01-02")
				}
				if lifecycleRule.Expiration.Days != nil {
					e["days"] = int(*lifecycleRule.Expiration.Days)
				}
				if lifecycleRule.Expiration.ExpiredObjectDeleteMarker != nil {
					e["expired_object_delete_marker"] = *lifecycleRule.Expiration.ExpiredObjectDeleteMarker
				}
				rule["expiration"] = schema.NewSet(expirationHash, []interface{}{e})
			}
			// noncurrent_version_expiration
			if lifecycleRule.NoncurrentVersionExpiration != nil {
				e := make(map[string]interface{})
				if lifecycleRule.NoncurrentVersionExpiration.NoncurrentDays != nil {
					e["days"] = int(*lifecycleRule.NoncurrentVersionExpiration.NoncurrentDays)
				}
				rule["noncurrent_version_expiration"] = schema.NewSet(expirationHash, []interface{}{e})
			}
			//// transition
			if len(lifecycleRule.Transitions) > 0 {
				transitions := make([]interface{}, 0, len(lifecycleRule.Transitions))
				for _, v := range lifecycleRule.Transitions {
					t := make(map[string]interface{})
					if v.Date != nil {
						t["date"] = (*v.Date).Format("2006-01-02")
					}
					if v.Days != nil {
						t["days"] = int(*v.Days)
					}
					if v.StorageClass != nil {
						t["storage_class"] = *v.StorageClass
					}
					transitions = append(transitions, t)
				}
				rule["transition"] = schema.NewSet(transitionHash, transitions)
			}
			// noncurrent_version_transition
			if len(lifecycleRule.NoncurrentVersionTransitions) > 0 {
				transitions := make([]interface{}, 0, len(lifecycleRule.NoncurrentVersionTransitions))
				for _, v := range lifecycleRule.NoncurrentVersionTransitions {
					t := make(map[string]interface{})
					if v.NoncurrentDays != nil {
						t["days"] = int(*v.NoncurrentDays)
					}
					if v.StorageClass != nil {
						t["storage_class"] = *v.StorageClass
					}
					transitions = append(transitions, t)
				}
				rule["noncurrent_version_transition"] = schema.NewSet(transitionHash, transitions)
			}

			rules = append(rules, rule)
		}

		if err := d.Set("lifecycle_rule", rules); err != nil {
			return err
		}
	}

	// Add the region as an attribute
	location, err := s3conn.GetBucketLocation(
		&s3.GetBucketLocationInput{
			Bucket: aws.String(d.Id()),
		},
	)
	if err != nil {
		return err
	}
	var region string
	if location.LocationConstraint != nil {
		region = *location.LocationConstraint
	}
	// An empty LocationConstraint is normalized (per normalizeRegion).
	region = normalizeRegion(region)
	if err := d.Set("region", region); err != nil {
		return err
	}

	// Add the hosted zone ID for this bucket's region as an attribute
	hostedZoneID := HostedZoneIDForRegion(region)
	if err := d.Set("hosted_zone_id", hostedZoneID); err != nil {
		return err
	}

	// Add website_endpoint as an attribute
	websiteEndpoint, err := websiteEndpoint(s3conn, d)
	if err != nil {
		return err
	}
	if websiteEndpoint != nil {
		if err := d.Set("website_endpoint", websiteEndpoint.Endpoint); err != nil {
			return err
		}
		if err := d.Set("website_domain", websiteEndpoint.Domain); err != nil {
			return err
		}
	}

	tagSet, err := getTagSetS3(s3conn, d.Id())
	if err != nil {
		return err
	}

	if err := d.Set("tags", tagsToMapS3(tagSet)); err != nil {
		return err
	}

	d.Set("arn", fmt.Sprint("arn:aws:s3:::", d.Id()))

	return nil
}
   802  
   803  func resourceAwsS3BucketDelete(d *schema.ResourceData, meta interface{}) error {
   804  	s3conn := meta.(*AWSClient).s3conn
   805  
   806  	log.Printf("[DEBUG] S3 Delete Bucket: %s", d.Id())
   807  	_, err := s3conn.DeleteBucket(&s3.DeleteBucketInput{
   808  		Bucket: aws.String(d.Id()),
   809  	})
   810  	if err != nil {
   811  		ec2err, ok := err.(awserr.Error)
   812  		if ok && ec2err.Code() == "BucketNotEmpty" {
   813  			if d.Get("force_destroy").(bool) {
   814  				// bucket may have things delete them
   815  				log.Printf("[DEBUG] S3 Bucket attempting to forceDestroy %+v", err)
   816  
   817  				bucket := d.Get("bucket").(string)
   818  				resp, err := s3conn.ListObjectVersions(
   819  					&s3.ListObjectVersionsInput{
   820  						Bucket: aws.String(bucket),
   821  					},
   822  				)
   823  
   824  				if err != nil {
   825  					return fmt.Errorf("Error S3 Bucket list Object Versions err: %s", err)
   826  				}
   827  
   828  				objectsToDelete := make([]*s3.ObjectIdentifier, 0)
   829  
   830  				if len(resp.DeleteMarkers) != 0 {
   831  
   832  					for _, v := range resp.DeleteMarkers {
   833  						objectsToDelete = append(objectsToDelete, &s3.ObjectIdentifier{
   834  							Key:       v.Key,
   835  							VersionId: v.VersionId,
   836  						})
   837  					}
   838  				}
   839  
   840  				if len(resp.Versions) != 0 {
   841  					for _, v := range resp.Versions {
   842  						objectsToDelete = append(objectsToDelete, &s3.ObjectIdentifier{
   843  							Key:       v.Key,
   844  							VersionId: v.VersionId,
   845  						})
   846  					}
   847  				}
   848  
   849  				params := &s3.DeleteObjectsInput{
   850  					Bucket: aws.String(bucket),
   851  					Delete: &s3.Delete{
   852  						Objects: objectsToDelete,
   853  					},
   854  				}
   855  
   856  				_, err = s3conn.DeleteObjects(params)
   857  
   858  				if err != nil {
   859  					return fmt.Errorf("Error S3 Bucket force_destroy error deleting: %s", err)
   860  				}
   861  
   862  				// this line recurses until all objects are deleted or an error is returned
   863  				return resourceAwsS3BucketDelete(d, meta)
   864  			}
   865  		}
   866  		return fmt.Errorf("Error deleting S3 Bucket: %s", err)
   867  	}
   868  	return nil
   869  }
   870  
   871  func resourceAwsS3BucketPolicyUpdate(s3conn *s3.S3, d *schema.ResourceData) error {
   872  	bucket := d.Get("bucket").(string)
   873  	policy := d.Get("policy").(string)
   874  
   875  	if policy != "" {
   876  		log.Printf("[DEBUG] S3 bucket: %s, put policy: %s", bucket, policy)
   877  
   878  		params := &s3.PutBucketPolicyInput{
   879  			Bucket: aws.String(bucket),
   880  			Policy: aws.String(policy),
   881  		}
   882  
   883  		err := resource.Retry(1*time.Minute, func() *resource.RetryError {
   884  			if _, err := s3conn.PutBucketPolicy(params); err != nil {
   885  				if awserr, ok := err.(awserr.Error); ok {
   886  					if awserr.Code() == "MalformedPolicy" {
   887  						return resource.RetryableError(awserr)
   888  					}
   889  				}
   890  				return resource.NonRetryableError(err)
   891  			}
   892  			return nil
   893  		})
   894  
   895  		if err != nil {
   896  			return fmt.Errorf("Error putting S3 policy: %s", err)
   897  		}
   898  	} else {
   899  		log.Printf("[DEBUG] S3 bucket: %s, delete policy: %s", bucket, policy)
   900  		_, err := s3conn.DeleteBucketPolicy(&s3.DeleteBucketPolicyInput{
   901  			Bucket: aws.String(bucket),
   902  		})
   903  
   904  		if err != nil {
   905  			return fmt.Errorf("Error deleting S3 policy: %s", err)
   906  		}
   907  	}
   908  
   909  	return nil
   910  }
   911  
   912  func resourceAwsS3BucketCorsUpdate(s3conn *s3.S3, d *schema.ResourceData) error {
   913  	bucket := d.Get("bucket").(string)
   914  	rawCors := d.Get("cors_rule").([]interface{})
   915  
   916  	if len(rawCors) == 0 {
   917  		// Delete CORS
   918  		log.Printf("[DEBUG] S3 bucket: %s, delete CORS", bucket)
   919  		_, err := s3conn.DeleteBucketCors(&s3.DeleteBucketCorsInput{
   920  			Bucket: aws.String(bucket),
   921  		})
   922  		if err != nil {
   923  			return fmt.Errorf("Error deleting S3 CORS: %s", err)
   924  		}
   925  	} else {
   926  		// Put CORS
   927  		rules := make([]*s3.CORSRule, 0, len(rawCors))
   928  		for _, cors := range rawCors {
   929  			corsMap := cors.(map[string]interface{})
   930  			r := &s3.CORSRule{}
   931  			for k, v := range corsMap {
   932  				log.Printf("[DEBUG] S3 bucket: %s, put CORS: %#v, %#v", bucket, k, v)
   933  				if k == "max_age_seconds" {
   934  					r.MaxAgeSeconds = aws.Int64(int64(v.(int)))
   935  				} else {
   936  					vMap := make([]*string, len(v.([]interface{})))
   937  					for i, vv := range v.([]interface{}) {
   938  						str := vv.(string)
   939  						vMap[i] = aws.String(str)
   940  					}
   941  					switch k {
   942  					case "allowed_headers":
   943  						r.AllowedHeaders = vMap
   944  					case "allowed_methods":
   945  						r.AllowedMethods = vMap
   946  					case "allowed_origins":
   947  						r.AllowedOrigins = vMap
   948  					case "expose_headers":
   949  						r.ExposeHeaders = vMap
   950  					}
   951  				}
   952  			}
   953  			rules = append(rules, r)
   954  		}
   955  		corsInput := &s3.PutBucketCorsInput{
   956  			Bucket: aws.String(bucket),
   957  			CORSConfiguration: &s3.CORSConfiguration{
   958  				CORSRules: rules,
   959  			},
   960  		}
   961  		log.Printf("[DEBUG] S3 bucket: %s, put CORS: %#v", bucket, corsInput)
   962  		_, err := s3conn.PutBucketCors(corsInput)
   963  		if err != nil {
   964  			return fmt.Errorf("Error putting S3 CORS: %s", err)
   965  		}
   966  	}
   967  
   968  	return nil
   969  }
   970  
   971  func resourceAwsS3BucketWebsiteUpdate(s3conn *s3.S3, d *schema.ResourceData) error {
   972  	ws := d.Get("website").([]interface{})
   973  
   974  	if len(ws) == 1 {
   975  		var w map[string]interface{}
   976  		if ws[0] != nil {
   977  			w = ws[0].(map[string]interface{})
   978  		} else {
   979  			w = make(map[string]interface{})
   980  		}
   981  		return resourceAwsS3BucketWebsitePut(s3conn, d, w)
   982  	} else if len(ws) == 0 {
   983  		return resourceAwsS3BucketWebsiteDelete(s3conn, d)
   984  	} else {
   985  		return fmt.Errorf("Cannot specify more than one website.")
   986  	}
   987  }
   988  
   989  func resourceAwsS3BucketWebsitePut(s3conn *s3.S3, d *schema.ResourceData, website map[string]interface{}) error {
   990  	bucket := d.Get("bucket").(string)
   991  
   992  	var indexDocument, errorDocument, redirectAllRequestsTo, routingRules string
   993  	if v, ok := website["index_document"]; ok {
   994  		indexDocument = v.(string)
   995  	}
   996  	if v, ok := website["error_document"]; ok {
   997  		errorDocument = v.(string)
   998  	}
   999  	if v, ok := website["redirect_all_requests_to"]; ok {
  1000  		redirectAllRequestsTo = v.(string)
  1001  	}
  1002  	if v, ok := website["routing_rules"]; ok {
  1003  		routingRules = v.(string)
  1004  	}
  1005  
  1006  	if indexDocument == "" && redirectAllRequestsTo == "" {
  1007  		return fmt.Errorf("Must specify either index_document or redirect_all_requests_to.")
  1008  	}
  1009  
  1010  	websiteConfiguration := &s3.WebsiteConfiguration{}
  1011  
  1012  	if indexDocument != "" {
  1013  		websiteConfiguration.IndexDocument = &s3.IndexDocument{Suffix: aws.String(indexDocument)}
  1014  	}
  1015  
  1016  	if errorDocument != "" {
  1017  		websiteConfiguration.ErrorDocument = &s3.ErrorDocument{Key: aws.String(errorDocument)}
  1018  	}
  1019  
  1020  	if redirectAllRequestsTo != "" {
  1021  		redirect, err := url.Parse(redirectAllRequestsTo)
  1022  		if err == nil && redirect.Scheme != "" {
  1023  			var redirectHostBuf bytes.Buffer
  1024  			redirectHostBuf.WriteString(redirect.Host)
  1025  			if redirect.Path != "" {
  1026  				redirectHostBuf.WriteString(redirect.Path)
  1027  			}
  1028  			websiteConfiguration.RedirectAllRequestsTo = &s3.RedirectAllRequestsTo{HostName: aws.String(redirectHostBuf.String()), Protocol: aws.String(redirect.Scheme)}
  1029  		} else {
  1030  			websiteConfiguration.RedirectAllRequestsTo = &s3.RedirectAllRequestsTo{HostName: aws.String(redirectAllRequestsTo)}
  1031  		}
  1032  	}
  1033  
  1034  	if routingRules != "" {
  1035  		var unmarshaledRules []*s3.RoutingRule
  1036  		if err := json.Unmarshal([]byte(routingRules), &unmarshaledRules); err != nil {
  1037  			return err
  1038  		}
  1039  		websiteConfiguration.RoutingRules = unmarshaledRules
  1040  	}
  1041  
  1042  	putInput := &s3.PutBucketWebsiteInput{
  1043  		Bucket:               aws.String(bucket),
  1044  		WebsiteConfiguration: websiteConfiguration,
  1045  	}
  1046  
  1047  	log.Printf("[DEBUG] S3 put bucket website: %#v", putInput)
  1048  
  1049  	_, err := s3conn.PutBucketWebsite(putInput)
  1050  	if err != nil {
  1051  		return fmt.Errorf("Error putting S3 website: %s", err)
  1052  	}
  1053  
  1054  	return nil
  1055  }
  1056  
  1057  func resourceAwsS3BucketWebsiteDelete(s3conn *s3.S3, d *schema.ResourceData) error {
  1058  	bucket := d.Get("bucket").(string)
  1059  	deleteInput := &s3.DeleteBucketWebsiteInput{Bucket: aws.String(bucket)}
  1060  
  1061  	log.Printf("[DEBUG] S3 delete bucket website: %#v", deleteInput)
  1062  
  1063  	_, err := s3conn.DeleteBucketWebsite(deleteInput)
  1064  	if err != nil {
  1065  		return fmt.Errorf("Error deleting S3 website: %s", err)
  1066  	}
  1067  
  1068  	d.Set("website_endpoint", "")
  1069  	d.Set("website_domain", "")
  1070  
  1071  	return nil
  1072  }
  1073  
  1074  func websiteEndpoint(s3conn *s3.S3, d *schema.ResourceData) (*S3Website, error) {
  1075  	// If the bucket doesn't have a website configuration, return an empty
  1076  	// endpoint
  1077  	if _, ok := d.GetOk("website"); !ok {
  1078  		return nil, nil
  1079  	}
  1080  
  1081  	bucket := d.Get("bucket").(string)
  1082  
  1083  	// Lookup the region for this bucket
  1084  	location, err := s3conn.GetBucketLocation(
  1085  		&s3.GetBucketLocationInput{
  1086  			Bucket: aws.String(bucket),
  1087  		},
  1088  	)
  1089  	if err != nil {
  1090  		return nil, err
  1091  	}
  1092  	var region string
  1093  	if location.LocationConstraint != nil {
  1094  		region = *location.LocationConstraint
  1095  	}
  1096  
  1097  	return WebsiteEndpoint(bucket, region), nil
  1098  }
  1099  
  1100  func WebsiteEndpoint(bucket string, region string) *S3Website {
  1101  	domain := WebsiteDomainUrl(region)
  1102  	return &S3Website{Endpoint: fmt.Sprintf("%s.%s", bucket, domain), Domain: domain}
  1103  }
  1104  
  1105  func WebsiteDomainUrl(region string) string {
  1106  	region = normalizeRegion(region)
  1107  
  1108  	// New regions uses different syntax for website endpoints
  1109  	// http://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteEndpoints.html
  1110  	if isOldRegion(region) {
  1111  		return fmt.Sprintf("s3-website-%s.amazonaws.com", region)
  1112  	}
  1113  	return fmt.Sprintf("s3-website.%s.amazonaws.com", region)
  1114  }
  1115  
  1116  func isOldRegion(region string) bool {
  1117  	oldRegions := []string{
  1118  		"ap-northeast-1",
  1119  		"ap-southeast-1",
  1120  		"ap-southeast-2",
  1121  		"eu-west-1",
  1122  		"sa-east-1",
  1123  		"us-east-1",
  1124  		"us-gov-west-1",
  1125  		"us-west-1",
  1126  		"us-west-2",
  1127  	}
  1128  	for _, r := range oldRegions {
  1129  		if region == r {
  1130  			return true
  1131  		}
  1132  	}
  1133  	return false
  1134  }
  1135  
  1136  func resourceAwsS3BucketAclUpdate(s3conn *s3.S3, d *schema.ResourceData) error {
  1137  	acl := d.Get("acl").(string)
  1138  	bucket := d.Get("bucket").(string)
  1139  
  1140  	i := &s3.PutBucketAclInput{
  1141  		Bucket: aws.String(bucket),
  1142  		ACL:    aws.String(acl),
  1143  	}
  1144  	log.Printf("[DEBUG] S3 put bucket ACL: %#v", i)
  1145  
  1146  	_, err := s3conn.PutBucketAcl(i)
  1147  	if err != nil {
  1148  		return fmt.Errorf("Error putting S3 ACL: %s", err)
  1149  	}
  1150  
  1151  	return nil
  1152  }
  1153  
  1154  func resourceAwsS3BucketVersioningUpdate(s3conn *s3.S3, d *schema.ResourceData) error {
  1155  	v := d.Get("versioning").(*schema.Set).List()
  1156  	bucket := d.Get("bucket").(string)
  1157  	vc := &s3.VersioningConfiguration{}
  1158  
  1159  	if len(v) > 0 {
  1160  		c := v[0].(map[string]interface{})
  1161  
  1162  		if c["enabled"].(bool) {
  1163  			vc.Status = aws.String(s3.BucketVersioningStatusEnabled)
  1164  		} else {
  1165  			vc.Status = aws.String(s3.BucketVersioningStatusSuspended)
  1166  		}
  1167  	} else {
  1168  		vc.Status = aws.String(s3.BucketVersioningStatusSuspended)
  1169  	}
  1170  
  1171  	i := &s3.PutBucketVersioningInput{
  1172  		Bucket:                  aws.String(bucket),
  1173  		VersioningConfiguration: vc,
  1174  	}
  1175  	log.Printf("[DEBUG] S3 put bucket versioning: %#v", i)
  1176  
  1177  	_, err := s3conn.PutBucketVersioning(i)
  1178  	if err != nil {
  1179  		return fmt.Errorf("Error putting S3 versioning: %s", err)
  1180  	}
  1181  
  1182  	return nil
  1183  }
  1184  
  1185  func resourceAwsS3BucketLoggingUpdate(s3conn *s3.S3, d *schema.ResourceData) error {
  1186  	logging := d.Get("logging").(*schema.Set).List()
  1187  	bucket := d.Get("bucket").(string)
  1188  	loggingStatus := &s3.BucketLoggingStatus{}
  1189  
  1190  	if len(logging) > 0 {
  1191  		c := logging[0].(map[string]interface{})
  1192  
  1193  		loggingEnabled := &s3.LoggingEnabled{}
  1194  		if val, ok := c["target_bucket"]; ok {
  1195  			loggingEnabled.TargetBucket = aws.String(val.(string))
  1196  		}
  1197  		if val, ok := c["target_prefix"]; ok {
  1198  			loggingEnabled.TargetPrefix = aws.String(val.(string))
  1199  		}
  1200  
  1201  		loggingStatus.LoggingEnabled = loggingEnabled
  1202  	}
  1203  
  1204  	i := &s3.PutBucketLoggingInput{
  1205  		Bucket:              aws.String(bucket),
  1206  		BucketLoggingStatus: loggingStatus,
  1207  	}
  1208  	log.Printf("[DEBUG] S3 put bucket logging: %#v", i)
  1209  
  1210  	_, err := s3conn.PutBucketLogging(i)
  1211  	if err != nil {
  1212  		return fmt.Errorf("Error putting S3 logging: %s", err)
  1213  	}
  1214  
  1215  	return nil
  1216  }
  1217  
  1218  func resourceAwsS3BucketAccelerationUpdate(s3conn *s3.S3, d *schema.ResourceData) error {
  1219  	bucket := d.Get("bucket").(string)
  1220  	enableAcceleration := d.Get("acceleration_status").(string)
  1221  
  1222  	i := &s3.PutBucketAccelerateConfigurationInput{
  1223  		Bucket: aws.String(bucket),
  1224  		AccelerateConfiguration: &s3.AccelerateConfiguration{
  1225  			Status: aws.String(enableAcceleration),
  1226  		},
  1227  	}
  1228  	log.Printf("[DEBUG] S3 put bucket acceleration: %#v", i)
  1229  
  1230  	_, err := s3conn.PutBucketAccelerateConfiguration(i)
  1231  	if err != nil {
  1232  		return fmt.Errorf("Error putting S3 acceleration: %s", err)
  1233  	}
  1234  
  1235  	return nil
  1236  }
  1237  
  1238  func resourceAwsS3BucketRequestPayerUpdate(s3conn *s3.S3, d *schema.ResourceData) error {
  1239  	bucket := d.Get("bucket").(string)
  1240  	payer := d.Get("request_payer").(string)
  1241  
  1242  	i := &s3.PutBucketRequestPaymentInput{
  1243  		Bucket: aws.String(bucket),
  1244  		RequestPaymentConfiguration: &s3.RequestPaymentConfiguration{
  1245  			Payer: aws.String(payer),
  1246  		},
  1247  	}
  1248  	log.Printf("[DEBUG] S3 put bucket request payer: %#v", i)
  1249  
  1250  	_, err := s3conn.PutBucketRequestPayment(i)
  1251  	if err != nil {
  1252  		return fmt.Errorf("Error putting S3 request payer: %s", err)
  1253  	}
  1254  
  1255  	return nil
  1256  }
  1257  
  1258  func resourceAwsS3BucketLifecycleUpdate(s3conn *s3.S3, d *schema.ResourceData) error {
  1259  	bucket := d.Get("bucket").(string)
  1260  
  1261  	lifecycleRules := d.Get("lifecycle_rule").([]interface{})
  1262  
  1263  	if len(lifecycleRules) == 0 {
  1264  		i := &s3.DeleteBucketLifecycleInput{
  1265  			Bucket: aws.String(bucket),
  1266  		}
  1267  
  1268  		err := resource.Retry(1*time.Minute, func() *resource.RetryError {
  1269  			if _, err := s3conn.DeleteBucketLifecycle(i); err != nil {
  1270  				return resource.NonRetryableError(err)
  1271  			}
  1272  			return nil
  1273  		})
  1274  		if err != nil {
  1275  			return fmt.Errorf("Error putting S3 lifecycle: %s", err)
  1276  		}
  1277  		return nil
  1278  	}
  1279  
  1280  	rules := make([]*s3.LifecycleRule, 0, len(lifecycleRules))
  1281  
  1282  	for i, lifecycleRule := range lifecycleRules {
  1283  		r := lifecycleRule.(map[string]interface{})
  1284  
  1285  		rule := &s3.LifecycleRule{
  1286  			Prefix: aws.String(r["prefix"].(string)),
  1287  		}
  1288  
  1289  		// ID
  1290  		if val, ok := r["id"].(string); ok && val != "" {
  1291  			rule.ID = aws.String(val)
  1292  		} else {
  1293  			rule.ID = aws.String(resource.PrefixedUniqueId("tf-s3-lifecycle-"))
  1294  		}
  1295  
  1296  		// Enabled
  1297  		if val, ok := r["enabled"].(bool); ok && val {
  1298  			rule.Status = aws.String(s3.ExpirationStatusEnabled)
  1299  		} else {
  1300  			rule.Status = aws.String(s3.ExpirationStatusDisabled)
  1301  		}
  1302  
  1303  		// AbortIncompleteMultipartUpload
  1304  		if val, ok := r["abort_incomplete_multipart_upload_days"].(int); ok && val > 0 {
  1305  			rule.AbortIncompleteMultipartUpload = &s3.AbortIncompleteMultipartUpload{
  1306  				DaysAfterInitiation: aws.Int64(int64(val)),
  1307  			}
  1308  		}
  1309  
  1310  		// Expiration
  1311  		expiration := d.Get(fmt.Sprintf("lifecycle_rule.%d.expiration", i)).(*schema.Set).List()
  1312  		if len(expiration) > 0 {
  1313  			e := expiration[0].(map[string]interface{})
  1314  			i := &s3.LifecycleExpiration{}
  1315  
  1316  			if val, ok := e["date"].(string); ok && val != "" {
  1317  				t, err := time.Parse(time.RFC3339, fmt.Sprintf("%sT00:00:00Z", val))
  1318  				if err != nil {
  1319  					return fmt.Errorf("Error Parsing AWS S3 Bucket Lifecycle Expiration Date: %s", err.Error())
  1320  				}
  1321  				i.Date = aws.Time(t)
  1322  			} else if val, ok := e["days"].(int); ok && val > 0 {
  1323  				i.Days = aws.Int64(int64(val))
  1324  			} else if val, ok := e["expired_object_delete_marker"].(bool); ok {
  1325  				i.ExpiredObjectDeleteMarker = aws.Bool(val)
  1326  			}
  1327  			rule.Expiration = i
  1328  		}
  1329  
  1330  		// NoncurrentVersionExpiration
  1331  		nc_expiration := d.Get(fmt.Sprintf("lifecycle_rule.%d.noncurrent_version_expiration", i)).(*schema.Set).List()
  1332  		if len(nc_expiration) > 0 {
  1333  			e := nc_expiration[0].(map[string]interface{})
  1334  
  1335  			if val, ok := e["days"].(int); ok && val > 0 {
  1336  				rule.NoncurrentVersionExpiration = &s3.NoncurrentVersionExpiration{
  1337  					NoncurrentDays: aws.Int64(int64(val)),
  1338  				}
  1339  			}
  1340  		}
  1341  
  1342  		// Transitions
  1343  		transitions := d.Get(fmt.Sprintf("lifecycle_rule.%d.transition", i)).(*schema.Set).List()
  1344  		if len(transitions) > 0 {
  1345  			rule.Transitions = make([]*s3.Transition, 0, len(transitions))
  1346  			for _, transition := range transitions {
  1347  				transition := transition.(map[string]interface{})
  1348  				i := &s3.Transition{}
  1349  				if val, ok := transition["date"].(string); ok && val != "" {
  1350  					t, err := time.Parse(time.RFC3339, fmt.Sprintf("%sT00:00:00Z", val))
  1351  					if err != nil {
  1352  						return fmt.Errorf("Error Parsing AWS S3 Bucket Lifecycle Expiration Date: %s", err.Error())
  1353  					}
  1354  					i.Date = aws.Time(t)
  1355  				} else if val, ok := transition["days"].(int); ok && val > 0 {
  1356  					i.Days = aws.Int64(int64(val))
  1357  				}
  1358  				if val, ok := transition["storage_class"].(string); ok && val != "" {
  1359  					i.StorageClass = aws.String(val)
  1360  				}
  1361  
  1362  				rule.Transitions = append(rule.Transitions, i)
  1363  			}
  1364  		}
  1365  		// NoncurrentVersionTransitions
  1366  		nc_transitions := d.Get(fmt.Sprintf("lifecycle_rule.%d.noncurrent_version_transition", i)).(*schema.Set).List()
  1367  		if len(nc_transitions) > 0 {
  1368  			rule.NoncurrentVersionTransitions = make([]*s3.NoncurrentVersionTransition, 0, len(nc_transitions))
  1369  			for _, transition := range nc_transitions {
  1370  				transition := transition.(map[string]interface{})
  1371  				i := &s3.NoncurrentVersionTransition{}
  1372  				if val, ok := transition["days"].(int); ok && val > 0 {
  1373  					i.NoncurrentDays = aws.Int64(int64(val))
  1374  				}
  1375  				if val, ok := transition["storage_class"].(string); ok && val != "" {
  1376  					i.StorageClass = aws.String(val)
  1377  				}
  1378  
  1379  				rule.NoncurrentVersionTransitions = append(rule.NoncurrentVersionTransitions, i)
  1380  			}
  1381  		}
  1382  
  1383  		rules = append(rules, rule)
  1384  	}
  1385  
  1386  	i := &s3.PutBucketLifecycleConfigurationInput{
  1387  		Bucket: aws.String(bucket),
  1388  		LifecycleConfiguration: &s3.BucketLifecycleConfiguration{
  1389  			Rules: rules,
  1390  		},
  1391  	}
  1392  
  1393  	err := resource.Retry(1*time.Minute, func() *resource.RetryError {
  1394  		if _, err := s3conn.PutBucketLifecycleConfiguration(i); err != nil {
  1395  			return resource.NonRetryableError(err)
  1396  		}
  1397  		return nil
  1398  	})
  1399  	if err != nil {
  1400  		return fmt.Errorf("Error putting S3 lifecycle: %s", err)
  1401  	}
  1402  
  1403  	return nil
  1404  }
  1405  
  1406  func normalizeRoutingRules(w []*s3.RoutingRule) (string, error) {
  1407  	withNulls, err := json.Marshal(w)
  1408  	if err != nil {
  1409  		return "", err
  1410  	}
  1411  
  1412  	var rules []map[string]interface{}
  1413  	if err := json.Unmarshal(withNulls, &rules); err != nil {
  1414  		return "", err
  1415  	}
  1416  
  1417  	var cleanRules []map[string]interface{}
  1418  	for _, rule := range rules {
  1419  		cleanRules = append(cleanRules, removeNil(rule))
  1420  	}
  1421  
  1422  	withoutNulls, err := json.Marshal(cleanRules)
  1423  	if err != nil {
  1424  		return "", err
  1425  	}
  1426  
  1427  	return string(withoutNulls), nil
  1428  }
  1429  
  1430  func removeNil(data map[string]interface{}) map[string]interface{} {
  1431  	withoutNil := make(map[string]interface{})
  1432  
  1433  	for k, v := range data {
  1434  		if v == nil {
  1435  			continue
  1436  		}
  1437  
  1438  		switch v.(type) {
  1439  		case map[string]interface{}:
  1440  			withoutNil[k] = removeNil(v.(map[string]interface{}))
  1441  		default:
  1442  			withoutNil[k] = v
  1443  		}
  1444  	}
  1445  
  1446  	return withoutNil
  1447  }
  1448  
  1449  // DEPRECATED. Please consider using `normalizeJsonString` function instead.
  1450  func normalizeJson(jsonString interface{}) string {
  1451  	if jsonString == nil || jsonString == "" {
  1452  		return ""
  1453  	}
  1454  	var j interface{}
  1455  	err := json.Unmarshal([]byte(jsonString.(string)), &j)
  1456  	if err != nil {
  1457  		return fmt.Sprintf("Error parsing JSON: %s", err)
  1458  	}
  1459  	b, _ := json.Marshal(j)
  1460  	return string(b[:])
  1461  }
  1462  
  1463  func normalizeRegion(region string) string {
  1464  	// Default to us-east-1 if the bucket doesn't have a region:
  1465  	// http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html
  1466  	if region == "" {
  1467  		region = "us-east-1"
  1468  	}
  1469  
  1470  	return region
  1471  }
  1472  
  1473  func validateS3BucketAccelerationStatus(v interface{}, k string) (ws []string, errors []error) {
  1474  	validTypes := map[string]struct{}{
  1475  		"Enabled":   struct{}{},
  1476  		"Suspended": struct{}{},
  1477  	}
  1478  
  1479  	if _, ok := validTypes[v.(string)]; !ok {
  1480  		errors = append(errors, fmt.Errorf("S3 Bucket Acceleration Status %q is invalid, must be %q or %q", v.(string), "Enabled", "Suspended"))
  1481  	}
  1482  	return
  1483  }
  1484  
  1485  func validateS3BucketRequestPayerType(v interface{}, k string) (ws []string, errors []error) {
  1486  	value := v.(string)
  1487  	if value != s3.PayerRequester && value != s3.PayerBucketOwner {
  1488  		errors = append(errors, fmt.Errorf(
  1489  			"%q contains an invalid Request Payer type %q. Valid types are either %q or %q",
  1490  			k, value, s3.PayerRequester, s3.PayerBucketOwner))
  1491  	}
  1492  	return
  1493  }
  1494  
  1495  func expirationHash(v interface{}) int {
  1496  	var buf bytes.Buffer
  1497  	m := v.(map[string]interface{})
  1498  	if v, ok := m["date"]; ok {
  1499  		buf.WriteString(fmt.Sprintf("%s-", v.(string)))
  1500  	}
  1501  	if v, ok := m["days"]; ok {
  1502  		buf.WriteString(fmt.Sprintf("%d-", v.(int)))
  1503  	}
  1504  	if v, ok := m["expired_object_delete_marker"]; ok {
  1505  		buf.WriteString(fmt.Sprintf("%t-", v.(bool)))
  1506  	}
  1507  	return hashcode.String(buf.String())
  1508  }
  1509  
  1510  func transitionHash(v interface{}) int {
  1511  	var buf bytes.Buffer
  1512  	m := v.(map[string]interface{})
  1513  	if v, ok := m["date"]; ok {
  1514  		buf.WriteString(fmt.Sprintf("%s-", v.(string)))
  1515  	}
  1516  	if v, ok := m["days"]; ok {
  1517  		buf.WriteString(fmt.Sprintf("%d-", v.(int)))
  1518  	}
  1519  	if v, ok := m["storage_class"]; ok {
  1520  		buf.WriteString(fmt.Sprintf("%s-", v.(string)))
  1521  	}
  1522  	return hashcode.String(buf.String())
  1523  }
  1524  
// S3Website holds the computed static-website endpoint and domain for a
// bucket, as produced by WebsiteEndpoint.
type S3Website struct {
	Endpoint, Domain string
}