github.com/nathanielks/terraform@v0.6.1-0.20170509030759-13e1a62319dc/builtin/providers/aws/resource_aws_s3_bucket.go

     1  package aws
     2  
     3  import (
     4  	"bytes"
     5  	"encoding/json"
     6  	"fmt"
     7  	"log"
     8  	"net/url"
     9  	"regexp"
    10  	"strings"
    11  	"time"
    12  
    13  	"github.com/aws/aws-sdk-go/aws"
    14  	"github.com/aws/aws-sdk-go/aws/awserr"
    15  	"github.com/aws/aws-sdk-go/service/s3"
    16  	"github.com/hashicorp/errwrap"
    17  	"github.com/hashicorp/terraform/helper/hashcode"
    18  	"github.com/hashicorp/terraform/helper/resource"
    19  	"github.com/hashicorp/terraform/helper/schema"
    20  )
    21  
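        // resourceAwsS3Bucket returns the schema and CRUD handlers for the aws_s3_bucket resource.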
    22  func resourceAwsS3Bucket() *schema.Resource {
    23  	return &schema.Resource{
    24  		Create: resourceAwsS3BucketCreate,
    25  		Read:   resourceAwsS3BucketRead,
    26  		Update: resourceAwsS3BucketUpdate,
    27  		Delete: resourceAwsS3BucketDelete,
    28  		Importer: &schema.ResourceImporter{
    29  			State: resourceAwsS3BucketImportState,
    30  		},
    31  
    32  		Schema: map[string]*schema.Schema{
    33  			"bucket": {
    34  				Type:          schema.TypeString,
    35  				Optional:      true,
    36  				Computed:      true,
    37  				ForceNew:      true,
    38  				ConflictsWith: []string{"bucket_prefix"},
    39  			},
    40  			"bucket_prefix": {
    41  				Type:     schema.TypeString,
    42  				Optional: true,
    43  				ForceNew: true,
    44  			},
    45  
    46  			"bucket_domain_name": {
    47  				Type:     schema.TypeString,
    48  				Computed: true,
    49  			},
    50  
    51  			"arn": {
    52  				Type:     schema.TypeString,
    53  				Optional: true,
    54  				Computed: true,
    55  			},
    56  
    57  			"acl": {
    58  				Type:     schema.TypeString,
    59  				Default:  "private",
    60  				Optional: true,
    61  			},
    62  
    63  			"policy": {
    64  				Type:             schema.TypeString,
    65  				Optional:         true,
    66  				ValidateFunc:     validateJsonString,
    67  				DiffSuppressFunc: suppressEquivalentAwsPolicyDiffs,
    68  			},
    69  
    70  			"cors_rule": {
    71  				Type:     schema.TypeList,
    72  				Optional: true,
    73  				Elem: &schema.Resource{
    74  					Schema: map[string]*schema.Schema{
    75  						"allowed_headers": {
    76  							Type:     schema.TypeList,
    77  							Optional: true,
    78  							Elem:     &schema.Schema{Type: schema.TypeString},
    79  						},
    80  						"allowed_methods": {
    81  							Type:     schema.TypeList,
    82  							Required: true,
    83  							Elem:     &schema.Schema{Type: schema.TypeString},
    84  						},
    85  						"allowed_origins": {
    86  							Type:     schema.TypeList,
    87  							Required: true,
    88  							Elem:     &schema.Schema{Type: schema.TypeString},
    89  						},
    90  						"expose_headers": {
    91  							Type:     schema.TypeList,
    92  							Optional: true,
    93  							Elem:     &schema.Schema{Type: schema.TypeString},
    94  						},
    95  						"max_age_seconds": {
    96  							Type:     schema.TypeInt,
    97  							Optional: true,
    98  						},
    99  					},
   100  				},
   101  			},
   102  
   103  			"website": {
   104  				Type:     schema.TypeList,
   105  				Optional: true,
   106  				Elem: &schema.Resource{
   107  					Schema: map[string]*schema.Schema{
   108  						"index_document": {
   109  							Type:     schema.TypeString,
   110  							Optional: true,
   111  						},
   112  
   113  						"error_document": {
   114  							Type:     schema.TypeString,
   115  							Optional: true,
   116  						},
   117  
   118  						"redirect_all_requests_to": {
   119  							Type: schema.TypeString,
   120  							ConflictsWith: []string{
   121  								"website.0.index_document",
   122  								"website.0.error_document",
   123  								"website.0.routing_rules",
   124  							},
   125  							Optional: true,
   126  						},
   127  
   128  						"routing_rules": {
   129  							Type:         schema.TypeString,
   130  							Optional:     true,
   131  							ValidateFunc: validateJsonString,
   132  							StateFunc: func(v interface{}) string {
   133  								json, _ := normalizeJsonString(v)
   134  								return json
   135  							},
   136  						},
   137  					},
   138  				},
   139  			},
   140  
   141  			"hosted_zone_id": {
   142  				Type:     schema.TypeString,
   143  				Optional: true,
   144  				Computed: true,
   145  			},
   146  
   147  			"region": {
   148  				Type:     schema.TypeString,
   149  				Optional: true,
   150  				Computed: true,
   151  			},
   152  			"website_endpoint": {
   153  				Type:     schema.TypeString,
   154  				Optional: true,
   155  				Computed: true,
   156  			},
   157  			"website_domain": {
   158  				Type:     schema.TypeString,
   159  				Optional: true,
   160  				Computed: true,
   161  			},
   162  
   163  			"versioning": {
   164  				Type:     schema.TypeList,
   165  				Optional: true,
   166  				Computed: true,
   167  				MaxItems: 1,
   168  				Elem: &schema.Resource{
   169  					Schema: map[string]*schema.Schema{
   170  						"enabled": {
   171  							Type:     schema.TypeBool,
   172  							Optional: true,
   173  							Default:  false,
   174  						},
   175  						"mfa_delete": {
   176  							Type:     schema.TypeBool,
   177  							Optional: true,
   178  							Default:  false,
   179  						},
   180  					},
   181  				},
   182  			},
   183  
   184  			"logging": {
   185  				Type:     schema.TypeSet,
   186  				Optional: true,
   187  				Elem: &schema.Resource{
   188  					Schema: map[string]*schema.Schema{
   189  						"target_bucket": {
   190  							Type:     schema.TypeString,
   191  							Required: true,
   192  						},
   193  						"target_prefix": {
   194  							Type:     schema.TypeString,
   195  							Optional: true,
   196  						},
   197  					},
   198  				},
   199  				Set: func(v interface{}) int {
   200  					var buf bytes.Buffer
   201  					m := v.(map[string]interface{})
   202  					buf.WriteString(fmt.Sprintf("%s-", m["target_bucket"]))
   203  					buf.WriteString(fmt.Sprintf("%s-", m["target_prefix"]))
   204  					return hashcode.String(buf.String())
   205  				},
   206  			},
   207  
   208  			"lifecycle_rule": {
   209  				Type:     schema.TypeList,
   210  				Optional: true,
   211  				Elem: &schema.Resource{
   212  					Schema: map[string]*schema.Schema{
   213  						"id": {
   214  							Type:         schema.TypeString,
   215  							Optional:     true,
   216  							Computed:     true,
   217  							ValidateFunc: validateS3BucketLifecycleRuleId,
   218  						},
   219  						"prefix": {
   220  							Type:     schema.TypeString,
   221  							Required: true,
   222  						},
   223  						"enabled": {
   224  							Type:     schema.TypeBool,
   225  							Required: true,
   226  						},
   227  						"abort_incomplete_multipart_upload_days": {
   228  							Type:     schema.TypeInt,
   229  							Optional: true,
   230  						},
   231  						"expiration": {
   232  							Type:     schema.TypeSet,
   233  							Optional: true,
   234  							Set:      expirationHash,
   235  							Elem: &schema.Resource{
   236  								Schema: map[string]*schema.Schema{
   237  									"date": {
   238  										Type:         schema.TypeString,
   239  										Optional:     true,
   240  										ValidateFunc: validateS3BucketLifecycleTimestamp,
   241  									},
   242  									"days": {
   243  										Type:     schema.TypeInt,
   244  										Optional: true,
   245  									},
   246  									"expired_object_delete_marker": {
   247  										Type:     schema.TypeBool,
   248  										Optional: true,
   249  									},
   250  								},
   251  							},
   252  						},
   253  						"noncurrent_version_expiration": {
   254  							Type:     schema.TypeSet,
   255  							Optional: true,
   256  							Set:      expirationHash,
   257  							Elem: &schema.Resource{
   258  								Schema: map[string]*schema.Schema{
   259  									"days": {
   260  										Type:     schema.TypeInt,
   261  										Optional: true,
   262  									},
   263  								},
   264  							},
   265  						},
   266  						"transition": {
   267  							Type:     schema.TypeSet,
   268  							Optional: true,
   269  							Set:      transitionHash,
   270  							Elem: &schema.Resource{
   271  								Schema: map[string]*schema.Schema{
   272  									"date": {
   273  										Type:         schema.TypeString,
   274  										Optional:     true,
   275  										ValidateFunc: validateS3BucketLifecycleTimestamp,
   276  									},
   277  									"days": {
   278  										Type:     schema.TypeInt,
   279  										Optional: true,
   280  									},
   281  									"storage_class": {
   282  										Type:         schema.TypeString,
   283  										Required:     true,
   284  										ValidateFunc: validateS3BucketLifecycleStorageClass,
   285  									},
   286  								},
   287  							},
   288  						},
   289  						"noncurrent_version_transition": {
   290  							Type:     schema.TypeSet,
   291  							Optional: true,
   292  							Set:      transitionHash,
   293  							Elem: &schema.Resource{
   294  								Schema: map[string]*schema.Schema{
   295  									"days": {
   296  										Type:     schema.TypeInt,
   297  										Optional: true,
   298  									},
   299  									"storage_class": {
   300  										Type:         schema.TypeString,
   301  										Required:     true,
   302  										ValidateFunc: validateS3BucketLifecycleStorageClass,
   303  									},
   304  								},
   305  							},
   306  						},
   307  					},
   308  				},
   309  			},
   310  
   311  			"force_destroy": {
   312  				Type:     schema.TypeBool,
   313  				Optional: true,
   314  				Default:  false,
   315  			},
   316  
   317  			"acceleration_status": {
   318  				Type:         schema.TypeString,
   319  				Optional:     true,
   320  				Computed:     true,
   321  				ValidateFunc: validateS3BucketAccelerationStatus,
   322  			},
   323  
   324  			"request_payer": {
   325  				Type:         schema.TypeString,
   326  				Optional:     true,
   327  				Computed:     true,
   328  				ValidateFunc: validateS3BucketRequestPayerType,
   329  			},
   330  
   331  			"replication_configuration": {
   332  				Type:     schema.TypeList,
   333  				Optional: true,
   334  				MaxItems: 1,
   335  				Elem: &schema.Resource{
   336  					Schema: map[string]*schema.Schema{
   337  						"role": {
   338  							Type:     schema.TypeString,
   339  							Required: true,
   340  						},
   341  						"rules": {
   342  							Type:     schema.TypeSet,
   343  							Required: true,
   344  							Set:      rulesHash,
   345  							Elem: &schema.Resource{
   346  								Schema: map[string]*schema.Schema{
   347  									"id": {
   348  										Type:         schema.TypeString,
   349  										Optional:     true,
   350  										ValidateFunc: validateS3BucketReplicationRuleId,
   351  									},
   352  									"destination": {
   353  										Type:     schema.TypeSet,
   354  										MaxItems: 1,
   355  										MinItems: 1,
   356  										Required: true,
   357  										Set:      destinationHash,
   358  										Elem: &schema.Resource{
   359  											Schema: map[string]*schema.Schema{
   360  												"bucket": {
   361  													Type:         schema.TypeString,
   362  													Required:     true,
   363  													ValidateFunc: validateArn,
   364  												},
   365  												"storage_class": {
   366  													Type:         schema.TypeString,
   367  													Optional:     true,
   368  													ValidateFunc: validateS3BucketReplicationDestinationStorageClass,
   369  												},
   370  											},
   371  										},
   372  									},
   373  									"prefix": {
   374  										Type:         schema.TypeString,
   375  										Required:     true,
   376  										ValidateFunc: validateS3BucketReplicationRulePrefix,
   377  									},
   378  									"status": {
   379  										Type:         schema.TypeString,
   380  										Required:     true,
   381  										ValidateFunc: validateS3BucketReplicationRuleStatus,
   382  									},
   383  								},
   384  							},
   385  						},
   386  					},
   387  				},
   388  			},
   389  
   390  			"tags": tagsSchema(),
   391  		},
   392  	}
   393  }
   394  
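        // resourceAwsS3BucketCreate creates the bucket (retrying transient "OperationAborted" errors), sets the
        // bucket name as the resource ID, and delegates the remaining configuration to resourceAwsS3BucketUpdate.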
   395  func resourceAwsS3BucketCreate(d *schema.ResourceData, meta interface{}) error {
   396  	s3conn := meta.(*AWSClient).s3conn
   397  
   398  	// Get the bucket and acl
   399  	var bucket string
   400  	if v, ok := d.GetOk("bucket"); ok {
   401  		bucket = v.(string)
   402  	} else if v, ok := d.GetOk("bucket_prefix"); ok {
   403  		bucket = resource.PrefixedUniqueId(v.(string))
   404  	} else {
   405  		bucket = resource.UniqueId()
   406  	}
   407  	d.Set("bucket", bucket)
   408  	acl := d.Get("acl").(string)
   409  
   410  	log.Printf("[DEBUG] S3 bucket create: %s, ACL: %s", bucket, acl)
   411  
   412  	req := &s3.CreateBucketInput{
   413  		Bucket: aws.String(bucket),
   414  		ACL:    aws.String(acl),
   415  	}
   416  
   417  	var awsRegion string
   418  	if region, ok := d.GetOk("region"); ok {
   419  		awsRegion = region.(string)
   420  	} else {
   421  		awsRegion = meta.(*AWSClient).region
   422  	}
   423  	log.Printf("[DEBUG] S3 bucket create: %s, using region: %s", bucket, awsRegion)
   424  
   425  	// Special case us-east-1 region and do not set the LocationConstraint.
   426  	// See "Request Elements" at http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUT.html
   427  	if awsRegion != "us-east-1" {
   428  		req.CreateBucketConfiguration = &s3.CreateBucketConfiguration{
   429  			LocationConstraint: aws.String(awsRegion),
   430  		}
   431  	}
   432  
   433  	if err := validateS3BucketName(bucket, awsRegion); err != nil {
   434  		return fmt.Errorf("Error validating S3 bucket name: %s", err)
   435  	}
   436  
   437  	err := resource.Retry(5*time.Minute, func() *resource.RetryError {
   438  		log.Printf("[DEBUG] Trying to create new S3 bucket: %q", bucket)
   439  		_, err := s3conn.CreateBucket(req)
   440  		if awsErr, ok := err.(awserr.Error); ok {
   441  			if awsErr.Code() == "OperationAborted" {
   442  				log.Printf("[WARN] Got an error while trying to create S3 bucket %s: %s", bucket, err)
   443  				return resource.RetryableError(
   444  					fmt.Errorf("[WARN] Error creating S3 bucket %s, retrying: %s",
   445  						bucket, err))
   446  			}
   447  		}
   448  		if err != nil {
   449  			return resource.NonRetryableError(err)
   450  		}
   451  
   452  		return nil
   453  	})
   454  
   455  	if err != nil {
   456  		return fmt.Errorf("Error creating S3 bucket: %s", err)
   457  	}
   458  
   459  	// Assign the bucket name as the resource ID
   460  	d.SetId(bucket)
   461  
   462  	return resourceAwsS3BucketUpdate(d, meta)
   463  }
   464  
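        // resourceAwsS3BucketUpdate applies tags and then pushes each changed sub-configuration (policy, CORS,
        // website, versioning, ACL, logging, lifecycle, acceleration, request payer, replication) before re-reading state.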
   465  func resourceAwsS3BucketUpdate(d *schema.ResourceData, meta interface{}) error {
   466  	s3conn := meta.(*AWSClient).s3conn
   467  	if err := setTagsS3(s3conn, d); err != nil {
   468  		return fmt.Errorf("%q: %s", d.Get("bucket").(string), err)
   469  	}
   470  
   471  	if d.HasChange("policy") {
   472  		if err := resourceAwsS3BucketPolicyUpdate(s3conn, d); err != nil {
   473  			return err
   474  		}
   475  	}
   476  
   477  	if d.HasChange("cors_rule") {
   478  		if err := resourceAwsS3BucketCorsUpdate(s3conn, d); err != nil {
   479  			return err
   480  		}
   481  	}
   482  
   483  	if d.HasChange("website") {
   484  		if err := resourceAwsS3BucketWebsiteUpdate(s3conn, d); err != nil {
   485  			return err
   486  		}
   487  	}
   488  
   489  	if d.HasChange("versioning") {
   490  		if err := resourceAwsS3BucketVersioningUpdate(s3conn, d); err != nil {
   491  			return err
   492  		}
   493  	}
   494  	if d.HasChange("acl") {
   495  		if err := resourceAwsS3BucketAclUpdate(s3conn, d); err != nil {
   496  			return err
   497  		}
   498  	}
   499  
   500  	if d.HasChange("logging") {
   501  		if err := resourceAwsS3BucketLoggingUpdate(s3conn, d); err != nil {
   502  			return err
   503  		}
   504  	}
   505  
   506  	if d.HasChange("lifecycle_rule") {
   507  		if err := resourceAwsS3BucketLifecycleUpdate(s3conn, d); err != nil {
   508  			return err
   509  		}
   510  	}
   511  
   512  	if d.HasChange("acceleration_status") {
   513  		if err := resourceAwsS3BucketAccelerationUpdate(s3conn, d); err != nil {
   514  			return err
   515  		}
   516  	}
   517  
   518  	if d.HasChange("request_payer") {
   519  		if err := resourceAwsS3BucketRequestPayerUpdate(s3conn, d); err != nil {
   520  			return err
   521  		}
   522  	}
   523  
   524  	if d.HasChange("replication_configuration") {
   525  		if err := resourceAwsS3BucketReplicationConfigurationUpdate(s3conn, d); err != nil {
   526  			return err
   527  		}
   528  	}
   529  
   530  	return resourceAwsS3BucketRead(d, meta)
   531  }
   532  
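        // resourceAwsS3BucketRead verifies the bucket still exists via HeadBucket and refreshes every
        // sub-configuration of the bucket into the Terraform state.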
   533  func resourceAwsS3BucketRead(d *schema.ResourceData, meta interface{}) error {
   534  	s3conn := meta.(*AWSClient).s3conn
   535  
   536  	var err error
   537  	_, err = s3conn.HeadBucket(&s3.HeadBucketInput{
   538  		Bucket: aws.String(d.Id()),
   539  	})
   540  	if err != nil {
   541  		if awsError, ok := err.(awserr.RequestFailure); ok && awsError.StatusCode() == 404 {
   542  			log.Printf("[WARN] S3 Bucket (%s) not found, error code (404)", d.Id())
   543  			d.SetId("")
   544  			return nil
   545  		} else {
   546  			// some of the AWS SDK's errors can be empty strings, so let's add
   547  			// some additional context.
   548  			return fmt.Errorf("error reading S3 bucket \"%s\": %s", d.Id(), err)
   549  		}
   550  	}
   551  
   552  	// In the import case, we won't have this
   553  	if _, ok := d.GetOk("bucket"); !ok {
   554  		d.Set("bucket", d.Id())
   555  	}
   556  
   557  	d.Set("bucket_domain_name", bucketDomainName(d.Get("bucket").(string)))
   558  
   559  	// Read the policy
   560  	if _, ok := d.GetOk("policy"); ok {
   561  		pol, err := s3conn.GetBucketPolicy(&s3.GetBucketPolicyInput{
   562  			Bucket: aws.String(d.Id()),
   563  		})
   564  		log.Printf("[DEBUG] S3 bucket: %s, read policy: %v", d.Id(), pol)
   565  		if err != nil {
   566  			if err := d.Set("policy", ""); err != nil {
   567  				return err
   568  			}
   569  		} else {
   570  			if v := pol.Policy; v == nil {
   571  				if err := d.Set("policy", ""); err != nil {
   572  					return err
   573  				}
   574  			} else {
   575  				policy, err := normalizeJsonString(*v)
   576  				if err != nil {
   577  					return errwrap.Wrapf("policy contains an invalid JSON: {{err}}", err)
   578  				}
   579  				d.Set("policy", policy)
   580  			}
   581  		}
   582  	}
   583  
   584  	// Read the CORS
   585  	cors, err := s3conn.GetBucketCors(&s3.GetBucketCorsInput{
   586  		Bucket: aws.String(d.Id()),
   587  	})
   588  	if err != nil {
   589  		// An S3 Bucket might not have CORS configuration set.
   590  		if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() != "NoSuchCORSConfiguration" {
   591  			return err
   592  		}
   593  		log.Printf("[WARN] S3 bucket: %s, no CORS configuration could be found.", d.Id())
   594  	}
   595  	log.Printf("[DEBUG] S3 bucket: %s, read CORS: %v", d.Id(), cors)
   596  	if cors.CORSRules != nil {
   597  		rules := make([]map[string]interface{}, 0, len(cors.CORSRules))
   598  		for _, ruleObject := range cors.CORSRules {
   599  			rule := make(map[string]interface{})
   600  			rule["allowed_headers"] = flattenStringList(ruleObject.AllowedHeaders)
   601  			rule["allowed_methods"] = flattenStringList(ruleObject.AllowedMethods)
   602  			rule["allowed_origins"] = flattenStringList(ruleObject.AllowedOrigins)
   603  			// Both the "ExposeHeaders" and "MaxAgeSeconds" might not be set.
   604  			if ruleObject.ExposeHeaders != nil {
   605  				rule["expose_headers"] = flattenStringList(ruleObject.ExposeHeaders)
   606  			}
   607  			if ruleObject.MaxAgeSeconds != nil {
   608  				rule["max_age_seconds"] = int(*ruleObject.MaxAgeSeconds)
   609  			}
   610  			rules = append(rules, rule)
   611  		}
   612  		if err := d.Set("cors_rule", rules); err != nil {
   613  			return err
   614  		}
   615  	}
   616  
   617  	// Read the website configuration
   618  	ws, err := s3conn.GetBucketWebsite(&s3.GetBucketWebsiteInput{
   619  		Bucket: aws.String(d.Id()),
   620  	})
   621  	var websites []map[string]interface{}
   622  	if err == nil {
   623  		w := make(map[string]interface{})
   624  
   625  		if v := ws.IndexDocument; v != nil {
   626  			w["index_document"] = *v.Suffix
   627  		}
   628  
   629  		if v := ws.ErrorDocument; v != nil {
   630  			w["error_document"] = *v.Key
   631  		}
   632  
   633  		if v := ws.RedirectAllRequestsTo; v != nil {
   634  			if v.Protocol == nil {
   635  				w["redirect_all_requests_to"] = *v.HostName
   636  			} else {
   637  				var host string
   638  				var path string
   639  				parsedHostName, err := url.Parse(*v.HostName)
   640  				if err == nil {
   641  					host = parsedHostName.Host
   642  					path = parsedHostName.Path
   643  				} else {
   644  					host = *v.HostName
   645  					path = ""
   646  				}
   647  
   648  				w["redirect_all_requests_to"] = (&url.URL{
   649  					Host:   host,
   650  					Path:   path,
   651  					Scheme: *v.Protocol,
   652  				}).String()
   653  			}
   654  		}
   655  
   656  		if v := ws.RoutingRules; v != nil {
   657  			rr, err := normalizeRoutingRules(v)
   658  			if err != nil {
   659  				return fmt.Errorf("Error while marshaling routing rules: %s", err)
   660  			}
   661  			w["routing_rules"] = rr
   662  		}
   663  
   664  		websites = append(websites, w)
   665  	}
   666  	if err := d.Set("website", websites); err != nil {
   667  		return err
   668  	}
   669  
   670  	// Read the versioning configuration
   671  	versioning, err := s3conn.GetBucketVersioning(&s3.GetBucketVersioningInput{
   672  		Bucket: aws.String(d.Id()),
   673  	})
   674  	if err != nil {
   675  		return err
   676  	}
   677  	log.Printf("[DEBUG] S3 Bucket: %s, versioning: %v", d.Id(), versioning)
   678  	if versioning != nil {
   679  		vcl := make([]map[string]interface{}, 0, 1)
   680  		vc := make(map[string]interface{})
   681  		if versioning.Status != nil && *versioning.Status == s3.BucketVersioningStatusEnabled {
   682  			vc["enabled"] = true
   683  		} else {
   684  			vc["enabled"] = false
   685  		}
   686  
   687  		if versioning.MFADelete != nil && *versioning.MFADelete == s3.MFADeleteEnabled {
   688  			vc["mfa_delete"] = true
   689  		} else {
   690  			vc["mfa_delete"] = false
   691  		}
   692  		vcl = append(vcl, vc)
   693  		if err := d.Set("versioning", vcl); err != nil {
   694  			return err
   695  		}
   696  	}
   697  
   698  	// Read the acceleration status
   699  	accelerate, err := s3conn.GetBucketAccelerateConfiguration(&s3.GetBucketAccelerateConfigurationInput{
   700  		Bucket: aws.String(d.Id()),
   701  	})
   702  	if err != nil {
   703  		// Amazon S3 Transfer Acceleration might not be supported in the
   704  		// given region; for example, China (Beijing) and AWS GovCloud (US)
   705  		// do not support this feature at the moment.
   706  		if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() != "UnsupportedArgument" {
   707  			return err
   708  		}
   709  
   710  		var awsRegion string
   711  		if region, ok := d.GetOk("region"); ok {
   712  			awsRegion = region.(string)
   713  		} else {
   714  			awsRegion = meta.(*AWSClient).region
   715  		}
   716  
   717  		log.Printf("[WARN] S3 bucket: %s, the S3 Transfer Acceleration is not supported in the region: %s", d.Id(), awsRegion)
   718  	} else {
   719  		log.Printf("[DEBUG] S3 bucket: %s, read Acceleration: %v", d.Id(), accelerate)
   720  		d.Set("acceleration_status", accelerate.Status)
   721  	}
   722  
   723  	// Read the request payer configuration.
   724  	payer, err := s3conn.GetBucketRequestPayment(&s3.GetBucketRequestPaymentInput{
   725  		Bucket: aws.String(d.Id()),
   726  	})
   727  	if err != nil {
   728  		return err
   729  	}
   730  	log.Printf("[DEBUG] S3 Bucket: %s, read request payer: %v", d.Id(), payer)
   731  	if payer.Payer != nil {
   732  		if err := d.Set("request_payer", *payer.Payer); err != nil {
   733  			return err
   734  		}
   735  	}
   736  
   737  	// Read the logging configuration
   738  	logging, err := s3conn.GetBucketLogging(&s3.GetBucketLoggingInput{
   739  		Bucket: aws.String(d.Id()),
   740  	})
   741  	if err != nil {
   742  		return err
   743  	}
   744  
   745  	log.Printf("[DEBUG] S3 Bucket: %s, logging: %v", d.Id(), logging)
   746  	lcl := make([]map[string]interface{}, 0, 1)
   747  	if v := logging.LoggingEnabled; v != nil {
   748  		lc := make(map[string]interface{})
   749  		if *v.TargetBucket != "" {
   750  			lc["target_bucket"] = *v.TargetBucket
   751  		}
   752  		if *v.TargetPrefix != "" {
   753  			lc["target_prefix"] = *v.TargetPrefix
   754  		}
   755  		lcl = append(lcl, lc)
   756  	}
   757  	if err := d.Set("logging", lcl); err != nil {
   758  		return err
   759  	}
   760  
   761  	// Read the lifecycle configuration
   762  	lifecycle, err := s3conn.GetBucketLifecycleConfiguration(&s3.GetBucketLifecycleConfigurationInput{
   763  		Bucket: aws.String(d.Id()),
   764  	})
   765  	if err != nil {
   766  		if awsError, ok := err.(awserr.RequestFailure); ok && awsError.StatusCode() != 404 {
   767  			return err
   768  		}
   769  	}
   770  	log.Printf("[DEBUG] S3 Bucket: %s, lifecycle: %v", d.Id(), lifecycle)
   771  	if len(lifecycle.Rules) > 0 {
   772  		rules := make([]map[string]interface{}, 0, len(lifecycle.Rules))
   773  
   774  		for _, lifecycleRule := range lifecycle.Rules {
   775  			rule := make(map[string]interface{})
   776  
   777  			// ID
   778  			if lifecycleRule.ID != nil && *lifecycleRule.ID != "" {
   779  				rule["id"] = *lifecycleRule.ID
   780  			}
   781  			// Prefix
   782  			if lifecycleRule.Prefix != nil && *lifecycleRule.Prefix != "" {
   783  				rule["prefix"] = *lifecycleRule.Prefix
   784  			}
   785  			// Enabled
   786  			if lifecycleRule.Status != nil {
   787  				if *lifecycleRule.Status == s3.ExpirationStatusEnabled {
   788  					rule["enabled"] = true
   789  				} else {
   790  					rule["enabled"] = false
   791  				}
   792  			}
   793  
   794  			// AbortIncompleteMultipartUploadDays
   795  			if lifecycleRule.AbortIncompleteMultipartUpload != nil {
   796  				if lifecycleRule.AbortIncompleteMultipartUpload.DaysAfterInitiation != nil {
   797  					rule["abort_incomplete_multipart_upload_days"] = int(*lifecycleRule.AbortIncompleteMultipartUpload.DaysAfterInitiation)
   798  				}
   799  			}
   800  
   801  			// expiration
   802  			if lifecycleRule.Expiration != nil {
   803  				e := make(map[string]interface{})
   804  				if lifecycleRule.Expiration.Date != nil {
   805  					e["date"] = (*lifecycleRule.Expiration.Date).Format("2006-01-02")
   806  				}
   807  				if lifecycleRule.Expiration.Days != nil {
   808  					e["days"] = int(*lifecycleRule.Expiration.Days)
   809  				}
   810  				if lifecycleRule.Expiration.ExpiredObjectDeleteMarker != nil {
   811  					e["expired_object_delete_marker"] = *lifecycleRule.Expiration.ExpiredObjectDeleteMarker
   812  				}
   813  				rule["expiration"] = schema.NewSet(expirationHash, []interface{}{e})
   814  			}
   815  			// noncurrent_version_expiration
   816  			if lifecycleRule.NoncurrentVersionExpiration != nil {
   817  				e := make(map[string]interface{})
   818  				if lifecycleRule.NoncurrentVersionExpiration.NoncurrentDays != nil {
   819  					e["days"] = int(*lifecycleRule.NoncurrentVersionExpiration.NoncurrentDays)
   820  				}
   821  				rule["noncurrent_version_expiration"] = schema.NewSet(expirationHash, []interface{}{e})
   822  			}
   823  			// transition
   824  			if len(lifecycleRule.Transitions) > 0 {
   825  				transitions := make([]interface{}, 0, len(lifecycleRule.Transitions))
   826  				for _, v := range lifecycleRule.Transitions {
   827  					t := make(map[string]interface{})
   828  					if v.Date != nil {
   829  						t["date"] = (*v.Date).Format("2006-01-02")
   830  					}
   831  					if v.Days != nil {
   832  						t["days"] = int(*v.Days)
   833  					}
   834  					if v.StorageClass != nil {
   835  						t["storage_class"] = *v.StorageClass
   836  					}
   837  					transitions = append(transitions, t)
   838  				}
   839  				rule["transition"] = schema.NewSet(transitionHash, transitions)
   840  			}
   841  			// noncurrent_version_transition
   842  			if len(lifecycleRule.NoncurrentVersionTransitions) > 0 {
   843  				transitions := make([]interface{}, 0, len(lifecycleRule.NoncurrentVersionTransitions))
   844  				for _, v := range lifecycleRule.NoncurrentVersionTransitions {
   845  					t := make(map[string]interface{})
   846  					if v.NoncurrentDays != nil {
   847  						t["days"] = int(*v.NoncurrentDays)
   848  					}
   849  					if v.StorageClass != nil {
   850  						t["storage_class"] = *v.StorageClass
   851  					}
   852  					transitions = append(transitions, t)
   853  				}
   854  				rule["noncurrent_version_transition"] = schema.NewSet(transitionHash, transitions)
   855  			}
   856  
   857  			rules = append(rules, rule)
   858  		}
   859  
   860  		if err := d.Set("lifecycle_rule", rules); err != nil {
   861  			return err
   862  		}
   863  	}
   864  
   865  	// Read the bucket replication configuration
   866  	replication, err := s3conn.GetBucketReplication(&s3.GetBucketReplicationInput{
   867  		Bucket: aws.String(d.Id()),
   868  	})
   869  	if err != nil {
   870  		if awsError, ok := err.(awserr.RequestFailure); ok && awsError.StatusCode() != 404 {
   871  			return err
   872  		}
   873  	}
   874  
   875  	log.Printf("[DEBUG] S3 Bucket: %s, read replication configuration: %v", d.Id(), replication)
   876  	if r := replication.ReplicationConfiguration; r != nil {
   877  		if err := d.Set("replication_configuration", flattenAwsS3BucketReplicationConfiguration(r)); err != nil {
   878  			log.Printf("[DEBUG] Error setting replication configuration: %s", err)
   879  			return err
   880  		}
   881  	}
   882  
   883  	// Add the region as an attribute
   884  	location, err := s3conn.GetBucketLocation(
   885  		&s3.GetBucketLocationInput{
   886  			Bucket: aws.String(d.Id()),
   887  		},
   888  	)
   889  	if err != nil {
   890  		return err
   891  	}
   892  	var region string
   893  	if location.LocationConstraint != nil {
   894  		region = *location.LocationConstraint
   895  	}
   896  	region = normalizeRegion(region)
   897  	if err := d.Set("region", region); err != nil {
   898  		return err
   899  	}
   900  
   901  	// Add the hosted zone ID for this bucket's region as an attribute
   902  	hostedZoneID := HostedZoneIDForRegion(region)
   903  	if err := d.Set("hosted_zone_id", hostedZoneID); err != nil {
   904  		return err
   905  	}
   906  
   907  	// Add website_endpoint as an attribute
   908  	websiteEndpoint, err := websiteEndpoint(s3conn, d)
   909  	if err != nil {
   910  		return err
   911  	}
   912  	if websiteEndpoint != nil {
   913  		if err := d.Set("website_endpoint", websiteEndpoint.Endpoint); err != nil {
   914  			return err
   915  		}
   916  		if err := d.Set("website_domain", websiteEndpoint.Domain); err != nil {
   917  			return err
   918  		}
   919  	}
   920  
   921  	tagSet, err := getTagSetS3(s3conn, d.Id())
   922  	if err != nil {
   923  		return err
   924  	}
   925  
   926  	if err := d.Set("tags", tagsToMapS3(tagSet)); err != nil {
   927  		return err
   928  	}
   929  
   930  	d.Set("arn", fmt.Sprintf("arn:%s:s3:::%s", meta.(*AWSClient).partition, d.Id()))
   931  
   932  	return nil
   933  }
   934  
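        // resourceAwsS3BucketDelete deletes the bucket; when force_destroy is set and the bucket is not empty,
        // it deletes all object versions and delete markers first and then retries the bucket deletion.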
   935  func resourceAwsS3BucketDelete(d *schema.ResourceData, meta interface{}) error {
   936  	s3conn := meta.(*AWSClient).s3conn
   937  
   938  	log.Printf("[DEBUG] S3 Delete Bucket: %s", d.Id())
   939  	_, err := s3conn.DeleteBucket(&s3.DeleteBucketInput{
   940  		Bucket: aws.String(d.Id()),
   941  	})
   942  	if err != nil {
   943  		awsErr, ok := err.(awserr.Error)
   944  		if ok && awsErr.Code() == "BucketNotEmpty" {
   945  			if d.Get("force_destroy").(bool) {
   946  				// the bucket still contains objects; delete them so the bucket itself can be removed
   947  				log.Printf("[DEBUG] S3 Bucket attempting to forceDestroy %+v", err)
   948  
   949  				bucket := d.Get("bucket").(string)
   950  				resp, err := s3conn.ListObjectVersions(
   951  					&s3.ListObjectVersionsInput{
   952  						Bucket: aws.String(bucket),
   953  					},
   954  				)
   955  
   956  				if err != nil {
   957  					return fmt.Errorf("Error listing S3 Bucket object versions: %s", err)
   958  				}
   959  
   960  				objectsToDelete := make([]*s3.ObjectIdentifier, 0)
   961  
   962  				if len(resp.DeleteMarkers) != 0 {
   963  
   964  					for _, v := range resp.DeleteMarkers {
   965  						objectsToDelete = append(objectsToDelete, &s3.ObjectIdentifier{
   966  							Key:       v.Key,
   967  							VersionId: v.VersionId,
   968  						})
   969  					}
   970  				}
   971  
   972  				if len(resp.Versions) != 0 {
   973  					for _, v := range resp.Versions {
   974  						objectsToDelete = append(objectsToDelete, &s3.ObjectIdentifier{
   975  							Key:       v.Key,
   976  							VersionId: v.VersionId,
   977  						})
   978  					}
   979  				}
   980  
   981  				params := &s3.DeleteObjectsInput{
   982  					Bucket: aws.String(bucket),
   983  					Delete: &s3.Delete{
   984  						Objects: objectsToDelete,
   985  					},
   986  				}
   987  
   988  				_, err = s3conn.DeleteObjects(params)
   989  
   990  				if err != nil {
   991  					return fmt.Errorf("Error deleting S3 Bucket objects during force_destroy: %s", err)
   992  				}
   993  
   994  				// recurse until all object versions and delete markers are removed or an error is returned
   995  				return resourceAwsS3BucketDelete(d, meta)
   996  			}
   997  		}
   998  		return fmt.Errorf("Error deleting S3 Bucket: %s %q", err, d.Get("bucket").(string))
   999  	}
  1000  	return nil
  1001  }
  1002  
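        // resourceAwsS3BucketPolicyUpdate puts the configured bucket policy, or deletes it when the policy attribute is empty.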
  1003  func resourceAwsS3BucketPolicyUpdate(s3conn *s3.S3, d *schema.ResourceData) error {
  1004  	bucket := d.Get("bucket").(string)
  1005  	policy := d.Get("policy").(string)
  1006  
  1007  	if policy != "" {
  1008  		log.Printf("[DEBUG] S3 bucket: %s, put policy: %s", bucket, policy)
  1009  
  1010  		params := &s3.PutBucketPolicyInput{
  1011  			Bucket: aws.String(bucket),
  1012  			Policy: aws.String(policy),
  1013  		}
  1014  
  1015  		err := resource.Retry(1*time.Minute, func() *resource.RetryError {
  1016  			if _, err := s3conn.PutBucketPolicy(params); err != nil {
  1017  				if awserr, ok := err.(awserr.Error); ok {
  1018  					if awserr.Code() == "MalformedPolicy" {
  1019  						return resource.RetryableError(awserr)
  1020  					}
  1021  				}
  1022  				return resource.NonRetryableError(err)
  1023  			}
  1024  			return nil
  1025  		})
  1026  
  1027  		if err != nil {
  1028  			return fmt.Errorf("Error putting S3 policy: %s", err)
  1029  		}
  1030  	} else {
  1031  		log.Printf("[DEBUG] S3 bucket: %s, delete policy: %s", bucket, policy)
  1032  		_, err := s3conn.DeleteBucketPolicy(&s3.DeleteBucketPolicyInput{
  1033  			Bucket: aws.String(bucket),
  1034  		})
  1035  
  1036  		if err != nil {
  1037  			return fmt.Errorf("Error deleting S3 policy: %s", err)
  1038  		}
  1039  	}
  1040  
  1041  	return nil
  1042  }
  1043  
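        // resourceAwsS3BucketCorsUpdate replaces the bucket's CORS configuration with the configured rules, or deletes it when none are configured.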
  1044  func resourceAwsS3BucketCorsUpdate(s3conn *s3.S3, d *schema.ResourceData) error {
  1045  	bucket := d.Get("bucket").(string)
  1046  	rawCors := d.Get("cors_rule").([]interface{})
  1047  
  1048  	if len(rawCors) == 0 {
  1049  		// Delete CORS
  1050  		log.Printf("[DEBUG] S3 bucket: %s, delete CORS", bucket)
  1051  		_, err := s3conn.DeleteBucketCors(&s3.DeleteBucketCorsInput{
  1052  			Bucket: aws.String(bucket),
  1053  		})
  1054  		if err != nil {
  1055  			return fmt.Errorf("Error deleting S3 CORS: %s", err)
  1056  		}
  1057  	} else {
  1058  		// Put CORS
  1059  		rules := make([]*s3.CORSRule, 0, len(rawCors))
  1060  		for _, cors := range rawCors {
  1061  			corsMap := cors.(map[string]interface{})
  1062  			r := &s3.CORSRule{}
  1063  			for k, v := range corsMap {
  1064  				log.Printf("[DEBUG] S3 bucket: %s, put CORS: %#v, %#v", bucket, k, v)
  1065  				if k == "max_age_seconds" {
  1066  					r.MaxAgeSeconds = aws.Int64(int64(v.(int)))
  1067  				} else {
  1068  					vMap := make([]*string, len(v.([]interface{})))
  1069  					for i, vv := range v.([]interface{}) {
  1070  						str := vv.(string)
  1071  						vMap[i] = aws.String(str)
  1072  					}
  1073  					switch k {
  1074  					case "allowed_headers":
  1075  						r.AllowedHeaders = vMap
  1076  					case "allowed_methods":
  1077  						r.AllowedMethods = vMap
  1078  					case "allowed_origins":
  1079  						r.AllowedOrigins = vMap
  1080  					case "expose_headers":
  1081  						r.ExposeHeaders = vMap
  1082  					}
  1083  				}
  1084  			}
  1085  			rules = append(rules, r)
  1086  		}
  1087  		corsInput := &s3.PutBucketCorsInput{
  1088  			Bucket: aws.String(bucket),
  1089  			CORSConfiguration: &s3.CORSConfiguration{
  1090  				CORSRules: rules,
  1091  			},
  1092  		}
  1093  		log.Printf("[DEBUG] S3 bucket: %s, put CORS: %#v", bucket, corsInput)
  1094  		_, err := s3conn.PutBucketCors(corsInput)
  1095  		if err != nil {
  1096  			return fmt.Errorf("Error putting S3 CORS: %s", err)
  1097  		}
  1098  	}
  1099  
  1100  	return nil
  1101  }
  1102  
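        // resourceAwsS3BucketWebsiteUpdate puts or deletes the bucket website configuration depending on whether a website block is present.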
  1103  func resourceAwsS3BucketWebsiteUpdate(s3conn *s3.S3, d *schema.ResourceData) error {
  1104  	ws := d.Get("website").([]interface{})
  1105  
  1106  	if len(ws) == 1 {
  1107  		var w map[string]interface{}
  1108  		if ws[0] != nil {
  1109  			w = ws[0].(map[string]interface{})
  1110  		} else {
  1111  			w = make(map[string]interface{})
  1112  		}
  1113  		return resourceAwsS3BucketWebsitePut(s3conn, d, w)
  1114  	} else if len(ws) == 0 {
  1115  		return resourceAwsS3BucketWebsiteDelete(s3conn, d)
  1116  	} else {
  1117  		return fmt.Errorf("Cannot specify more than one website.")
  1118  	}
  1119  }
  1120  
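        // resourceAwsS3BucketWebsitePut builds an s3.WebsiteConfiguration from the website block and applies it to the bucket.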
  1121  func resourceAwsS3BucketWebsitePut(s3conn *s3.S3, d *schema.ResourceData, website map[string]interface{}) error {
  1122  	bucket := d.Get("bucket").(string)
  1123  
  1124  	var indexDocument, errorDocument, redirectAllRequestsTo, routingRules string
  1125  	if v, ok := website["index_document"]; ok {
  1126  		indexDocument = v.(string)
  1127  	}
  1128  	if v, ok := website["error_document"]; ok {
  1129  		errorDocument = v.(string)
  1130  	}
  1131  	if v, ok := website["redirect_all_requests_to"]; ok {
  1132  		redirectAllRequestsTo = v.(string)
  1133  	}
  1134  	if v, ok := website["routing_rules"]; ok {
  1135  		routingRules = v.(string)
  1136  	}
  1137  
  1138  	if indexDocument == "" && redirectAllRequestsTo == "" {
  1139  		return fmt.Errorf("Must specify either index_document or redirect_all_requests_to.")
  1140  	}
  1141  
  1142  	websiteConfiguration := &s3.WebsiteConfiguration{}
  1143  
  1144  	if indexDocument != "" {
  1145  		websiteConfiguration.IndexDocument = &s3.IndexDocument{Suffix: aws.String(indexDocument)}
  1146  	}
  1147  
  1148  	if errorDocument != "" {
  1149  		websiteConfiguration.ErrorDocument = &s3.ErrorDocument{Key: aws.String(errorDocument)}
  1150  	}
  1151  
  1152  	if redirectAllRequestsTo != "" {
  1153  		redirect, err := url.Parse(redirectAllRequestsTo)
  1154  		if err == nil && redirect.Scheme != "" {
  1155  			var redirectHostBuf bytes.Buffer
  1156  			redirectHostBuf.WriteString(redirect.Host)
  1157  			if redirect.Path != "" {
  1158  				redirectHostBuf.WriteString(redirect.Path)
  1159  			}
  1160  			websiteConfiguration.RedirectAllRequestsTo = &s3.RedirectAllRequestsTo{HostName: aws.String(redirectHostBuf.String()), Protocol: aws.String(redirect.Scheme)}
  1161  		} else {
  1162  			websiteConfiguration.RedirectAllRequestsTo = &s3.RedirectAllRequestsTo{HostName: aws.String(redirectAllRequestsTo)}
  1163  		}
  1164  	}
  1165  
  1166  	if routingRules != "" {
  1167  		var unmarshaledRules []*s3.RoutingRule
  1168  		if err := json.Unmarshal([]byte(routingRules), &unmarshaledRules); err != nil {
  1169  			return err
  1170  		}
  1171  		websiteConfiguration.RoutingRules = unmarshaledRules
  1172  	}
  1173  
  1174  	putInput := &s3.PutBucketWebsiteInput{
  1175  		Bucket:               aws.String(bucket),
  1176  		WebsiteConfiguration: websiteConfiguration,
  1177  	}
  1178  
  1179  	log.Printf("[DEBUG] S3 put bucket website: %#v", putInput)
  1180  
  1181  	_, err := s3conn.PutBucketWebsite(putInput)
  1182  	if err != nil {
  1183  		return fmt.Errorf("Error putting S3 website: %s", err)
  1184  	}
  1185  
  1186  	return nil
  1187  }
  1188  
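        // resourceAwsS3BucketWebsiteDelete removes the bucket website configuration and clears the related computed attributes.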
  1189  func resourceAwsS3BucketWebsiteDelete(s3conn *s3.S3, d *schema.ResourceData) error {
  1190  	bucket := d.Get("bucket").(string)
  1191  	deleteInput := &s3.DeleteBucketWebsiteInput{Bucket: aws.String(bucket)}
  1192  
  1193  	log.Printf("[DEBUG] S3 delete bucket website: %#v", deleteInput)
  1194  
  1195  	_, err := s3conn.DeleteBucketWebsite(deleteInput)
  1196  	if err != nil {
  1197  		return fmt.Errorf("Error deleting S3 website: %s", err)
  1198  	}
  1199  
  1200  	d.Set("website_endpoint", "")
  1201  	d.Set("website_domain", "")
  1202  
  1203  	return nil
  1204  }
  1205  
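        // websiteEndpoint returns the website endpoint for the bucket's region, or nil when no website block is configured.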
  1206  func websiteEndpoint(s3conn *s3.S3, d *schema.ResourceData) (*S3Website, error) {
  1207  	// If the bucket doesn't have a website configuration, return an empty
  1208  	// endpoint
  1209  	if _, ok := d.GetOk("website"); !ok {
  1210  		return nil, nil
  1211  	}
  1212  
  1213  	bucket := d.Get("bucket").(string)
  1214  
  1215  	// Lookup the region for this bucket
  1216  	location, err := s3conn.GetBucketLocation(
  1217  		&s3.GetBucketLocationInput{
  1218  			Bucket: aws.String(bucket),
  1219  		},
  1220  	)
  1221  	if err != nil {
  1222  		return nil, err
  1223  	}
  1224  	var region string
  1225  	if location.LocationConstraint != nil {
  1226  		region = *location.LocationConstraint
  1227  	}
  1228  
  1229  	return WebsiteEndpoint(bucket, region), nil
  1230  }
  1231  
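        // bucketDomainName returns the standard domain name for the given bucket.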
  1232  func bucketDomainName(bucket string) string {
  1233  	return fmt.Sprintf("%s.s3.amazonaws.com", bucket)
  1234  }
  1235  
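        // WebsiteEndpoint builds the S3 static website endpoint and domain for a bucket in the given region.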
  1236  func WebsiteEndpoint(bucket string, region string) *S3Website {
  1237  	domain := WebsiteDomainUrl(region)
  1238  	return &S3Website{Endpoint: fmt.Sprintf("%s.%s", bucket, domain), Domain: domain}
  1239  }
  1240  
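        // WebsiteDomainUrl returns the S3 website domain for a region, using the legacy dash syntax for older regions and the dot syntax for newer ones.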
  1241  func WebsiteDomainUrl(region string) string {
  1242  	region = normalizeRegion(region)
  1243  
  1244  	// Newer regions use a different syntax for website endpoints
  1245  	// http://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteEndpoints.html
  1246  	if isOldRegion(region) {
  1247  		return fmt.Sprintf("s3-website-%s.amazonaws.com", region)
  1248  	}
  1249  	return fmt.Sprintf("s3-website.%s.amazonaws.com", region)
  1250  }
  1251  
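        // isOldRegion reports whether the region uses the legacy s3-website-<region> endpoint syntax.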
  1252  func isOldRegion(region string) bool {
  1253  	oldRegions := []string{
  1254  		"ap-northeast-1",
  1255  		"ap-southeast-1",
  1256  		"ap-southeast-2",
  1257  		"eu-west-1",
  1258  		"sa-east-1",
  1259  		"us-east-1",
  1260  		"us-gov-west-1",
  1261  		"us-west-1",
  1262  		"us-west-2",
  1263  	}
  1264  	for _, r := range oldRegions {
  1265  		if region == r {
  1266  			return true
  1267  		}
  1268  	}
  1269  	return false
  1270  }
  1271  
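        // resourceAwsS3BucketAclUpdate applies the configured canned ACL to the bucket.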
  1272  func resourceAwsS3BucketAclUpdate(s3conn *s3.S3, d *schema.ResourceData) error {
  1273  	acl := d.Get("acl").(string)
  1274  	bucket := d.Get("bucket").(string)
  1275  
  1276  	i := &s3.PutBucketAclInput{
  1277  		Bucket: aws.String(bucket),
  1278  		ACL:    aws.String(acl),
  1279  	}
  1280  	log.Printf("[DEBUG] S3 put bucket ACL: %#v", i)
  1281  
  1282  	_, err := s3conn.PutBucketAcl(i)
  1283  	if err != nil {
  1284  		return fmt.Errorf("Error putting S3 ACL: %s", err)
  1285  	}
  1286  
  1287  	return nil
  1288  }
  1289  
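        // resourceAwsS3BucketVersioningUpdate puts the bucket versioning configuration; without a versioning block, versioning is suspended.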
  1290  func resourceAwsS3BucketVersioningUpdate(s3conn *s3.S3, d *schema.ResourceData) error {
  1291  	v := d.Get("versioning").([]interface{})
  1292  	bucket := d.Get("bucket").(string)
  1293  	vc := &s3.VersioningConfiguration{}
  1294  
  1295  	if len(v) > 0 {
  1296  		c := v[0].(map[string]interface{})
  1297  
  1298  		if c["enabled"].(bool) {
  1299  			vc.Status = aws.String(s3.BucketVersioningStatusEnabled)
  1300  		} else {
  1301  			vc.Status = aws.String(s3.BucketVersioningStatusSuspended)
  1302  		}
  1303  
  1304  		if c["mfa_delete"].(bool) {
  1305  			vc.MFADelete = aws.String(s3.MFADeleteEnabled)
  1306  		} else {
  1307  			vc.MFADelete = aws.String(s3.MFADeleteDisabled)
  1308  		}
  1309  
  1310  	} else {
  1311  		vc.Status = aws.String(s3.BucketVersioningStatusSuspended)
  1312  	}
  1313  
  1314  	i := &s3.PutBucketVersioningInput{
  1315  		Bucket:                  aws.String(bucket),
  1316  		VersioningConfiguration: vc,
  1317  	}
  1318  	log.Printf("[DEBUG] S3 put bucket versioning: %#v", i)
  1319  
  1320  	_, err := s3conn.PutBucketVersioning(i)
  1321  	if err != nil {
  1322  		return fmt.Errorf("Error putting S3 versioning: %s", err)
  1323  	}
  1324  
  1325  	return nil
  1326  }
  1327  
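        // resourceAwsS3BucketLoggingUpdate puts the bucket logging configuration; an empty logging set disables logging.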
  1328  func resourceAwsS3BucketLoggingUpdate(s3conn *s3.S3, d *schema.ResourceData) error {
  1329  	logging := d.Get("logging").(*schema.Set).List()
  1330  	bucket := d.Get("bucket").(string)
  1331  	loggingStatus := &s3.BucketLoggingStatus{}
  1332  
  1333  	if len(logging) > 0 {
  1334  		c := logging[0].(map[string]interface{})
  1335  
  1336  		loggingEnabled := &s3.LoggingEnabled{}
  1337  		if val, ok := c["target_bucket"]; ok {
  1338  			loggingEnabled.TargetBucket = aws.String(val.(string))
  1339  		}
  1340  		if val, ok := c["target_prefix"]; ok {
  1341  			loggingEnabled.TargetPrefix = aws.String(val.(string))
  1342  		}
  1343  
  1344  		loggingStatus.LoggingEnabled = loggingEnabled
  1345  	}
  1346  
  1347  	i := &s3.PutBucketLoggingInput{
  1348  		Bucket:              aws.String(bucket),
  1349  		BucketLoggingStatus: loggingStatus,
  1350  	}
  1351  	log.Printf("[DEBUG] S3 put bucket logging: %#v", i)
  1352  
  1353  	_, err := s3conn.PutBucketLogging(i)
  1354  	if err != nil {
  1355  		return fmt.Errorf("Error putting S3 logging: %s", err)
  1356  	}
  1357  
  1358  	return nil
  1359  }
  1360  
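        // resourceAwsS3BucketAccelerationUpdate puts the bucket transfer acceleration configuration.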
  1361  func resourceAwsS3BucketAccelerationUpdate(s3conn *s3.S3, d *schema.ResourceData) error {
  1362  	bucket := d.Get("bucket").(string)
  1363  	enableAcceleration := d.Get("acceleration_status").(string)
  1364  
  1365  	i := &s3.PutBucketAccelerateConfigurationInput{
  1366  		Bucket: aws.String(bucket),
  1367  		AccelerateConfiguration: &s3.AccelerateConfiguration{
  1368  			Status: aws.String(enableAcceleration),
  1369  		},
  1370  	}
  1371  	log.Printf("[DEBUG] S3 put bucket acceleration: %#v", i)
  1372  
  1373  	_, err := s3conn.PutBucketAccelerateConfiguration(i)
  1374  	if err != nil {
  1375  		return fmt.Errorf("Error putting S3 acceleration: %s", err)
  1376  	}
  1377  
  1378  	return nil
  1379  }
  1380  
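        // resourceAwsS3BucketRequestPayerUpdate puts the bucket request payment configuration.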
  1381  func resourceAwsS3BucketRequestPayerUpdate(s3conn *s3.S3, d *schema.ResourceData) error {
  1382  	bucket := d.Get("bucket").(string)
  1383  	payer := d.Get("request_payer").(string)
  1384  
  1385  	i := &s3.PutBucketRequestPaymentInput{
  1386  		Bucket: aws.String(bucket),
  1387  		RequestPaymentConfiguration: &s3.RequestPaymentConfiguration{
  1388  			Payer: aws.String(payer),
  1389  		},
  1390  	}
  1391  	log.Printf("[DEBUG] S3 put bucket request payer: %#v", i)
  1392  
  1393  	_, err := s3conn.PutBucketRequestPayment(i)
  1394  	if err != nil {
  1395  		return fmt.Errorf("Error putting S3 request payer: %s", err)
  1396  	}
  1397  
  1398  	return nil
  1399  }
  1400  
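        // resourceAwsS3BucketReplicationConfigurationUpdate puts the bucket replication configuration (which requires
        // versioning to be enabled), or deletes it when the replication_configuration block is removed.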
  1401  func resourceAwsS3BucketReplicationConfigurationUpdate(s3conn *s3.S3, d *schema.ResourceData) error {
  1402  	bucket := d.Get("bucket").(string)
  1403  	replicationConfiguration := d.Get("replication_configuration").([]interface{})
  1404  
  1405  	if len(replicationConfiguration) == 0 {
  1406  		i := &s3.DeleteBucketReplicationInput{
  1407  			Bucket: aws.String(bucket),
  1408  		}
  1409  
  1410  		err := resource.Retry(1*time.Minute, func() *resource.RetryError {
  1411  			if _, err := s3conn.DeleteBucketReplication(i); err != nil {
  1412  				return resource.NonRetryableError(err)
  1413  			}
  1414  			return nil
  1415  		})
  1416  		if err != nil {
  1417  			return fmt.Errorf("Error removing S3 bucket replication: %s", err)
  1418  		}
  1419  		return nil
  1420  	}
  1421  
  1422  	hasVersioning := false
  1423  	// Validate that bucket versioning is enabled
  1424  	if versioning, ok := d.GetOk("versioning"); ok {
  1425  		v := versioning.([]interface{})
  1426  
  1427  		if v[0].(map[string]interface{})["enabled"].(bool) {
  1428  			hasVersioning = true
  1429  		}
  1430  	}
  1431  
  1432  	if !hasVersioning {
  1433  		return fmt.Errorf("versioning must be enabled to allow S3 bucket replication")
  1434  	}
  1435  
  1436  	c := replicationConfiguration[0].(map[string]interface{})
  1437  
  1438  	rc := &s3.ReplicationConfiguration{}
  1439  	if val, ok := c["role"]; ok {
  1440  		rc.Role = aws.String(val.(string))
  1441  	}
  1442  
  1443  	rcRules := c["rules"].(*schema.Set).List()
  1444  	rules := []*s3.ReplicationRule{}
  1445  	for _, v := range rcRules {
  1446  		rr := v.(map[string]interface{})
  1447  		rcRule := &s3.ReplicationRule{
  1448  			Prefix: aws.String(rr["prefix"].(string)),
  1449  			Status: aws.String(rr["status"].(string)),
  1450  		}
  1451  
  1452  		if rrid, ok := rr["id"]; ok {
  1453  			rcRule.ID = aws.String(rrid.(string))
  1454  		}
  1455  
  1456  		ruleDestination := &s3.Destination{}
  1457  		if destination, ok := rr["destination"]; ok {
  1458  			dest := destination.(*schema.Set).List()
  1459  
  1460  			bd := dest[0].(map[string]interface{})
  1461  			ruleDestination.Bucket = aws.String(bd["bucket"].(string))
  1462  
  1463  			if storageClass, ok := bd["storage_class"]; ok && storageClass != "" {
  1464  				ruleDestination.StorageClass = aws.String(storageClass.(string))
  1465  			}
  1466  		}
  1467  		rcRule.Destination = ruleDestination
  1468  		rules = append(rules, rcRule)
  1469  	}
  1470  
  1471  	rc.Rules = rules
  1472  	i := &s3.PutBucketReplicationInput{
  1473  		Bucket: aws.String(bucket),
  1474  		ReplicationConfiguration: rc,
  1475  	}
  1476  	log.Printf("[DEBUG] S3 put bucket replication configuration: %#v", i)
  1477  
  1478  	_, err := s3conn.PutBucketReplication(i)
  1479  	if err != nil {
  1480  		return fmt.Errorf("Error putting S3 replication configuration: %s", err)
  1481  	}
  1482  
  1483  	return nil
  1484  }
  1485  
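        // resourceAwsS3BucketLifecycleUpdate puts the lifecycle configuration built from lifecycle_rule blocks, or deletes it when no rules are configured.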
  1486  func resourceAwsS3BucketLifecycleUpdate(s3conn *s3.S3, d *schema.ResourceData) error {
  1487  	bucket := d.Get("bucket").(string)
  1488  
  1489  	lifecycleRules := d.Get("lifecycle_rule").([]interface{})
  1490  
  1491  	if len(lifecycleRules) == 0 {
  1492  		i := &s3.DeleteBucketLifecycleInput{
  1493  			Bucket: aws.String(bucket),
  1494  		}
  1495  
  1496  		err := resource.Retry(1*time.Minute, func() *resource.RetryError {
  1497  			if _, err := s3conn.DeleteBucketLifecycle(i); err != nil {
  1498  				return resource.NonRetryableError(err)
  1499  			}
  1500  			return nil
  1501  		})
  1502  		if err != nil {
  1503  			return fmt.Errorf("Error removing S3 lifecycle: %s", err)
  1504  		}
  1505  		return nil
  1506  	}
  1507  
  1508  	rules := make([]*s3.LifecycleRule, 0, len(lifecycleRules))
  1509  
  1510  	for i, lifecycleRule := range lifecycleRules {
  1511  		r := lifecycleRule.(map[string]interface{})
  1512  
  1513  		rule := &s3.LifecycleRule{
  1514  			Prefix: aws.String(r["prefix"].(string)),
  1515  		}
  1516  
  1517  		// ID
  1518  		if val, ok := r["id"].(string); ok && val != "" {
  1519  			rule.ID = aws.String(val)
  1520  		} else {
  1521  			rule.ID = aws.String(resource.PrefixedUniqueId("tf-s3-lifecycle-"))
  1522  		}
  1523  
  1524  		// Enabled
  1525  		if val, ok := r["enabled"].(bool); ok && val {
  1526  			rule.Status = aws.String(s3.ExpirationStatusEnabled)
  1527  		} else {
  1528  			rule.Status = aws.String(s3.ExpirationStatusDisabled)
  1529  		}
  1530  
  1531  		// AbortIncompleteMultipartUpload
  1532  		if val, ok := r["abort_incomplete_multipart_upload_days"].(int); ok && val > 0 {
  1533  			rule.AbortIncompleteMultipartUpload = &s3.AbortIncompleteMultipartUpload{
  1534  				DaysAfterInitiation: aws.Int64(int64(val)),
  1535  			}
  1536  		}
  1537  
  1538  		// Expiration
  1539  		expiration := d.Get(fmt.Sprintf("lifecycle_rule.%d.expiration", i)).(*schema.Set).List()
  1540  		if len(expiration) > 0 {
  1541  			e := expiration[0].(map[string]interface{})
  1542  			i := &s3.LifecycleExpiration{}
  1543  
  1544  			if val, ok := e["date"].(string); ok && val != "" {
  1545  				t, err := time.Parse(time.RFC3339, fmt.Sprintf("%sT00:00:00Z", val))
  1546  				if err != nil {
  1547  					return fmt.Errorf("Error Parsing AWS S3 Bucket Lifecycle Expiration Date: %s", err.Error())
  1548  				}
  1549  				i.Date = aws.Time(t)
  1550  			} else if val, ok := e["days"].(int); ok && val > 0 {
  1551  				i.Days = aws.Int64(int64(val))
  1552  			} else if val, ok := e["expired_object_delete_marker"].(bool); ok {
  1553  				i.ExpiredObjectDeleteMarker = aws.Bool(val)
  1554  			}
  1555  			rule.Expiration = i
  1556  		}
  1557  
  1558  		// NoncurrentVersionExpiration
  1559  		nc_expiration := d.Get(fmt.Sprintf("lifecycle_rule.%d.noncurrent_version_expiration", i)).(*schema.Set).List()
  1560  		if len(nc_expiration) > 0 {
  1561  			e := nc_expiration[0].(map[string]interface{})
  1562  
  1563  			if val, ok := e["days"].(int); ok && val > 0 {
  1564  				rule.NoncurrentVersionExpiration = &s3.NoncurrentVersionExpiration{
  1565  					NoncurrentDays: aws.Int64(int64(val)),
  1566  				}
  1567  			}
  1568  		}
  1569  
  1570  		// Transitions
  1571  		transitions := d.Get(fmt.Sprintf("lifecycle_rule.%d.transition", i)).(*schema.Set).List()
  1572  		if len(transitions) > 0 {
  1573  			rule.Transitions = make([]*s3.Transition, 0, len(transitions))
  1574  			for _, transition := range transitions {
  1575  				transition := transition.(map[string]interface{})
  1576  				i := &s3.Transition{}
  1577  				if val, ok := transition["date"].(string); ok && val != "" {
  1578  					t, err := time.Parse(time.RFC3339, fmt.Sprintf("%sT00:00:00Z", val))
  1579  					if err != nil {
  1580  						return fmt.Errorf("Error Parsing AWS S3 Bucket Lifecycle Transition Date: %s", err.Error())
  1581  					}
  1582  					i.Date = aws.Time(t)
  1583  				} else if val, ok := transition["days"].(int); ok && val > 0 {
  1584  					i.Days = aws.Int64(int64(val))
  1585  				}
  1586  				if val, ok := transition["storage_class"].(string); ok && val != "" {
  1587  					i.StorageClass = aws.String(val)
  1588  				}
  1589  
  1590  				rule.Transitions = append(rule.Transitions, i)
  1591  			}
  1592  		}
  1593  		// NoncurrentVersionTransitions
  1594  		nc_transitions := d.Get(fmt.Sprintf("lifecycle_rule.%d.noncurrent_version_transition", i)).(*schema.Set).List()
  1595  		if len(nc_transitions) > 0 {
  1596  			rule.NoncurrentVersionTransitions = make([]*s3.NoncurrentVersionTransition, 0, len(nc_transitions))
  1597  			for _, transition := range nc_transitions {
  1598  				transition := transition.(map[string]interface{})
  1599  				i := &s3.NoncurrentVersionTransition{}
  1600  				if val, ok := transition["days"].(int); ok && val > 0 {
  1601  					i.NoncurrentDays = aws.Int64(int64(val))
  1602  				}
  1603  				if val, ok := transition["storage_class"].(string); ok && val != "" {
  1604  					i.StorageClass = aws.String(val)
  1605  				}
  1606  
  1607  				rule.NoncurrentVersionTransitions = append(rule.NoncurrentVersionTransitions, i)
  1608  			}
  1609  		}
  1610  
  1611  		rules = append(rules, rule)
  1612  	}
  1613  
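        	// Apply the assembled rules as the bucket's lifecycle configuration. Note that the
        	// retry callback below marks every error as non-retryable, so the request is
        	// effectively attempted only once within the one-minute retry window.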
  1614  	i := &s3.PutBucketLifecycleConfigurationInput{
  1615  		Bucket: aws.String(bucket),
  1616  		LifecycleConfiguration: &s3.BucketLifecycleConfiguration{
  1617  			Rules: rules,
  1618  		},
  1619  	}
  1620  
  1621  	err := resource.Retry(1*time.Minute, func() *resource.RetryError {
  1622  		if _, err := s3conn.PutBucketLifecycleConfiguration(i); err != nil {
  1623  			return resource.NonRetryableError(err)
  1624  		}
  1625  		return nil
  1626  	})
  1627  	if err != nil {
  1628  		return fmt.Errorf("Error putting S3 lifecycle: %s", err)
  1629  	}
  1630  
  1631  	return nil
  1632  }
  1633  
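        // flattenAwsS3BucketReplicationConfiguration converts the S3 API replication
        // configuration into the nested map/set structure used by the Terraform schema.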
  1634  func flattenAwsS3BucketReplicationConfiguration(r *s3.ReplicationConfiguration) []map[string]interface{} {
  1635  	replication_configuration := make([]map[string]interface{}, 0, 1)
  1636  	m := make(map[string]interface{})
  1637  
  1638  	if r.Role != nil && *r.Role != "" {
  1639  		m["role"] = *r.Role
  1640  	}
  1641  
  1642  	rules := make([]interface{}, 0, len(r.Rules))
  1643  	for _, v := range r.Rules {
  1644  		t := make(map[string]interface{})
  1645  		if v.Destination != nil {
  1646  			rd := make(map[string]interface{})
  1647  			if v.Destination.Bucket != nil {
  1648  				rd["bucket"] = *v.Destination.Bucket
  1649  			}
  1650  			if v.Destination.StorageClass != nil {
  1651  				rd["storage_class"] = *v.Destination.StorageClass
  1652  			}
  1653  			t["destination"] = schema.NewSet(destinationHash, []interface{}{rd})
  1654  		}
  1655  
  1656  		if v.ID != nil {
  1657  			t["id"] = *v.ID
  1658  		}
  1659  		if v.Prefix != nil {
  1660  			t["prefix"] = *v.Prefix
  1661  		}
  1662  		if v.Status != nil {
  1663  			t["status"] = *v.Status
  1664  		}
  1665  		rules = append(rules, t)
  1666  	}
  1667  	m["rules"] = schema.NewSet(rulesHash, rules)
  1668  
  1669  	replication_configuration = append(replication_configuration, m)
  1670  
  1671  	return replication_configuration
  1672  }
  1673  
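        // normalizeRoutingRules marshals the website routing rules to JSON, removing any
        // fields that the AWS SDK left nil so the stored string stays minimal.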
  1674  func normalizeRoutingRules(w []*s3.RoutingRule) (string, error) {
  1675  	withNulls, err := json.Marshal(w)
  1676  	if err != nil {
  1677  		return "", err
  1678  	}
  1679  
  1680  	var rules []map[string]interface{}
  1681  	if err := json.Unmarshal(withNulls, &rules); err != nil {
  1682  		return "", err
  1683  	}
  1684  
  1685  	var cleanRules []map[string]interface{}
  1686  	for _, rule := range rules {
  1687  		cleanRules = append(cleanRules, removeNil(rule))
  1688  	}
  1689  
  1690  	withoutNulls, err := json.Marshal(cleanRules)
  1691  	if err != nil {
  1692  		return "", err
  1693  	}
  1694  
  1695  	return string(withoutNulls), nil
  1696  }
  1697  
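        // removeNil returns a copy of the map with nil values dropped, recursing into
        // nested maps.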
  1698  func removeNil(data map[string]interface{}) map[string]interface{} {
  1699  	withoutNil := make(map[string]interface{})
  1700  
  1701  	for k, v := range data {
  1702  		if v == nil {
  1703  			continue
  1704  		}
  1705  
  1706  		switch v := v.(type) {
  1707  		case map[string]interface{}:
  1708  			withoutNil[k] = removeNil(v)
  1709  		default:
  1710  			withoutNil[k] = v
  1711  		}
  1712  	}
  1713  
  1714  	return withoutNil
  1715  }
  1716  
  1717  // normalizeJson re-marshals a JSON string into a canonical form. Deprecated: use normalizeJsonString instead.
  1718  func normalizeJson(jsonString interface{}) string {
  1719  	if jsonString == nil || jsonString == "" {
  1720  		return ""
  1721  	}
  1722  	var j interface{}
  1723  	err := json.Unmarshal([]byte(jsonString.(string)), &j)
  1724  	if err != nil {
  1725  		return fmt.Sprintf("Error parsing JSON: %s", err)
  1726  	}
  1727  	b, _ := json.Marshal(j)
  1728  	return string(b)
  1729  }
  1730  
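        // normalizeRegion returns "us-east-1" when the bucket location is reported as an
        // empty string.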
  1731  func normalizeRegion(region string) string {
  1732  	// Default to us-east-1 if the bucket doesn't have a region:
  1733  	// http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html
  1734  	if region == "" {
  1735  		region = "us-east-1"
  1736  	}
  1737  
  1738  	return region
  1739  }
  1740  
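        // validateS3BucketAccelerationStatus ensures the acceleration status is either
        // "Enabled" or "Suspended".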
  1741  func validateS3BucketAccelerationStatus(v interface{}, k string) (ws []string, errors []error) {
  1742  	validTypes := map[string]struct{}{
  1743  		"Enabled":   {},
  1744  		"Suspended": {},
  1745  	}
  1746  
  1747  	if _, ok := validTypes[v.(string)]; !ok {
  1748  		errors = append(errors, fmt.Errorf("S3 Bucket Acceleration Status %q is invalid, must be %q or %q", v.(string), "Enabled", "Suspended"))
  1749  	}
  1750  	return
  1751  }
  1752  
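        // validateS3BucketRequestPayerType ensures the request payer is either
        // "Requester" or "BucketOwner".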
  1753  func validateS3BucketRequestPayerType(v interface{}, k string) (ws []string, errors []error) {
  1754  	value := v.(string)
  1755  	if value != s3.PayerRequester && value != s3.PayerBucketOwner {
  1756  		errors = append(errors, fmt.Errorf(
  1757  			"%q contains an invalid Request Payer type %q. Valid types are either %q or %q",
  1758  			k, value, s3.PayerRequester, s3.PayerBucketOwner))
  1759  	}
  1760  	return
  1761  }
  1762  
  1763  // validateS3BucketName validates an S3 bucket name. Buckets outside of the us-east-1
  1764  // region have to be DNS-compliant; buckets in us-east-1 follow the looser legacy rules.
  1765  // Once the same restrictions apply in us-east-1, this can be refactored as a SchemaValidateFunc.
  1766  func validateS3BucketName(value string, region string) error {
  1767  	if region != "us-east-1" {
  1768  		if (len(value) < 3) || (len(value) > 63) {
  1769  			return fmt.Errorf("%q must contain from 3 to 63 characters", value)
  1770  		}
  1771  		if !regexp.MustCompile(`^[0-9a-z-.]+$`).MatchString(value) {
  1772  			return fmt.Errorf("only lowercase alphanumeric characters, hyphens, and periods allowed in %q", value)
  1773  		}
  1774  		if regexp.MustCompile(`^(?:[0-9]{1,3}\.){3}[0-9]{1,3}$`).MatchString(value) {
  1775  			return fmt.Errorf("%q must not be formatted as an IP address", value)
  1776  		}
  1777  		if strings.HasPrefix(value, `.`) {
  1778  			return fmt.Errorf("%q cannot start with a period", value)
  1779  		}
  1780  		if strings.HasSuffix(value, `.`) {
  1781  			return fmt.Errorf("%q cannot end with a period", value)
  1782  		}
  1783  		if strings.Contains(value, `..`) {
  1784  			return fmt.Errorf("only one period is allowed between labels in %q", value)
  1785  		}
  1786  	} else {
  1787  		if len(value) > 255 {
  1788  			return fmt.Errorf("%q must contain fewer than 256 characters", value)
  1789  		}
  1790  		if !regexp.MustCompile(`^[0-9a-zA-Z-._]+$`).MatchString(value) {
  1791  			return fmt.Errorf("only alphanumeric characters, hyphens, periods, and underscores allowed in %q", value)
  1792  		}
  1793  	}
  1794  	return nil
  1795  }
  1796  
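        // expirationHash builds the set hash for a lifecycle rule expiration block from
        // its date, days, and expired_object_delete_marker attributes.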
  1797  func expirationHash(v interface{}) int {
  1798  	var buf bytes.Buffer
  1799  	m := v.(map[string]interface{})
  1800  	if v, ok := m["date"]; ok {
  1801  		buf.WriteString(fmt.Sprintf("%s-", v.(string)))
  1802  	}
  1803  	if v, ok := m["days"]; ok {
  1804  		buf.WriteString(fmt.Sprintf("%d-", v.(int)))
  1805  	}
  1806  	if v, ok := m["expired_object_delete_marker"]; ok {
  1807  		buf.WriteString(fmt.Sprintf("%t-", v.(bool)))
  1808  	}
  1809  	return hashcode.String(buf.String())
  1810  }
  1811  
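        // transitionHash builds the set hash for a lifecycle rule transition block from
        // its date, days, and storage_class attributes.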
  1812  func transitionHash(v interface{}) int {
  1813  	var buf bytes.Buffer
  1814  	m := v.(map[string]interface{})
  1815  	if v, ok := m["date"]; ok {
  1816  		buf.WriteString(fmt.Sprintf("%s-", v.(string)))
  1817  	}
  1818  	if v, ok := m["days"]; ok {
  1819  		buf.WriteString(fmt.Sprintf("%d-", v.(int)))
  1820  	}
  1821  	if v, ok := m["storage_class"]; ok {
  1822  		buf.WriteString(fmt.Sprintf("%s-", v.(string)))
  1823  	}
  1824  	return hashcode.String(buf.String())
  1825  }
  1826  
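        // rulesHash builds the set hash for a replication rule from its id, prefix, and
        // status attributes.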
  1827  func rulesHash(v interface{}) int {
  1828  	var buf bytes.Buffer
  1829  	m := v.(map[string]interface{})
  1830  
  1831  	if v, ok := m["id"]; ok {
  1832  		buf.WriteString(fmt.Sprintf("%s-", v.(string)))
  1833  	}
  1834  	if v, ok := m["prefix"]; ok {
  1835  		buf.WriteString(fmt.Sprintf("%s-", v.(string)))
  1836  	}
  1837  	if v, ok := m["status"]; ok {
  1838  		buf.WriteString(fmt.Sprintf("%s-", v.(string)))
  1839  	}
  1840  	return hashcode.String(buf.String())
  1841  }
  1842  
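        // destinationHash builds the set hash for a replication destination from its
        // bucket and storage_class attributes.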
  1843  func destinationHash(v interface{}) int {
  1844  	var buf bytes.Buffer
  1845  	m := v.(map[string]interface{})
  1846  
  1847  	if v, ok := m["bucket"]; ok {
  1848  		buf.WriteString(fmt.Sprintf("%s-", v.(string)))
  1849  	}
  1850  	if v, ok := m["storage_class"]; ok {
  1851  		buf.WriteString(fmt.Sprintf("%s-", v.(string)))
  1852  	}
  1853  	return hashcode.String(buf.String())
  1854  }
  1855  
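        // S3Website holds the computed website endpoint and domain for a bucket.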
  1856  type S3Website struct {
  1857  	Endpoint, Domain string
  1858  }