github.com/recobe182/terraform@v0.8.5-0.20170117231232-49ab22a935b7/builtin/providers/aws/resource_aws_s3_bucket_object.go

package aws

import (
	"bytes"
	"fmt"
	"io"
	"log"
	"os"
	"sort"
	"strings"

	"github.com/hashicorp/terraform/helper/schema"
	"github.com/mitchellh/go-homedir"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/s3"
)

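// resourceAwsS3BucketObject returns the schema and CRUD handlers for the
// aws_s3_bucket_object resource. Create and Update share the same Put
// implementation, and changing "bucket" or "key" forces a new object.
//
// A minimal configuration (the names below are illustrative, not taken from
// this file) might look like:
//
//	resource "aws_s3_bucket_object" "example" {
//	  bucket = "my-bucket"
//	  key    = "path/to/object"
//	  source = "./local-file.txt"
//	}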
func resourceAwsS3BucketObject() *schema.Resource {
	return &schema.Resource{
		Create: resourceAwsS3BucketObjectPut,
		Read:   resourceAwsS3BucketObjectRead,
		Update: resourceAwsS3BucketObjectPut,
		Delete: resourceAwsS3BucketObjectDelete,

		Schema: map[string]*schema.Schema{
			"bucket": {
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},

			"acl": {
				Type:         schema.TypeString,
				Default:      "private",
				Optional:     true,
				ValidateFunc: validateS3BucketObjectAclType,
			},

			"cache_control": {
				Type:     schema.TypeString,
				Optional: true,
			},

			"content_disposition": {
				Type:     schema.TypeString,
				Optional: true,
			},

			"content_encoding": {
				Type:     schema.TypeString,
				Optional: true,
			},

			"content_language": {
				Type:     schema.TypeString,
				Optional: true,
			},

			"content_type": {
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
			},

			"key": {
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},

			"source": {
				Type:          schema.TypeString,
				Optional:      true,
				ConflictsWith: []string{"content"},
			},

			"content": {
				Type:          schema.TypeString,
				Optional:      true,
				ConflictsWith: []string{"source"},
			},

			"storage_class": {
				Type:         schema.TypeString,
				Optional:     true,
				Computed:     true,
				ValidateFunc: validateS3BucketObjectStorageClassType,
			},

			"kms_key_id": {
				Type:         schema.TypeString,
				Optional:     true,
				ValidateFunc: validateArn,
			},

			"etag": {
				Type: schema.TypeString,
				// This will conflict with SSE-C and SSE-KMS encryption and multi-part upload
				// if/when it's actually implemented. The ETag then won't match the raw-file MD5.
				// See http://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonResponseHeaders.html
				Optional:      true,
				Computed:      true,
				ConflictsWith: []string{"kms_key_id"},
			},

			"version_id": {
				Type:     schema.TypeString,
				Computed: true,
			},
		},
	}
}

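// resourceAwsS3BucketObjectPut uploads the object body, taken from either the
// "source" file or the inline "content" string, applies the configured ACL,
// metadata and encryption settings, records the returned ETag and version ID,
// and then refreshes state via resourceAwsS3BucketObjectRead.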
func resourceAwsS3BucketObjectPut(d *schema.ResourceData, meta interface{}) error {
	s3conn := meta.(*AWSClient).s3conn

	var body io.ReadSeeker

	if v, ok := d.GetOk("source"); ok {
		source := v.(string)
		path, err := homedir.Expand(source)
		if err != nil {
			return fmt.Errorf("Error expanding homedir in source (%s): %s", source, err)
		}
		file, err := os.Open(path)
		if err != nil {
			return fmt.Errorf("Error opening S3 bucket object source (%s): %s", source, err)
		}
		// Close the file handle once the upload below has completed.
		defer file.Close()

		body = file
	} else if v, ok := d.GetOk("content"); ok {
		content := v.(string)
		body = bytes.NewReader([]byte(content))
	} else {
		return fmt.Errorf("Must specify \"source\" or \"content\" field")
	}

	bucket := d.Get("bucket").(string)
	key := d.Get("key").(string)

	putInput := &s3.PutObjectInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
		ACL:    aws.String(d.Get("acl").(string)),
		Body:   body,
	}

	if v, ok := d.GetOk("storage_class"); ok {
		putInput.StorageClass = aws.String(v.(string))
	}

	if v, ok := d.GetOk("cache_control"); ok {
		putInput.CacheControl = aws.String(v.(string))
	}

	if v, ok := d.GetOk("content_type"); ok {
		putInput.ContentType = aws.String(v.(string))
	}

	if v, ok := d.GetOk("content_encoding"); ok {
		putInput.ContentEncoding = aws.String(v.(string))
	}

	if v, ok := d.GetOk("content_language"); ok {
		putInput.ContentLanguage = aws.String(v.(string))
	}

	if v, ok := d.GetOk("content_disposition"); ok {
		putInput.ContentDisposition = aws.String(v.(string))
	}

	if v, ok := d.GetOk("kms_key_id"); ok {
		putInput.SSEKMSKeyId = aws.String(v.(string))
		putInput.ServerSideEncryption = aws.String("aws:kms")
	}

	resp, err := s3conn.PutObject(putInput)
	if err != nil {
		return fmt.Errorf("Error putting object in S3 bucket (%s): %s", bucket, err)
	}

	// S3 returns the ETag wrapped in double quotes; strip them before storing
	// it in state. See https://forums.aws.amazon.com/thread.jspa?threadID=44003
	d.Set("etag", strings.Trim(*resp.ETag, `"`))

	d.Set("version_id", resp.VersionId)
	d.SetId(key)
	return resourceAwsS3BucketObjectRead(d, meta)
}

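// resourceAwsS3BucketObjectRead issues a HeadObject request for the configured
// bucket and key and copies the returned metadata into state. A 404 response
// clears the resource ID so Terraform knows the object no longer exists.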
func resourceAwsS3BucketObjectRead(d *schema.ResourceData, meta interface{}) error {
	s3conn := meta.(*AWSClient).s3conn

	bucket := d.Get("bucket").(string)
	key := d.Get("key").(string)

	resp, err := s3conn.HeadObject(
		&s3.HeadObjectInput{
			Bucket: aws.String(bucket),
			Key:    aws.String(key),
		})

	if err != nil {
		// If S3 returns a 404 Request Failure, mark the object as destroyed
		if awsErr, ok := err.(awserr.RequestFailure); ok && awsErr.StatusCode() == 404 {
			d.SetId("")
			log.Printf("[WARN] Error Reading Object (%s), object not found (HTTP status 404)", key)
			return nil
		}
		return err
	}
	log.Printf("[DEBUG] Reading S3 Bucket Object meta: %s", resp)

	d.Set("cache_control", resp.CacheControl)
	d.Set("content_disposition", resp.ContentDisposition)
	d.Set("content_encoding", resp.ContentEncoding)
	d.Set("content_language", resp.ContentLanguage)
	d.Set("content_type", resp.ContentType)
	d.Set("version_id", resp.VersionId)
	d.Set("kms_key_id", resp.SSEKMSKeyId)
	d.Set("etag", strings.Trim(*resp.ETag, `"`))

	// S3 omits the storage class from the response when it is "STANDARD"
	// (the default), so assume that and override only when a class is returned.
	d.Set("storage_class", s3.StorageClassStandard)
	if resp.StorageClass != nil {
		d.Set("storage_class", resp.StorageClass)
	}

	return nil
}

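// resourceAwsS3BucketObjectDelete removes the object. When the state carries a
// version ID the bucket is versioned, so every version of the key is listed
// and deleted individually; otherwise a single DeleteObject call is enough.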
func resourceAwsS3BucketObjectDelete(d *schema.ResourceData, meta interface{}) error {
	s3conn := meta.(*AWSClient).s3conn

	bucket := d.Get("bucket").(string)
	key := d.Get("key").(string)

	if _, ok := d.GetOk("version_id"); ok {
		// The bucket is versioned, so delete every version of the object
		vInput := s3.ListObjectVersionsInput{
			Bucket: aws.String(bucket),
			Prefix: aws.String(key),
		}
		out, err := s3conn.ListObjectVersions(&vInput)
		if err != nil {
			return fmt.Errorf("Failed listing S3 object versions: %s", err)
		}

		for _, v := range out.Versions {
			// The listing is prefix-based, so skip versions that belong to
			// other keys which merely share this key as a prefix.
			if v.Key == nil || *v.Key != key {
				continue
			}
			input := s3.DeleteObjectInput{
				Bucket:    aws.String(bucket),
				Key:       aws.String(key),
				VersionId: v.VersionId,
			}
			_, err := s3conn.DeleteObject(&input)
			if err != nil {
				return fmt.Errorf("Error deleting S3 object version of %s:\n %s:\n %s",
					key, v, err)
			}
		}
	} else {
		// Just delete the object
		input := s3.DeleteObjectInput{
			Bucket: aws.String(bucket),
			Key:    aws.String(key),
		}
		_, err := s3conn.DeleteObject(&input)
		if err != nil {
			return fmt.Errorf("Error deleting S3 bucket object: %s", err)
		}
	}

	return nil
}

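// validateS3BucketObjectAclType checks that the "acl" argument is one of the
// canned ACLs accepted by S3 and, if not, returns an error listing the valid
// values.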
func validateS3BucketObjectAclType(v interface{}, k string) (ws []string, errors []error) {
	value := v.(string)

	cannedAcls := map[string]bool{
		s3.ObjectCannedACLPrivate:                true,
		s3.ObjectCannedACLPublicRead:             true,
		s3.ObjectCannedACLPublicReadWrite:        true,
		s3.ObjectCannedACLAuthenticatedRead:      true,
		s3.ObjectCannedACLAwsExecRead:            true,
		s3.ObjectCannedACLBucketOwnerRead:        true,
		s3.ObjectCannedACLBucketOwnerFullControl: true,
	}

	sentenceJoin := func(m map[string]bool) string {
		keys := make([]string, 0, len(m))
		for k := range m {
			keys = append(keys, fmt.Sprintf("%q", k))
		}
		sort.Strings(keys)

		length := len(keys)
		words := make([]string, length)
		copy(words, keys)

		words[length-1] = fmt.Sprintf("or %s", words[length-1])
		return strings.Join(words, ", ")
	}

	if _, ok := cannedAcls[value]; !ok {
		errors = append(errors, fmt.Errorf(
			"%q contains an invalid canned ACL type %q. Valid types are either %s",
			k, value, sentenceJoin(cannedAcls)))
	}
	return
}

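// validateS3BucketObjectStorageClassType checks that the "storage_class"
// argument is one of the storage classes supported by this resource.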
func validateS3BucketObjectStorageClassType(v interface{}, k string) (ws []string, errors []error) {
	value := v.(string)

	storageClass := map[string]bool{
		s3.StorageClassStandard:          true,
		s3.StorageClassReducedRedundancy: true,
		s3.StorageClassStandardIa:        true,
	}

	if _, ok := storageClass[value]; !ok {
		errors = append(errors, fmt.Errorf(
			"%q contains an invalid Storage Class type %q. Valid types are either %q, %q, or %q",
			k, value, s3.StorageClassStandard, s3.StorageClassReducedRedundancy,
			s3.StorageClassStandardIa))
	}
	return
}