github.com/mohanarpit/terraform@v0.6.16-0.20160909104007-291f29853544/builtin/providers/aws/resource_aws_s3_bucket_object.go

package aws

import (
	"bytes"
	"fmt"
	"io"
	"log"
	"os"
	"sort"
	"strings"

	"github.com/hashicorp/terraform/helper/schema"
	"github.com/mitchellh/go-homedir"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/s3"
)

// resourceAwsS3BucketObject defines the schema and CRUD handlers for the
// aws_s3_bucket_object resource.
func resourceAwsS3BucketObject() *schema.Resource {
	return &schema.Resource{
		Create: resourceAwsS3BucketObjectPut,
		Read:   resourceAwsS3BucketObjectRead,
		Update: resourceAwsS3BucketObjectPut,
		Delete: resourceAwsS3BucketObjectDelete,

		Schema: map[string]*schema.Schema{
			"bucket": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},

			"acl": &schema.Schema{
				Type:         schema.TypeString,
				Default:      "private",
				Optional:     true,
				ValidateFunc: validateS3BucketObjectAclType,
			},

			"cache_control": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
			},

			"content_disposition": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
			},

			"content_encoding": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
			},

			"content_language": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
			},

			"content_type": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
			},

			"key": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},

			"source": &schema.Schema{
				Type:          schema.TypeString,
				Optional:      true,
				ConflictsWith: []string{"content"},
			},

			"content": &schema.Schema{
				Type:          schema.TypeString,
				Optional:      true,
				ConflictsWith: []string{"source"},
			},

			"storage_class": &schema.Schema{
				Type:         schema.TypeString,
				Optional:     true,
				Computed:     true,
				ValidateFunc: validateS3BucketObjectStorageClassType,
			},

			"kms_key_id": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
			},

			"etag": &schema.Schema{
				Type: schema.TypeString,
				// This will conflict with SSE-C and SSE-KMS encryption and multi-part upload
				// if/when it's actually implemented. The Etag then won't match raw-file MD5.
				// See http://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonResponseHeaders.html
				Optional: true,
				Computed: true,
			},

			"version_id": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},
		},
	}
}
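
// Illustrative usage, not part of the original file: a minimal configuration
// the schema above is expected to accept, assuming a configured aws provider.
// The bucket name, key, and content are placeholders.
//
//	resource "aws_s3_bucket_object" "example" {
//	  bucket  = "some-bucket"
//	  key     = "path/to/object"
//	  content = "hello world"
//	  acl     = "private"
//	}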

// resourceAwsS3BucketObjectPut uploads the object from either a local file
// ("source") or an inline string ("content"), optionally encrypting it with
// the given KMS key, and records the resulting ETag and version ID.
func resourceAwsS3BucketObjectPut(d *schema.ResourceData, meta interface{}) error {
	s3conn := meta.(*AWSClient).s3conn

	var body io.ReadSeeker

	if v, ok := d.GetOk("source"); ok {
		source := v.(string)
		path, err := homedir.Expand(source)
		if err != nil {
			return fmt.Errorf("Error expanding homedir in source (%s): %s", source, err)
		}
		file, err := os.Open(path)
		if err != nil {
			return fmt.Errorf("Error opening S3 bucket object source (%s): %s", source, err)
		}
		// Close the file handle once the upload below has completed.
		defer file.Close()

		body = file
	} else if v, ok := d.GetOk("content"); ok {
		content := v.(string)
		body = bytes.NewReader([]byte(content))
	} else {
		return fmt.Errorf("Must specify \"source\" or \"content\" field")
	}

	if _, ok := d.GetOk("kms_key_id"); ok {
		if _, ok := d.GetOk("etag"); ok {
			return fmt.Errorf("Unable to specify 'kms_key_id' and 'etag' together because 'etag' wouldn't equal the MD5 digest of the raw object data")
		}
	}

	bucket := d.Get("bucket").(string)
	key := d.Get("key").(string)

	putInput := &s3.PutObjectInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
		ACL:    aws.String(d.Get("acl").(string)),
		Body:   body,
	}

	if v, ok := d.GetOk("storage_class"); ok {
		putInput.StorageClass = aws.String(v.(string))
	}

	if v, ok := d.GetOk("cache_control"); ok {
		putInput.CacheControl = aws.String(v.(string))
	}

	if v, ok := d.GetOk("content_type"); ok {
		putInput.ContentType = aws.String(v.(string))
	}

	if v, ok := d.GetOk("content_encoding"); ok {
		putInput.ContentEncoding = aws.String(v.(string))
	}

	if v, ok := d.GetOk("content_language"); ok {
		putInput.ContentLanguage = aws.String(v.(string))
	}

	if v, ok := d.GetOk("content_disposition"); ok {
		putInput.ContentDisposition = aws.String(v.(string))
	}

	if v, ok := d.GetOk("kms_key_id"); ok {
		putInput.SSEKMSKeyId = aws.String(v.(string))
		putInput.ServerSideEncryption = aws.String("aws:kms")
	}

	resp, err := s3conn.PutObject(putInput)
	if err != nil {
		return fmt.Errorf("Error putting object in S3 bucket (%s): %s", bucket, err)
	}

	// See https://forums.aws.amazon.com/thread.jspa?threadID=44003
	d.Set("etag", strings.Trim(*resp.ETag, `"`))

	d.Set("version_id", resp.VersionId)
	d.SetId(key)
	return resourceAwsS3BucketObjectRead(d, meta)
}
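
// exampleMinimalPutInput is an illustrative sketch, not part of the original
// provider code: it shows the minimal PutObjectInput the Put handler above
// builds when only bucket, key, inline content and the default ACL are set.
func exampleMinimalPutInput(bucket, key, content string) *s3.PutObjectInput {
	return &s3.PutObjectInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
		ACL:    aws.String("private"),
		Body:   bytes.NewReader([]byte(content)),
	}
}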

// resourceAwsS3BucketObjectRead refreshes the object's metadata via a
// HeadObject call, conditioned on the ETag recorded in state.
func resourceAwsS3BucketObjectRead(d *schema.ResourceData, meta interface{}) error {
	s3conn := meta.(*AWSClient).s3conn

	bucket := d.Get("bucket").(string)
	key := d.Get("key").(string)
	etag := d.Get("etag").(string)

	resp, err := s3conn.HeadObject(
		&s3.HeadObjectInput{
			Bucket:  aws.String(bucket),
			Key:     aws.String(key),
			IfMatch: aws.String(etag),
		})

	if err != nil {
		// If S3 returns a 404 Request Failure, mark the object as destroyed
		if awsErr, ok := err.(awserr.RequestFailure); ok && awsErr.StatusCode() == 404 {
			d.SetId("")
			log.Printf("[WARN] Error Reading Object (%s), object not found (HTTP status 404)", key)
			return nil
		}
		return err
	}
	log.Printf("[DEBUG] Reading S3 Bucket Object meta: %s", resp)

	d.Set("cache_control", resp.CacheControl)
	d.Set("content_disposition", resp.ContentDisposition)
	d.Set("content_encoding", resp.ContentEncoding)
	d.Set("content_language", resp.ContentLanguage)
	d.Set("content_type", resp.ContentType)
	d.Set("version_id", resp.VersionId)
	d.Set("kms_key_id", resp.SSEKMSKeyId)

	// S3 omits the storage class from the response when it is "STANDARD"
	// (the default), so assume that and override it if a class is returned.
	d.Set("storage_class", s3.StorageClassStandard)
	if resp.StorageClass != nil {
		d.Set("storage_class", resp.StorageClass)
	}

	return nil
}
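
// isS3NotFound is an illustrative helper, not part of the original file: it
// restates the 404 check used in the Read handler above as a standalone
// predicate.
func isS3NotFound(err error) bool {
	awsErr, ok := err.(awserr.RequestFailure)
	return ok && awsErr.StatusCode() == 404
}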

// resourceAwsS3BucketObjectDelete removes the object; if a version_id is
// recorded in state, it deletes every version of the object.
func resourceAwsS3BucketObjectDelete(d *schema.ResourceData, meta interface{}) error {
	s3conn := meta.(*AWSClient).s3conn

	bucket := d.Get("bucket").(string)
	key := d.Get("key").(string)

	if _, ok := d.GetOk("version_id"); ok {
		// The bucket is versioned; we need to delete all versions
		vInput := s3.ListObjectVersionsInput{
			Bucket: aws.String(bucket),
			Prefix: aws.String(key),
		}
		out, err := s3conn.ListObjectVersions(&vInput)
		if err != nil {
			return fmt.Errorf("Failed listing S3 object versions: %s", err)
		}

		for _, v := range out.Versions {
			input := s3.DeleteObjectInput{
				Bucket:    aws.String(bucket),
				Key:       aws.String(key),
				VersionId: v.VersionId,
			}
			_, err := s3conn.DeleteObject(&input)
			if err != nil {
				return fmt.Errorf("Error deleting S3 object version of %s:\n %s:\n %s",
					key, v, err)
			}
		}
	} else {
		// Just delete the object
		input := s3.DeleteObjectInput{
			Bucket: aws.String(bucket),
			Key:    aws.String(key),
		}
		_, err := s3conn.DeleteObject(&input)
		if err != nil {
			return fmt.Errorf("Error deleting S3 bucket object: %s", err)
		}
	}

	return nil
}
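
// exampleDeleteObjectVersion is an illustrative sketch, not part of the
// original file: it shows the single-version delete call that the loop in the
// Delete handler above issues for each version returned by ListObjectVersions.
func exampleDeleteObjectVersion(conn *s3.S3, bucket, key, versionID string) error {
	_, err := conn.DeleteObject(&s3.DeleteObjectInput{
		Bucket:    aws.String(bucket),
		Key:       aws.String(key),
		VersionId: aws.String(versionID),
	})
	return err
}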

func validateS3BucketObjectAclType(v interface{}, k string) (ws []string, errors []error) {
	value := v.(string)

	cannedAcls := map[string]bool{
		s3.ObjectCannedACLPrivate:                true,
		s3.ObjectCannedACLPublicRead:             true,
		s3.ObjectCannedACLPublicReadWrite:        true,
		s3.ObjectCannedACLAuthenticatedRead:      true,
		s3.ObjectCannedACLAwsExecRead:            true,
		s3.ObjectCannedACLBucketOwnerRead:        true,
		s3.ObjectCannedACLBucketOwnerFullControl: true,
	}

	sentenceJoin := func(m map[string]bool) string {
		keys := make([]string, 0, len(m))
		for k := range m {
			keys = append(keys, fmt.Sprintf("%q", k))
		}
		sort.Strings(keys)

		length := len(keys)
		words := make([]string, length)
		copy(words, keys)

		words[length-1] = fmt.Sprintf("or %s", words[length-1])
		return strings.Join(words, ", ")
	}

	if _, ok := cannedAcls[value]; !ok {
		errors = append(errors, fmt.Errorf(
			"%q contains an invalid canned ACL type %q. Valid types are either %s",
			k, value, sentenceJoin(cannedAcls)))
	}
	return
}
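
// exampleValidateAcl is an illustrative sketch, not part of the original file:
// it exercises the canned ACL validator above with one accepted and one
// rejected value ("log-delivery-write" is a bucket-level canned ACL, so the
// object validator rejects it).
func exampleValidateAcl() (validErrs, invalidErrs []error) {
	_, validErrs = validateS3BucketObjectAclType("public-read", "acl")          // no errors
	_, invalidErrs = validateS3BucketObjectAclType("log-delivery-write", "acl") // one error
	return
}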

func validateS3BucketObjectStorageClassType(v interface{}, k string) (ws []string, errors []error) {
	value := v.(string)

	storageClass := map[string]bool{
		s3.StorageClassStandard:          true,
		s3.StorageClassReducedRedundancy: true,
		s3.StorageClassStandardIa:        true,
	}

	if _, ok := storageClass[value]; !ok {
		errors = append(errors, fmt.Errorf(
			"%q contains an invalid Storage Class type %q. Valid types are either %q, %q, or %q",
			k, value, s3.StorageClassStandard, s3.StorageClassReducedRedundancy,
			s3.StorageClassStandardIa))
	}
	return
}
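
// exampleValidateStorageClass is an illustrative sketch, not part of the
// original file: the validator above only accepts STANDARD,
// REDUCED_REDUNDANCY, and STANDARD_IA, so an unknown class such as "GLACIER"
// yields a single validation error.
func exampleValidateStorageClass() []error {
	_, errs := validateS3BucketObjectStorageClassType("GLACIER", "storage_class")
	return errs
}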