github.com/rothwerx/packer@v0.9.0/post-processor/amazon-import/post-processor.go

package amazonimport

import (
	"fmt"
	"log"
	"os"
	"strings"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
	"github.com/aws/aws-sdk-go/service/s3"
	"github.com/aws/aws-sdk-go/service/s3/s3manager"
	// This is bad; it should be pulled out into a common folder shared by
	// both builders and post-processors
	awscommon "github.com/mitchellh/packer/builder/amazon/common"
	"github.com/mitchellh/packer/common"
	"github.com/mitchellh/packer/helper/config"
	"github.com/mitchellh/packer/packer"
	"github.com/mitchellh/packer/template/interpolate"
)

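// BuilderId is the unique identifier attached to artifacts produced by this
// post-processor.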
const BuilderId = "packer.post-processor.amazon-import"

// Config is the configuration for this post-processor.
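//
// An illustrative template fragment (values are placeholders, and the
// post-processor name assumes the plugin is registered as "amazon-import"):
//
//   "post-processors": [{
//     "type": "amazon-import",
//     "s3_bucket_name": "my-import-bucket",
//     "s3_key_name": "packer-import-{{timestamp}}.ova",
//     "tags": {
//       "Description": "packer amazon-import {{timestamp}}"
//     }
//   }]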
type Config struct {
	common.PackerConfig    `mapstructure:",squash"`
	awscommon.AccessConfig `mapstructure:",squash"`

	// Variables specific to this post-processor
	S3Bucket  string            `mapstructure:"s3_bucket_name"`
	S3Key     string            `mapstructure:"s3_key_name"`
	SkipClean bool              `mapstructure:"skip_clean"`
	Tags      map[string]string `mapstructure:"tags"`

	ctx interpolate.Context
}

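// PostProcessor uploads the OVA produced by an upstream builder to S3 and
// imports it into EC2 as an AMI.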
type PostProcessor struct {
	config Config
}

// Configure parses and validates the post-processor configuration.
func (p *PostProcessor) Configure(raws ...interface{}) error {
	p.config.ctx.Funcs = awscommon.TemplateFuncs
	err := config.Decode(&p.config, &config.DecodeOpts{
		Interpolate:        true,
		InterpolateContext: &p.config.ctx,
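		// s3_key_name may contain template functions such as {{timestamp}},
		// so it is only validated below and rendered later in PostProcess.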
		InterpolateFilter: &interpolate.RenderFilter{
			Exclude: []string{
				"s3_key_name",
			},
		},
	}, raws...)
	if err != nil {
		return err
	}

	// Set defaults
	if p.config.S3Key == "" {
		p.config.S3Key = "packer-import-{{timestamp}}.ova"
	}

	errs := new(packer.MultiError)

	// Validate the s3_key_name template
	if err = interpolate.Validate(p.config.S3Key, &p.config.ctx); err != nil {
		errs = packer.MultiErrorAppend(
			errs, fmt.Errorf("Error parsing s3_key_name template: %s", err))
	}

	// Check that we have AWS access variables defined somewhere
	errs = packer.MultiErrorAppend(errs, p.config.AccessConfig.Prepare(&p.config.ctx)...)

	// Define all our required parameters
	templates := map[string]*string{
		"s3_bucket_name": &p.config.S3Bucket,
	}
	// Check that our required parameters are defined
	for key, ptr := range templates {
		if *ptr == "" {
			errs = packer.MultiErrorAppend(
				errs, fmt.Errorf("%s must be set", key))
		}
	}

	// If anything was flagged, return the errors back up the stack
	if len(errs.Errors) > 0 {
		return errs
	}

	return nil
}

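// PostProcess uploads the builder's OVA artifact to S3, starts an EC2
// ImportImage task, waits for it to complete, optionally tags the resulting
// AMI and its snapshots, and removes the uploaded object unless skip_clean
// is set. It returns the imported AMI as a new artifact.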
func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, error) {
	var err error

	config, err := p.config.Config()
	if err != nil {
		return nil, false, err
	}

	// Render this key since we didn't in the configure phase
	p.config.S3Key, err = interpolate.Render(p.config.S3Key, &p.config.ctx)
	if err != nil {
		return nil, false, fmt.Errorf("Error rendering s3_key_name template: %s", err)
	}
	log.Printf("Rendered s3_key_name as %s", p.config.S3Key)

	log.Println("Looking for OVA in artifact")
	// Locate the files output from the builder
	source := ""
	for _, path := range artifact.Files() {
		if strings.HasSuffix(path, ".ova") {
			source = path
			break
		}
	}

	// Hope we found something useful
	if source == "" {
		return nil, false, fmt.Errorf("No OVA file found in artifact from builder")
	}

	// Set up the AWS session
	log.Println("Creating AWS session")
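	// Note: this variable shadows the imported session package; the configured
	// session is reused below by the S3 uploader, the EC2 client, and the S3
	// cleanup client.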
	session := session.New(config)

	// Open the source file
	log.Printf("Opening file %s to upload", source)
	file, err := os.Open(source)
	if err != nil {
		return nil, false, fmt.Errorf("Failed to open %s: %s", source, err)
	}

	ui.Message(fmt.Sprintf("Uploading %s to s3://%s/%s", source, p.config.S3Bucket, p.config.S3Key))

	// Copy the OVA file into the specified S3 bucket
	uploader := s3manager.NewUploader(session)
	_, err = uploader.Upload(&s3manager.UploadInput{
		Body:   file,
		Bucket: &p.config.S3Bucket,
		Key:    &p.config.S3Key,
	})
	if err != nil {
		return nil, false, fmt.Errorf("Failed to upload %s: %s", source, err)
	}

	// May as well stop holding this open now
	file.Close()

	ui.Message(fmt.Sprintf("Completed upload of %s to s3://%s/%s", source, p.config.S3Bucket, p.config.S3Key))

	// Call the EC2 image import process
	log.Printf("Calling EC2 to import from s3://%s/%s", p.config.S3Bucket, p.config.S3Key)

	ec2conn := ec2.New(session)
	import_start, err := ec2conn.ImportImage(&ec2.ImportImageInput{
		DiskContainers: []*ec2.ImageDiskContainer{
			{
				UserBucket: &ec2.UserBucket{
					S3Bucket: &p.config.S3Bucket,
					S3Key:    &p.config.S3Key,
				},
			},
		},
	})

	if err != nil {
		return nil, false, fmt.Errorf("Failed to start import from s3://%s/%s: %s", p.config.S3Bucket, p.config.S3Key, err)
	}

	ui.Message(fmt.Sprintf("Started import of s3://%s/%s, task id %s", p.config.S3Bucket, p.config.S3Key, *import_start.ImportTaskId))

	// Wait for the import process to complete; this takes a while
	ui.Message(fmt.Sprintf("Waiting for task %s to complete (may take a while)", *import_start.ImportTaskId))

	stateChange := awscommon.StateChangeConf{
		Pending: []string{"pending", "active"},
		Refresh: awscommon.ImportImageRefreshFunc(ec2conn, *import_start.ImportTaskId),
		Target:  "completed",
	}

	// Do the actual wait for the state change.
	// We ignore errors from this and check the job state via the AWS API below.
	awscommon.WaitForState(&stateChange)

	// Retrieve the outcome of the import task
	import_result, err := ec2conn.DescribeImportImageTasks(&ec2.DescribeImportImageTasksInput{
		ImportTaskIds: []*string{
			import_start.ImportTaskId,
		},
	})

	if err != nil {
		return nil, false, fmt.Errorf("Failed to find import task %s: %s", *import_start.ImportTaskId, err)
	}

	// Check that it actually completed
	if *import_result.ImportImageTasks[0].Status != "completed" {
		// The most useful error message is from the job itself
		return nil, false, fmt.Errorf("Import task %s failed: %s", *import_start.ImportTaskId, *import_result.ImportImageTasks[0].StatusMessage)
	}

	ui.Message(fmt.Sprintf("Import task %s complete", *import_start.ImportTaskId))

	// Pull the AMI ID out of the completed job
	createdami := *import_result.ImportImageTasks[0].ImageId

	// If we have tags, apply them now to both the AMI and the snapshots
	// created by the import
	if len(p.config.Tags) > 0 {
		var ec2Tags []*ec2.Tag

		log.Printf("Repacking tags into AWS format")

		for key, value := range p.config.Tags {
			ui.Message(fmt.Sprintf("Adding tag \"%s\": \"%s\"", key, value))
			ec2Tags = append(ec2Tags, &ec2.Tag{
				Key:   aws.String(key),
				Value: aws.String(value),
			})
		}

		resourceIds := []*string{&createdami}

		log.Printf("Getting details of %s", createdami)

		imageResp, err := ec2conn.DescribeImages(&ec2.DescribeImagesInput{
			ImageIds: resourceIds,
		})

		if err != nil {
			return nil, false, fmt.Errorf("Failed to retrieve details for AMI %s: %s", createdami, err)
		}

		if len(imageResp.Images) == 0 {
			return nil, false, fmt.Errorf("AMI %s has no images", createdami)
		}

		image := imageResp.Images[0]

		log.Printf("Walking block device mappings for %s to find snapshots", createdami)

		for _, device := range image.BlockDeviceMappings {
			if device.Ebs != nil && device.Ebs.SnapshotId != nil {
				ui.Message(fmt.Sprintf("Tagging snapshot %s", *device.Ebs.SnapshotId))
				resourceIds = append(resourceIds, device.Ebs.SnapshotId)
			}
		}

		ui.Message(fmt.Sprintf("Tagging AMI %s", createdami))

		_, err = ec2conn.CreateTags(&ec2.CreateTagsInput{
			Resources: resourceIds,
			Tags:      ec2Tags,
		})

		if err != nil {
			return nil, false, fmt.Errorf("Failed to add tags to resources %#v: %s", resourceIds, err)
		}

	}

	// Add the reported AMI ID to the artifact list
	log.Printf("Adding created AMI ID %s in region %s to output artifacts", createdami, *config.Region)
	artifact = &awscommon.Artifact{
		Amis: map[string]string{
			*config.Region: createdami,
		},
		BuilderIdValue: BuilderId,
		Conn:           ec2conn,
	}

	if !p.config.SkipClean {
		ui.Message(fmt.Sprintf("Deleting import source s3://%s/%s", p.config.S3Bucket, p.config.S3Key))
		s3conn := s3.New(session)
		_, err = s3conn.DeleteObject(&s3.DeleteObjectInput{
			Bucket: &p.config.S3Bucket,
			Key:    &p.config.S3Key,
		})
		if err != nil {
			return nil, false, fmt.Errorf("Failed to delete s3://%s/%s: %s", p.config.S3Bucket, p.config.S3Key, err)
		}
	}

	return artifact, false, nil
}