github.com/emate/packer@v0.8.1-0.20150625195101-fe0fde195dc6/post-processor/compress/post-processor.go

package compress

import (
	"archive/tar"
	"archive/zip"
	"compress/gzip"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"regexp"
	"runtime"

	"github.com/klauspost/pgzip"
	"github.com/mitchellh/packer/common"
	"github.com/mitchellh/packer/helper/config"
	"github.com/mitchellh/packer/packer"
	"github.com/mitchellh/packer/template/interpolate"
	"github.com/pierrec/lz4"
)

var (
	// ErrInvalidCompressionLevel is returned when the compression level passed
	// to gzip is not in the expected range. See compress/flate for details.
	ErrInvalidCompressionLevel = fmt.Errorf(
		"Invalid compression level. Expected an integer from -1 to 9.")

	ErrWrongInputCount = fmt.Errorf(
		"Can only have 1 input file when not using tar/zip")

	filenamePattern = regexp.MustCompile(`(?:\.([a-z0-9]+))`)
)

type Config struct {
	common.PackerConfig `mapstructure:",squash"`

	// Fields from config file
	OutputPath        string `mapstructure:"output"`
	CompressionLevel  int    `mapstructure:"compression_level"`
	KeepInputArtifact bool   `mapstructure:"keep_input_artifact"`

	// Derived fields
	Archive   string
	Algorithm string

	ctx interpolate.Context
}

type PostProcessor struct {
	config Config
}

func (p *PostProcessor) Configure(raws ...interface{}) error {
	err := config.Decode(&p.config, &config.DecodeOpts{
		Interpolate:        true,
		InterpolateContext: &p.config.ctx,
		InterpolateFilter: &interpolate.RenderFilter{
			Exclude: []string{},
		},
	}, raws...)

	errs := new(packer.MultiError)

	// If there is no explicit number of Go threads to use, then set it
	if os.Getenv("GOMAXPROCS") == "" {
		runtime.GOMAXPROCS(runtime.NumCPU())
	}

	if p.config.OutputPath == "" {
		p.config.OutputPath = "packer_{{.BuildName}}_{{.Provider}}"
	}

	if err = interpolate.Validate(p.config.OutputPath, &p.config.ctx); err != nil {
		errs = packer.MultiErrorAppend(
			errs, fmt.Errorf("Error parsing target template: %s", err))
	}

	templates := map[string]*string{
		"output": &p.config.OutputPath,
	}

	if p.config.CompressionLevel > pgzip.BestCompression {
		p.config.CompressionLevel = pgzip.BestCompression
	}
	// Technically 0 means "don't compress" but I don't know how to
	// differentiate between "user entered zero" and "user entered nothing".
	// Also, why bother creating a compressed file with zero compression?
	if p.config.CompressionLevel == -1 || p.config.CompressionLevel == 0 {
		p.config.CompressionLevel = pgzip.DefaultCompression
	}

	for key, ptr := range templates {
		if *ptr == "" {
			errs = packer.MultiErrorAppend(
				errs, fmt.Errorf("%s must be set", key))
		}

		*ptr, err = interpolate.Render(p.config.OutputPath, &p.config.ctx)
		if err != nil {
			errs = packer.MultiErrorAppend(
				errs, fmt.Errorf("Error processing %s: %s", key, err))
		}
	}

	p.config.detectFromFilename()

	if len(errs.Errors) > 0 {
		return errs
	}

	return nil
}

func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, error) {

	target := p.config.OutputPath
	keep := p.config.KeepInputArtifact
	newArtifact := &Artifact{Path: target}

	outputFile, err := os.Create(target)
	if err != nil {
		return nil, false, fmt.Errorf(
			"Unable to create archive %s: %s", target, err)
	}
	defer outputFile.Close()

	// Setup output interface. If we're using compression, output is a
	// compression writer. Otherwise it's just a file.
	var output io.WriteCloser
	switch p.config.Algorithm {
	case "lz4":
		ui.Say(fmt.Sprintf("Using lz4 compression with %d cores for %s",
			runtime.GOMAXPROCS(-1), target))
		output, err = makeLZ4Writer(outputFile, p.config.CompressionLevel)
		defer output.Close()
	case "pgzip":
		ui.Say(fmt.Sprintf("Using pgzip compression with %d cores for %s",
			runtime.GOMAXPROCS(-1), target))
		output, err = makePgzipWriter(outputFile, p.config.CompressionLevel)
		defer output.Close()
	default:
		output = outputFile
	}

	compression := p.config.Algorithm
	if compression == "" {
		compression = "no compression"
	}

	// Build an archive, if we're supposed to do that.
	switch p.config.Archive {
	case "tar":
		ui.Say(fmt.Sprintf("Tarring %s with %s", target, compression))
		err = createTarArchive(artifact.Files(), output)
		if err != nil {
			return nil, keep, fmt.Errorf("Error creating tar: %s", err)
		}
	case "zip":
		ui.Say(fmt.Sprintf("Zipping %s", target))
		err = createZipArchive(artifact.Files(), output)
		if err != nil {
			return nil, keep, fmt.Errorf("Error creating zip: %s", err)
		}
	default:
		// Filename indicates no tarball (just compress) so we'll do an io.Copy
		// into our compressor.
		if len(artifact.Files()) != 1 {
			return nil, keep, fmt.Errorf(
				"Can only have 1 input file when not using tar/zip. Found %d "+
					"files: %v", len(artifact.Files()), artifact.Files())
		}
		archiveFile := artifact.Files()[0]
		ui.Say(fmt.Sprintf("Archiving %s with %s", archiveFile, compression))

		source, err := os.Open(archiveFile)
		if err != nil {
			return nil, keep, fmt.Errorf(
				"Failed to open source file %s for reading: %s",
				archiveFile, err)
		}
		defer source.Close()

		if _, err = io.Copy(output, source); err != nil {
			return nil, keep, fmt.Errorf("Failed to compress %s: %s",
				archiveFile, err)
		}
	}

	ui.Say(fmt.Sprintf("Archive %s completed", target))

	return newArtifact, keep, nil
}

func (config *Config) detectFromFilename() {

	extensions := map[string]string{
		"tar": "tar",
		"zip": "zip",
		"gz":  "pgzip",
		"lz4": "lz4",
	}

	result := filenamePattern.FindAllStringSubmatch(config.OutputPath, -1)

	// No dots. Bail out with defaults.
	if len(result) == 0 {
		config.Algorithm = "pgzip"
		config.Archive = "tar"
		return
	}

	// Parse the last two .groups, if they're there
	lastItem := result[len(result)-1][1]
	var nextToLastItem string
	if len(result) == 1 {
		nextToLastItem = ""
	} else {
		nextToLastItem = result[len(result)-2][1]
	}

	// Should we make an archive? E.g. tar or zip?
	if nextToLastItem == "tar" {
		config.Archive = "tar"
	}
	if lastItem == "zip" || lastItem == "tar" {
		config.Archive = lastItem
		// Tar or zip is our final artifact. Bail out.
		return
	}

	// Should we compress the artifact?
	algorithm, ok := extensions[lastItem]
	if ok {
		config.Algorithm = algorithm
		// We found our compression algorithm. Bail out.
		return
	}

	// We didn't match a known compression format. Default to tar + pgzip
	config.Algorithm = "pgzip"
	config.Archive = "tar"
	return
}

func makeLZ4Writer(output io.WriteCloser, compressionLevel int) (io.WriteCloser, error) {
	lzwriter := lz4.NewWriter(output)
	if compressionLevel > gzip.DefaultCompression {
		lzwriter.Header.HighCompression = true
	}
	return lzwriter, nil
}

func makePgzipWriter(output io.WriteCloser, compressionLevel int) (io.WriteCloser, error) {
	gzipWriter, err := pgzip.NewWriterLevel(output, compressionLevel)
	if err != nil {
		return nil, ErrInvalidCompressionLevel
	}
	gzipWriter.SetConcurrency(500000, runtime.GOMAXPROCS(-1))
	return gzipWriter, nil
}

func createTarArchive(files []string, output io.WriteCloser) error {
	archive := tar.NewWriter(output)
	defer archive.Close()

	for _, path := range files {
		file, err := os.Open(path)
		if err != nil {
			return fmt.Errorf("Unable to read file %s: %s", path, err)
		}
		defer file.Close()

		fi, err := file.Stat()
		if err != nil {
			return fmt.Errorf("Unable to get fileinfo for %s: %s", path, err)
		}

		header, err := tar.FileInfoHeader(fi, path)
		if err != nil {
			return fmt.Errorf("Failed to create tar header for %s: %s", path, err)
		}

		if err := archive.WriteHeader(header); err != nil {
			return fmt.Errorf("Failed to write tar header for %s: %s", path, err)
		}

		if _, err := io.Copy(archive, file); err != nil {
			return fmt.Errorf("Failed to copy %s data to archive: %s", path, err)
		}
	}
	return nil
}

func createZipArchive(files []string, output io.WriteCloser) error {
	archive := zip.NewWriter(output)
	defer archive.Close()

	for _, path := range files {
		path = filepath.ToSlash(path)

		source, err := os.Open(path)
		if err != nil {
			return fmt.Errorf("Unable to read file %s: %s", path, err)
		}
		defer source.Close()

		target, err := archive.Create(path)
		if err != nil {
			return fmt.Errorf("Failed to add zip header for %s: %s", path, err)
		}

		_, err = io.Copy(target, source)
		if err != nil {
			return fmt.Errorf("Failed to copy %s data to archive: %s", path, err)
		}
	}
	return nil
}
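
The filename-detection logic above drives which writer and archive format PostProcess uses. The following table-driven test is a minimal sketch, not part of this repository, illustrating how detectFromFilename is expected to map an "output" value onto the derived Archive and Algorithm fields; the test name and case values are assumptions chosen for illustration.

package compress

import "testing"

// TestDetectFromFilenameSketch is a hypothetical example (not in the original
// repo) showing how output filenames map onto Archive/Algorithm.
func TestDetectFromFilenameSketch(t *testing.T) {
	cases := []struct {
		output    string
		archive   string
		algorithm string
	}{
		{"build.tar.gz", "tar", "pgzip"}, // .tar before .gz selects tar + pgzip
		{"build.zip", "zip", ""},         // zip is the final artifact, no compressor
		{"build.lz4", "", "lz4"},         // compression only, no archive
		{"build", "tar", "pgzip"},        // no extension: defaults
	}
	for _, c := range cases {
		cfg := Config{OutputPath: c.output}
		cfg.detectFromFilename()
		if cfg.Archive != c.archive || cfg.Algorithm != c.algorithm {
			t.Errorf("%s: got archive=%q algorithm=%q, want %q/%q",
				c.output, cfg.Archive, cfg.Algorithm, c.archive, c.algorithm)
		}
	}
}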