// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1

//go:generate packer-sdc mapstructure-to-hcl2 -type Config

// Package compress implements a Packer post-processor that bundles build
// artifacts into a tar/zip archive and/or compresses them with one of
// several algorithms (pgzip, bzip2, bgzf, lz4, xz), selected from the
// output filename's extensions or an explicit "format" option.
package compress

import (
	"archive/tar"
	"archive/zip"
	"context"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"regexp"
	"runtime"

	"github.com/biogo/hts/bgzf"
	"github.com/dsnet/compress/bzip2"
	"github.com/hashicorp/hcl/v2/hcldec"
	"github.com/hashicorp/packer-plugin-sdk/common"
	packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
	"github.com/hashicorp/packer-plugin-sdk/template/config"
	"github.com/hashicorp/packer-plugin-sdk/template/interpolate"
	"github.com/klauspost/pgzip"
	"github.com/pierrec/lz4/v4"
	"github.com/ulikunitz/xz"
)

var (
	// ErrInvalidCompressionLevel is returned when the compression level passed
	// to gzip is not in the expected range. See compress/flate for details.
	ErrInvalidCompressionLevel = fmt.Errorf(
		"Invalid compression level. Expected an integer from -1 to 9.")

	// ErrWrongInputCount signals that more than one input file was supplied
	// while no tar/zip archive format was selected.
	// NOTE(review): PostProcess currently builds a more detailed inline error
	// with the same prefix instead of returning this sentinel.
	ErrWrongInputCount = fmt.Errorf(
		"Can only have 1 input file when not using tar/zip")

	// filenamePattern captures each dot-separated, lowercase alphanumeric
	// extension in a filename (e.g. "out.tar.gz" yields "tar" and "gz").
	filenamePattern = regexp.MustCompile(`(?:\.([a-z0-9]+))`)
)

// Config holds the post-processor settings decoded from the template.
type Config struct {
	common.PackerConfig `mapstructure:",squash"`

	// Fields from config file
	OutputPath       string `mapstructure:"output"`            // target path template; may reference {{.BuildName}} / {{.BuilderType}}
	Format           string `mapstructure:"format"`            // optional explicit format, treated as a trailing extension
	CompressionLevel int    `mapstructure:"compression_level"` // gzip-style level; clamped/defaulted in Configure

	// Derived fields (populated by detectFromFilename, not by the user)
	Archive   string // "tar", "zip", or empty for no archiving
	Algorithm string // compression algorithm key, e.g. "pgzip", "lz4"; empty means no compression

	ctx interpolate.Context
}

// PostProcessor compresses/archives the files of an upstream artifact.
type PostProcessor struct {
	config Config
}

// ConfigSpec returns the HCL2 object spec for this post-processor's config.
func (p *PostProcessor) ConfigSpec() hcldec.ObjectSpec { return p.config.FlatMapstructure().HCL2Spec() }

// Configure decodes the raw template configuration, applies defaults
// (output path, compression level), validates the output template, and
// derives the archive/compression settings from the filename.
func (p *PostProcessor) Configure(raws ...interface{}) error {
	err := config.Decode(&p.config, &config.DecodeOpts{
		PluginType:         "compress",
		Interpolate:        true,
		InterpolateContext: &p.config.ctx,
		InterpolateFilter: &interpolate.RenderFilter{
			// "output" is rendered later, per artifact, in PostProcess —
			// skip it during the initial decode.
			Exclude: []string{"output"},
		},
	}, raws...)
	if err != nil {
		return err
	}

	errs := new(packersdk.MultiError)

	// If there is no explicit number of Go threads to use, then set it
	if os.Getenv("GOMAXPROCS") == "" {
		runtime.GOMAXPROCS(runtime.NumCPU())
	}

	if p.config.OutputPath == "" {
		p.config.OutputPath = "packer_{{.BuildName}}_{{.BuilderType}}"
	}

	// Clamp to the maximum gzip level; higher values are not meaningful.
	if p.config.CompressionLevel > pgzip.BestCompression {
		p.config.CompressionLevel = pgzip.BestCompression
	}
	// Technically 0 means "don't compress" but I don't know how to
	// differentiate between "user entered zero" and "user entered nothing".
	// Also, why bother creating a compressed file with zero compression?
	if p.config.CompressionLevel == -1 || p.config.CompressionLevel == 0 {
		p.config.CompressionLevel = pgzip.DefaultCompression
	}

	if err = interpolate.Validate(p.config.OutputPath, &p.config.ctx); err != nil {
		errs = packersdk.MultiErrorAppend(
			errs, fmt.Errorf("Error parsing target template: %s", err))
	}

	p.config.detectFromFilename()

	if len(errs.Errors) > 0 {
		return errs
	}

	return nil
}

// PostProcess archives/compresses the artifact's files into the configured
// output path and returns the new artifact. It never asks Packer to keep or
// force-keep the input artifact (both returned booleans are always false).
func (p *PostProcessor) PostProcess(
	ctx context.Context,
	ui packersdk.Ui,
	artifact packersdk.Artifact,
) (packersdk.Artifact, bool, bool, error) {
	var generatedData map[interface{}]interface{}
	stateData := artifact.State("generated_data")
	if stateData != nil {
		// Make sure it's not a nil map so we can assign to it later.
		generatedData = stateData.(map[interface{}]interface{})
	}
	// If stateData has a nil map generatedData will be nil
	// and we need to make sure it's not
	if generatedData == nil {
		generatedData = make(map[interface{}]interface{})
	}

	// These are extra variables that will be made available for interpolation.
	generatedData["BuildName"] = p.config.PackerBuildName
	generatedData["BuilderType"] = p.config.PackerBuilderType
	p.config.ctx.Data = generatedData

	// Render the (possibly templated) output path for this artifact.
	target, err := interpolate.Render(p.config.OutputPath, &p.config.ctx)
	if err != nil {
		return nil, false, false, fmt.Errorf("Error interpolating output value: %s", err)
	} else {
		// NOTE(review): prints the resolved target to stdout rather than
		// through ui — presumably intentional, but worth confirming.
		fmt.Println(target)
	}

	newArtifact := &Artifact{Path: target}

	if err = os.MkdirAll(filepath.Dir(target), os.FileMode(0755)); err != nil {
		return nil, false, false, fmt.Errorf(
			"Unable to create dir for archive %s: %s", target, err)
	}
	outputFile, err := os.Create(target)
	if err != nil {
		return nil, false, false, fmt.Errorf(
			"Unable to create archive %s: %s", target, err)
	}
	defer outputFile.Close()

	// Setup output interface. If we're using compression, output is a
	// compression writer. Otherwise it's just a file.
	var output io.WriteCloser
	errTmpl := "error creating %s writer: %s"
	switch p.config.Algorithm {
	case "bgzf":
		ui.Say(fmt.Sprintf("Using bgzf compression with %d cores for %s",
			runtime.GOMAXPROCS(-1), target))
		output, err = makeBGZFWriter(outputFile, p.config.CompressionLevel)
		if err != nil {
			return nil, false, false, fmt.Errorf(errTmpl, p.config.Algorithm, err)
		}
		// Deferred so the compressor is flushed/closed after the archive
		// step below completes.
		defer output.Close()
	case "bzip2":
		ui.Say(fmt.Sprintf("Using bzip2 compression with 1 core for %s (library does not support MT)",
			target))
		output, err = makeBZIP2Writer(outputFile, p.config.CompressionLevel)
		if err != nil {
			return nil, false, false, fmt.Errorf(errTmpl, p.config.Algorithm, err)
		}
		defer output.Close()
	case "lz4":
		ui.Say(fmt.Sprintf("Using lz4 compression with %d cores for %s",
			runtime.GOMAXPROCS(-1), target))
		output, err = makeLZ4Writer(outputFile, p.config.CompressionLevel)
		if err != nil {
			return nil, false, false, fmt.Errorf(errTmpl, p.config.Algorithm, err)
		}
		defer output.Close()
	case "xz":
		ui.Say(fmt.Sprintf("Using xz compression with 1 core for %s (library does not support MT)",
			target))
		// NOTE(review): unlike the other writers, xz ignores the configured
		// compression_level — see makeXZWriter below.
		output, err = makeXZWriter(outputFile)
		if err != nil {
			return nil, false, false, fmt.Errorf(errTmpl, p.config.Algorithm, err)
		}
		defer output.Close()
	case "pgzip":
		ui.Say(fmt.Sprintf("Using pgzip compression with %d cores for %s",
			runtime.GOMAXPROCS(-1), target))
		output, err = makePgzipWriter(outputFile, p.config.CompressionLevel)
		if err != nil {
			return nil, false, false,
				fmt.Errorf(errTmpl, p.config.Algorithm, err)
		}
		defer output.Close()
	default:
		// No recognized algorithm: write straight to the output file.
		output = outputFile
	}

	// Human-readable label for the UI messages below.
	compression := p.config.Algorithm
	if compression == "" {
		compression = "no compression"
	}

	// Build an archive, if we're supposed to do that.
	switch p.config.Archive {
	case "tar":
		ui.Say(fmt.Sprintf("Tarring %s with %s", target, compression))
		err = createTarArchive(artifact.Files(), output)
		if err != nil {
			return nil, false, false, fmt.Errorf("Error creating tar: %s", err)
		}
	case "zip":
		ui.Say(fmt.Sprintf("Zipping %s", target))
		err = createZipArchive(artifact.Files(), output)
		if err != nil {
			return nil, false, false, fmt.Errorf("Error creating zip: %s", err)
		}
	default:
		// Filename indicates no tarball (just compress) so we'll do an io.Copy
		// into our compressor.
		if len(artifact.Files()) != 1 {
			return nil, false, false, fmt.Errorf(
				"Can only have 1 input file when not using tar/zip. Found %d "+
					"files: %v", len(artifact.Files()), artifact.Files())
		}
		archiveFile := artifact.Files()[0]
		ui.Say(fmt.Sprintf("Archiving %s with %s", archiveFile, compression))

		source, err := os.Open(archiveFile)
		if err != nil {
			return nil, false, false, fmt.Errorf(
				"Failed to open source file %s for reading: %s",
				archiveFile, err)
		}
		defer source.Close()

		if _, err = io.Copy(output, source); err != nil {
			return nil, false, false, fmt.Errorf("Failed to compress %s: %s",
				archiveFile, err)
		}
	}

	ui.Say(fmt.Sprintf("Archive %s completed", target))

	return newArtifact, false, false, nil
}

// detectFromFilename derives config.Archive and config.Algorithm from the
// extensions of OutputPath (or from Format, appended as a trailing
// extension when set). Falls back to tar + pgzip when nothing matches.
// NOTE(review): the receiver name shadows the imported "config" package
// within this method.
func (config *Config) detectFromFilename() {
	var result [][]string

	// Map of filename extension to the algorithm key used by PostProcess.
	extensions := map[string]string{
		"tar":   "tar",
		"zip":   "zip",
		"gz":    "pgzip",
		"lz4":   "lz4",
		"bgzf":  "bgzf",
		"xz":    "xz",
		"bzip2": "bzip2",
	}

	if config.Format == "" {
		result = filenamePattern.FindAllStringSubmatch(config.OutputPath, -1)
	} else {
		// An explicit format takes precedence over the path's own extension.
		result = filenamePattern.FindAllStringSubmatch(fmt.Sprintf("%s.%s", config.OutputPath, config.Format), -1)
	}

	// No dots. Bail out with defaults.
	if len(result) == 0 {
		config.Algorithm = "pgzip"
		config.Archive = "tar"
		return
	}

	// Parse the last two .groups, if they're there
	lastItem := result[len(result)-1][1]
	var nextToLastItem string
	if len(result) == 1 {
		nextToLastItem = ""
	} else {
		nextToLastItem = result[len(result)-2][1]
	}

	// Should we make an archive? E.g. tar or zip?
	// ("x.tar.gz" sets Archive here, then falls through to the algorithm
	// lookup for the "gz" part.)
	if nextToLastItem == "tar" {
		config.Archive = "tar"
	}
	if lastItem == "zip" || lastItem == "tar" {
		config.Archive = lastItem
		// Tar or zip is our final artifact. Bail out.
		return
	}

	// Should we compress the artifact?
	algorithm, ok := extensions[lastItem]
	if ok {
		config.Algorithm = algorithm
		// We found our compression algorithm. Bail out.
		return
	}

	// We didn't match a known compression format. Default to tar + pgzip
	config.Algorithm = "pgzip"
	config.Archive = "tar"
}

// makeBGZFWriter wraps output in a bgzf compressor using one worker per
// available core. Returns ErrInvalidCompressionLevel on a bad level.
func makeBGZFWriter(output io.WriteCloser, compressionLevel int) (io.WriteCloser, error) {
	bgzfWriter, err := bgzf.NewWriterLevel(output, compressionLevel, runtime.GOMAXPROCS(-1))
	if err != nil {
		return nil, ErrInvalidCompressionLevel
	}
	return bgzfWriter, nil
}

// makeBZIP2Writer wraps output in a bzip2 compressor. A non-positive
// compressionLevel keeps the library's maximum (level 9).
func makeBZIP2Writer(output io.Writer, compressionLevel int) (io.WriteCloser, error) {
	// Set the default to highest level compression
	bzipCFG := &bzip2.WriterConfig{Level: 9}
	// Override our set defaults
	if compressionLevel > 0 {
		bzipCFG.Level = compressionLevel
	}
	bzipWriter, err := bzip2.NewWriter(output, bzipCFG)
	if err != nil {
		return nil, err
	}
	return bzipWriter, nil
}

// makeLZ4Writer wraps output in an lz4 compressor. Negative levels keep the
// library default; 0-9 map onto lz4's named levels; anything else yields
// ErrInvalidCompressionLevel.
func makeLZ4Writer(output io.WriteCloser, compressionLevel int) (io.WriteCloser, error) {
	lzwriter := lz4.NewWriter(output)
	if compressionLevel < 0 {
		return lzwriter, nil
	}
	levels := map[int]lz4.CompressionLevel{
		0: lz4.Fast,
		1: lz4.Level1,
		2: lz4.Level2,
		3: lz4.Level3,
		4: lz4.Level4,
		5: lz4.Level5,
		6: lz4.Level6,
		7: lz4.Level7,
		8: lz4.Level8,
		9: lz4.Level9,
	}
	level, ok := levels[compressionLevel]
	if !ok {
		return nil, ErrInvalidCompressionLevel
	}
	if err := lzwriter.Apply(lz4.CompressionLevelOption(level)); err != nil {
		return nil, err
	}
	return lzwriter, nil
}

// makeXZWriter wraps output in an xz compressor. The xz library exposes no
// compression-level knob here, so the configured level is not used.
func makeXZWriter(output io.WriteCloser) (io.WriteCloser, error) {
	xzwriter, err := xz.NewWriter(output)
	if err != nil {
		return nil, err
	}
	return xzwriter, nil
}

// makePgzipWriter wraps output in a parallel gzip compressor at the given
// level, using one block per available core.
func makePgzipWriter(output io.WriteCloser, compressionLevel int) (io.WriteCloser, error) {
	gzipWriter, err := pgzip.NewWriterLevel(output, compressionLevel)
	if err != nil {
		return nil, ErrInvalidCompressionLevel
372 } 373 _ = gzipWriter.SetConcurrency(500000, runtime.GOMAXPROCS(-1)) 374 return gzipWriter, nil 375 } 376 377 func createTarArchive(files []string, output io.WriteCloser) error { 378 archive := tar.NewWriter(output) 379 defer archive.Close() 380 381 for _, path := range files { 382 file, err := os.Open(path) 383 if err != nil { 384 return fmt.Errorf("Unable to read file %s: %s", path, err) 385 } 386 defer file.Close() 387 388 fi, err := file.Stat() 389 if err != nil { 390 return fmt.Errorf("Unable to get fileinfo for %s: %s", path, err) 391 } 392 393 header, err := tar.FileInfoHeader(fi, path) 394 if err != nil { 395 return fmt.Errorf("Failed to create tar header for %s: %s", path, err) 396 } 397 398 // workaround for archive format on go >=1.10 399 setHeaderFormat(header) 400 401 if err := archive.WriteHeader(header); err != nil { 402 return fmt.Errorf("Failed to write tar header for %s: %s", path, err) 403 } 404 405 if _, err := io.Copy(archive, file); err != nil { 406 return fmt.Errorf("Failed to copy %s data to archive: %s", path, err) 407 } 408 } 409 return nil 410 } 411 412 func createZipArchive(files []string, output io.WriteCloser) error { 413 archive := zip.NewWriter(output) 414 defer archive.Close() 415 416 for _, path := range files { 417 path = filepath.ToSlash(path) 418 419 source, err := os.Open(path) 420 if err != nil { 421 return fmt.Errorf("Unable to read file %s: %s", path, err) 422 } 423 defer source.Close() 424 425 target, err := archive.Create(path) 426 if err != nil { 427 return fmt.Errorf("Failed to add zip header for %s: %s", path, err) 428 } 429 430 _, err = io.Copy(target, source) 431 if err != nil { 432 return fmt.Errorf("Failed to copy %s data to archive: %s", path, err) 433 } 434 } 435 return nil 436 }