get.porter.sh/porter@v1.3.0/pkg/porter/archive.go (about)

     1  package porter
     2  
     3  import (
     4  	"archive/tar"
     5  	"compress/gzip"
     6  	"context"
     7  	"encoding/json"
     8  	"errors"
     9  	"fmt"
    10  	"io"
    11  	"net/http"
    12  	"os"
    13  	"path/filepath"
    14  	"sort"
    15  	"strings"
    16  	"time"
    17  
    18  	"get.porter.sh/porter/pkg"
    19  	"get.porter.sh/porter/pkg/cnab"
    20  	cnabtooci "get.porter.sh/porter/pkg/cnab/cnab-to-oci"
    21  	"get.porter.sh/porter/pkg/tracing"
    22  	"github.com/carolynvs/aferox"
    23  	"github.com/cnabio/cnab-go/bundle"
    24  	"github.com/cnabio/cnab-go/imagestore"
    25  	"github.com/cnabio/cnab-go/imagestore/construction"
    26  	"github.com/cnabio/cnab-to-oci/relocation"
    27  	"github.com/spf13/afero"
    28  	"go.opentelemetry.io/otel/attribute"
    29  )
    30  
// ArchiveOptions defines the valid options for performing an archive operation
type ArchiveOptions struct {
	BundleReferenceOptions
	// ArchiveFile is the destination path for the generated archive,
	// taken from the first positional CLI argument in Validate.
	ArchiveFile         string
	// CompressionLevel is the user-facing gzip level name (a key of
	// compressionLevelValues, e.g. "BestSpeed"); defaulted in Validate.
	CompressionLevel    string
	// compressionLevelInt is the numeric gzip constant resolved from
	// CompressionLevel by Validate, later passed to CustomTar.
	compressionLevelInt int
}
    38  
// compressionLevelValues maps the user-facing --compression-level names to the
// compress/gzip package's numeric compression constants.
var compressionLevelValues = map[string]int{
	"NoCompression":      gzip.NoCompression,
	"BestSpeed":          gzip.BestSpeed,
	"BestCompression":    gzip.BestCompression,
	"DefaultCompression": gzip.DefaultCompression,
	"HuffmanOnly":        gzip.HuffmanOnly,
}
    46  
    47  func (o *ArchiveOptions) GetCompressionLevelDefault() string {
    48  	return "DefaultCompression"
    49  }
    50  
    51  func (p *ArchiveOptions) GetCompressionLevelAllowedValues() []string {
    52  	levels := make([]string, 0, len(compressionLevelValues))
    53  	for level := range compressionLevelValues {
    54  		levels = append(levels, level)
    55  	}
    56  	sort.Strings(levels)
    57  	return levels
    58  }
    59  
    60  // Validate performs validation on the publish options
    61  func (o *ArchiveOptions) Validate(ctx context.Context, args []string, p *Porter) error {
    62  	if len(args) < 1 || args[0] == "" {
    63  		return errors.New("destination file is required")
    64  	}
    65  	if len(args) > 1 {
    66  		return fmt.Errorf("only one positional argument may be specified, the archive file name, but multiple were received: %s", args)
    67  	}
    68  	o.ArchiveFile = args[0]
    69  
    70  	if o.Reference == "" {
    71  		return errors.New("must provide a value for --reference of the form REGISTRY/bundle:tag")
    72  	}
    73  
    74  	if o.CompressionLevel == "" {
    75  		o.CompressionLevel = o.GetCompressionLevelDefault()
    76  	}
    77  	level, ok := compressionLevelValues[o.CompressionLevel]
    78  	if !ok {
    79  		return fmt.Errorf("invalid compression level: %s", o.CompressionLevel)
    80  	}
    81  	o.compressionLevelInt = level
    82  
    83  	return o.BundleReferenceOptions.Validate(ctx, args, p)
    84  }
    85  
    86  // Archive is a composite function that generates a CNAB thick bundle. It will pull the bundle image, and
    87  // any referenced images locally (if needed), export them to individual layers, generate a bundle.json and
    88  // then generate a gzipped tar archive containing the bundle.json and the images
    89  func (p *Porter) Archive(ctx context.Context, opts ArchiveOptions) error {
    90  	ctx, log := tracing.StartSpan(ctx)
    91  	defer log.EndSpan()
    92  
    93  	dir := filepath.Dir(opts.ArchiveFile)
    94  	if _, err := p.Config.FileSystem.Stat(dir); os.IsNotExist(err) {
    95  		return log.Error(fmt.Errorf("parent directory %q does not exist", filepath.ToSlash(dir)))
    96  	}
    97  
    98  	bundleRef, err := opts.GetBundleReference(ctx, p)
    99  	if err != nil {
   100  		return log.Error(err)
   101  	}
   102  
   103  	// This allows you to export thin or thick bundles, we only support generating "thick" archives
   104  	ctor, err := construction.NewConstructor(false)
   105  	if err != nil {
   106  		return log.Error(err)
   107  	}
   108  
   109  	dest, err := p.Config.FileSystem.OpenFile(opts.ArchiveFile, os.O_RDWR|os.O_CREATE|os.O_TRUNC, pkg.FileModeWritable)
   110  	if err != nil {
   111  		return log.Error(err)
   112  	}
   113  
   114  	exp := &exporter{
   115  		fs:                    p.Config.FileSystem,
   116  		out:                   p.Config.Out,
   117  		logs:                  p.Config.Out,
   118  		bundle:                bundleRef.Definition,
   119  		relocationMap:         bundleRef.RelocationMap,
   120  		destination:           dest,
   121  		imageStoreConstructor: ctor,
   122  		insecureRegistry:      opts.InsecureRegistry,
   123  		compressionLevel:      opts.compressionLevelInt,
   124  	}
   125  	if err := exp.export(ctx); err != nil {
   126  		return log.Error(err)
   127  	}
   128  
   129  	return nil
   130  }
   131  
// exporter holds everything needed to write a thick-bundle archive:
// the bundle definition plus image relocation data, the image store used to
// pull and save referenced images, and the destination writer for the
// resulting gzipped tarball.
type exporter struct {
	fs                    aferox.Aferox
	out                   io.Writer
	logs                  io.Writer
	bundle                cnab.ExtendedBundle
	// relocationMap maps original image references to their relocated
	// locations; addImage requires it to be non-nil.
	relocationMap         relocation.ImageRelocationMap
	// destination receives the final gzipped tar stream in export.
	destination           io.Writer
	// imageStoreConstructor builds imageStore once the archive dir exists.
	imageStoreConstructor imagestore.Constructor
	imageStore            imagestore.Store
	insecureRegistry      bool
	// compressionLevel is a gzip level constant, see compressionLevelValues.
	compressionLevel      int
}
   144  
   145  func (ex *exporter) export(ctx context.Context) error {
   146  	ctx, log := tracing.StartSpan(ctx)
   147  	defer log.EndSpan()
   148  
   149  	name := ex.bundle.Name + "-" + ex.bundle.Version
   150  	archiveDir, err := ex.createArchiveFolder(name)
   151  	if err != nil {
   152  		return fmt.Errorf("can not create archive folder: %w", err)
   153  	}
   154  	defer func() {
   155  		err = errors.Join(err, ex.fs.RemoveAll(archiveDir))
   156  	}()
   157  
   158  	bundleFile, err := ex.fs.OpenFile(filepath.Join(archiveDir, "bundle.json"), os.O_RDWR|os.O_CREATE, pkg.FileModeWritable)
   159  	if err != nil {
   160  		return err
   161  	}
   162  	defer bundleFile.Close()
   163  	_, err = ex.bundle.WriteTo(bundleFile)
   164  	if err != nil {
   165  		return fmt.Errorf("unable to write bundle.json in archive: %w", err)
   166  	}
   167  
   168  	reloData, err := json.Marshal(ex.relocationMap)
   169  	if err != nil {
   170  		return err
   171  	}
   172  	err = ex.fs.WriteFile(filepath.Join(archiveDir, "relocation-mapping.json"), reloData, pkg.FileModeWritable)
   173  	if err != nil {
   174  		return fmt.Errorf("unable to write relocation-mapping.json in archive: %w", err)
   175  	}
   176  
   177  	var transport *http.Transport
   178  	if ex.insecureRegistry {
   179  		transport = cnabtooci.GetInsecureRegistryTransport()
   180  	} else {
   181  		transport = http.DefaultTransport.(*http.Transport)
   182  	}
   183  
   184  	ex.imageStore, err = ex.imageStoreConstructor(
   185  		imagestore.WithArchiveDir(archiveDir),
   186  		imagestore.WithLogs(ex.logs),
   187  		imagestore.WithTransport(transport))
   188  	if err != nil {
   189  		return fmt.Errorf("error creating artifacts: %s", err)
   190  	}
   191  
   192  	if err := ex.prepareArtifacts(ex.bundle); err != nil {
   193  		return fmt.Errorf("error preparing bundle artifact: %s", err)
   194  	}
   195  
   196  	rc, err := ex.CustomTar(ctx, archiveDir, ex.compressionLevel)
   197  	if err != nil {
   198  		return fmt.Errorf("error creating archive: %w", err)
   199  	}
   200  	defer rc.Close()
   201  
   202  	_, err = io.Copy(ex.destination, rc)
   203  	return err
   204  }
   205  
   206  func (ex *exporter) createTarHeader(ctx context.Context, path string, file string, fileInfo os.FileInfo) (*tar.Header, error) {
   207  	log := tracing.LoggerFromContext(ctx)
   208  
   209  	header := &tar.Header{
   210  		ModTime:    time.Unix(0, 0),
   211  		AccessTime: time.Unix(0, 0),
   212  		ChangeTime: time.Unix(0, 0),
   213  		Uid:        0,
   214  		Gid:        0,
   215  	}
   216  
   217  	switch {
   218  	case fileInfo.Mode().IsDir():
   219  		header.Typeflag = tar.TypeDir
   220  		header.Mode = 0755
   221  	case fileInfo.Mode().IsRegular():
   222  		header.Typeflag = tar.TypeReg
   223  		header.Mode = 0644
   224  		header.Size = fileInfo.Size()
   225  	default:
   226  		log.Debug("Skipping header creation. Not a file/dir", attribute.String("createTarHeader.file", file))
   227  		return nil, nil
   228  	}
   229  
   230  	// ensure header has relative file path prepended with '.'
   231  	relativeFilePathName := file
   232  
   233  	if filepath.IsAbs(path) {
   234  		relativePath, err := filepath.Rel(path, file)
   235  
   236  		if err != nil {
   237  			return nil, err
   238  		}
   239  
   240  		if relativePath != "." {
   241  			relativeFilePathName = fmt.Sprintf(".%s%s", string(filepath.Separator), relativePath)
   242  		} else {
   243  			relativeFilePathName = relativePath
   244  		}
   245  	}
   246  
   247  	header.Name = filepath.ToSlash(relativeFilePathName)
   248  
   249  	// directories must be suffixed with '/'
   250  	if fileInfo.Mode().IsDir() && !strings.HasSuffix(header.Name, "/") {
   251  		header.Name += "/"
   252  	}
   253  
   254  	log.Debug("Created tar header", attribute.String("createTarHeader.headerName", header.Name))
   255  
   256  	return header, nil
   257  }
   258  
   259  func (ex *exporter) CustomTar(ctx context.Context, srcPath string, compressionLevel int) (io.ReadCloser, error) {
   260  	pipeReader, pipeWriter := io.Pipe()
   261  
   262  	gzipWriter, err := gzip.NewWriterLevel(pipeWriter, compressionLevel)
   263  	if err != nil {
   264  		return nil, err
   265  	}
   266  	tarWriter := tar.NewWriter(gzipWriter)
   267  
   268  	cleanSrcPath := filepath.Clean(srcPath)
   269  
   270  	go func() {
   271  		ctx, log := tracing.StartSpanWithName(ctx, "CustomTar.Walk")
   272  
   273  		defer func() {
   274  			if err := tarWriter.Close(); err != nil {
   275  				log.Warnf("Can't close tar writer: %s", err)
   276  			}
   277  			if err := gzipWriter.Close(); err != nil {
   278  				log.Warnf("Can't close gzip writer: %s\n", err)
   279  			}
   280  			if err := pipeWriter.Close(); err != nil {
   281  				log.Warnf("Can't close pipe writer: %s\n", err)
   282  			}
   283  			log.EndSpan()
   284  		}()
   285  
   286  		walker := func(path string, finfo os.FileInfo, err error) error {
   287  			if err != nil {
   288  				return fmt.Errorf("walk invoked with error: %w", err)
   289  			}
   290  
   291  			ctx, log := tracing.StartSpanWithName(ctx, "CustomTar.ProcessPath", attribute.String("customTar.path", path))
   292  			defer log.EndSpan()
   293  
   294  			hdr, err := ex.createTarHeader(ctx, srcPath, path, finfo)
   295  			if err != nil {
   296  				return fmt.Errorf("failed to create tar header for path %s: %w", path, err)
   297  			}
   298  
   299  			// if header is nil then it's not a regular file nor directory
   300  			if hdr == nil {
   301  				return nil
   302  			}
   303  
   304  			if err := tarWriter.WriteHeader(hdr); err != nil {
   305  				return fmt.Errorf("failed to write header for path %s: %w", path, err)
   306  			}
   307  
   308  			// if path is a dir, nothing more to do
   309  			if finfo.Mode().IsDir() {
   310  				return nil
   311  			}
   312  
   313  			// add file to tar
   314  			sourceFile, err := os.Open(path)
   315  			if err != nil {
   316  				return fmt.Errorf("failed to open %s: %w", path, err)
   317  			}
   318  
   319  			defer sourceFile.Close()
   320  			_, err = io.Copy(tarWriter, sourceFile)
   321  			if err != nil {
   322  				return fmt.Errorf("failed to copy %s: %w", path, err)
   323  			}
   324  
   325  			return nil
   326  		}
   327  
   328  		// build tar
   329  		err = filepath.Walk(cleanSrcPath, walker)
   330  	}()
   331  
   332  	return pipeReader, nil
   333  }
   334  
   335  // prepareArtifacts pulls all images, verifies their digests and
   336  // saves them to a directory called artifacts/ in the bundle directory
   337  func (ex *exporter) prepareArtifacts(bun cnab.ExtendedBundle) error {
   338  	var imageKeys []string
   339  	for imageKey := range bun.Images {
   340  		imageKeys = append(imageKeys, imageKey)
   341  	}
   342  	sort.Strings(imageKeys)
   343  	for _, k := range imageKeys {
   344  		if err := ex.addImage(bun.Images[k].BaseImage); err != nil {
   345  			return err
   346  		}
   347  	}
   348  
   349  	for _, in := range bun.InvocationImages {
   350  		if err := ex.addImage(in.BaseImage); err != nil {
   351  			return err
   352  		}
   353  	}
   354  
   355  	return nil
   356  }
   357  
   358  // addImage pulls an image using relocation map, adds it to the artifacts/ directory, and verifies its digest
   359  func (ex *exporter) addImage(base bundle.BaseImage) error {
   360  	if ex.relocationMap == nil {
   361  		return errors.New("relocation map is not provided")
   362  	}
   363  	location, ok := ex.relocationMap[base.Image]
   364  	if !ok {
   365  		return fmt.Errorf("can not locate the referenced image: %s", base.Image)
   366  	}
   367  	dig, err := ex.imageStore.Add(location)
   368  	if err != nil {
   369  		return err
   370  	}
   371  	return checkDigest(base, dig)
   372  }
   373  
   374  // createArchiveFolder set up a temporary directory for storing all data needed to archive a bundle.
   375  // It sanitizes the name and make sure only the current user has full permission to it.
   376  // If the name contains a path separator, all path separators will be replaced with "-".
   377  func (ex *exporter) createArchiveFolder(name string) (string, error) {
   378  	cleanedPath := strings.ReplaceAll(afero.UnicodeSanitize(name), "/", "-")
   379  	archiveDir, err := ex.fs.TempDir("", cleanedPath)
   380  	if err != nil {
   381  		return "", fmt.Errorf("can not create a temporary archive folder: %w", err)
   382  	}
   383  
   384  	err = ex.fs.Chmod(archiveDir, pkg.FileModeDirectory)
   385  	if err != nil {
   386  		return "", fmt.Errorf("can not change permission for the temporary archive folder: %w", err)
   387  	}
   388  	return archiveDir, nil
   389  }
   390  
   391  // checkDigest compares the content digest of the given image to the given content digest and returns an error if they
   392  // are both non-empty and do not match
   393  func checkDigest(image bundle.BaseImage, dig string) error {
   394  	digestFromManifest := image.Digest
   395  	if dig == "" || digestFromManifest == "" {
   396  		return nil
   397  	}
   398  	if digestFromManifest != dig {
   399  		return fmt.Errorf("content digest mismatch: image %s has digest %s but the digest should be %s according to the bundle manifest", image.Image, dig, digestFromManifest)
   400  	}
   401  	return nil
   402  }