github.com/GoogleContainerTools/skaffold@v1.39.18/pkg/skaffold/build/buildpacks/lifecycle.go

/*
Copyright 2019 The Skaffold Authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package buildpacks

import (
	"context"
	"errors"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"strconv"
	"strings"

	lifecycle "github.com/buildpacks/lifecycle/cmd"
	pack "github.com/buildpacks/pack/pkg/client"
	packimg "github.com/buildpacks/pack/pkg/image"
	"github.com/buildpacks/pack/pkg/project"

	"github.com/GoogleContainerTools/skaffold/pkg/skaffold/docker"
	"github.com/GoogleContainerTools/skaffold/pkg/skaffold/output/log"
	"github.com/GoogleContainerTools/skaffold/pkg/skaffold/schema/latest"
)

// For testing
var (
	runPackBuildFunc = runPackBuild
)

// images is a global list of builder/run image pairs that are already pulled.
// In a skaffold session, typically a skaffold dev loop, we want to avoid asking `pack`
// to pull images that are already pulled.
var images pulledImages

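// build runs a buildpacks build for the given artifact and returns the name of
// the `:latest`-tagged image it produced; the image is then tagged as usual
// with the tag provided by the tag policy.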
func (b *Builder) build(ctx context.Context, out io.Writer, a *latest.Artifact, tag string) (string, error) {
	artifact := a.BuildpackArtifact
	workspace := a.Workspace

	// Read `project.toml` if it exists.
	path := filepath.Join(workspace, artifact.ProjectDescriptor)
	projectDescriptor, err := project.ReadProjectDescriptor(path)
	if err != nil && !os.IsNotExist(err) {
		return "", fmt.Errorf("failed to read project descriptor %q: %w", path, err)
	}

	// To improve caching, we always build the image with a `:latest` tag.
	// This way, the lifecycle is able to "bootstrap" from the previously built image.
	// The image will then be tagged as usual with the tag provided by the tag policy.
	parsed, err := docker.ParseReference(tag)
	if err != nil {
		return "", fmt.Errorf("parsing tag %q: %w", tag, err)
	}
	latest := parsed.BaseName + ":latest"

	// Evaluate env vars.
	env, err := env(a, b.mode, projectDescriptor)
	if err != nil {
		return "", fmt.Errorf("unable to evaluate env variables: %w", err)
	}

	cc, err := containerConfig(artifact)
	if err != nil {
		return "", fmt.Errorf("%q: %w", a.ImageName, err)
	}

	// List buildpacks to be used for the build.
	// Those specified in the skaffold.yaml replace those in the project.toml.
	buildpacks := artifact.Buildpacks
	if len(buildpacks) == 0 {
		for _, bp := range projectDescriptor.Build.Buildpacks {
			if bp.ID != "" {
				if bp.Version == "" {
					buildpacks = append(buildpacks, bp.ID)
				} else {
					buildpacks = append(buildpacks, fmt.Sprintf("%s@%s", bp.ID, bp.Version))
				}
				// } else {
				// TODO(dgageot): Support URI.
			}
		}
	}

	builderImage, runImage, pullPolicy := resolveDependencyImages(artifact, b.artifacts, a.Dependencies, b.pushImages)

	if err := runPackBuildFunc(ctx, out, b.localDocker, pack.BuildOptions{
		AppPath:         workspace,
		Builder:         builderImage,
		RunImage:        runImage,
		Buildpacks:      buildpacks,
		Env:             env,
		Image:           latest,
		PullPolicy:      pullPolicy,
		TrustBuilder:    func(_ string) bool { return artifact.TrustBuilder },
		ContainerConfig: cc,
		// TODO(dgageot): Support project.toml include/exclude.
		// FileFilter: func(string) bool { return true },
	}); err != nil {
		return "", err
	}

	images.MarkAsPulled(artifact.Builder, artifact.RunImage)

	return latest, nil
}

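// runPackBuild creates a pack client backed by Skaffold's local Docker daemon
// and runs the build, rewriting lifecycle exit codes in the returned error
// into human-readable messages.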
func runPackBuild(ctx context.Context, out io.Writer, localDocker docker.LocalDaemon, opts pack.BuildOptions) error {
	packClient, err := pack.NewClient(
		pack.WithDockerClient(localDocker.RawClient()),
		pack.WithLogger(NewLogger(out)),
		pack.WithFetcher(newFetcher(out, localDocker)),
	)
	if err != nil {
		return fmt.Errorf("unable to create pack client: %w", err)
	}

	err = packClient.Build(ctx, opts)
	// pack turns exit codes from the lifecycle into `failed with status code: N`
	if err != nil {
		err = rewriteLifecycleStatusCode(err)
	}
	return err
}

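// rewriteLifecycleStatusCode maps a pack error of the form
// `failed with status code: N` to a friendlier message; any other error is
// returned unchanged.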
func rewriteLifecycleStatusCode(lce error) error {
	prefix := "failed with status code: "
	lceText := lce.Error()
	if strings.HasPrefix(lceText, prefix) {
		sc := lceText[len(prefix):]
		if code, err := strconv.Atoi(sc); err == nil {
			return errors.New(mapLifecycleStatusCode(code))
		}
	}
	return lce
}

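// mapLifecycleStatusCode translates well-known buildpacks lifecycle exit codes
// into short descriptions.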
func mapLifecycleStatusCode(code int) string {
	switch code {
	case lifecycle.CodeFailed:
		return "buildpacks lifecycle failed"
	case lifecycle.CodeInvalidArgs:
		return "lifecycle reported invalid arguments"
	case lifecycle.CodeIncompatiblePlatformAPI:
		return "incompatible version of Platform API"
	case lifecycle.CodeIncompatibleBuildpackAPI:
		return "incompatible version of Buildpacks API"
	default:
		// we should never see CodeRebaseError or CodeLaunchError
		return fmt.Sprintf("lifecycle failed with status code %d", code)
	}
}

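// envMap converts a list of KEY=VALUE strings into a map, splitting each entry
// on the first `=`, e.g. []string{"FOO=bar", "BAZ=a=b"} becomes
// map[string]string{"FOO": "bar", "BAZ": "a=b"}. Entries are expected to
// contain an `=`.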
func envMap(env []string) map[string]string {
	kv := make(map[string]string)

	for _, e := range env {
		parts := strings.SplitN(e, "=", 2)
		kv[parts[0]] = parts[1]
	}

	return kv
}

// resolveDependencyImages replaces the provided builder and run images with built images from the required artifacts if specified.
// The return values are the builder image, the run image, and the pull policy to use.
func resolveDependencyImages(artifact *latest.BuildpackArtifact, r ArtifactResolver, deps []*latest.ArtifactDependency, pushImages bool) (string, string, packimg.PullPolicy) {
	builderImage, runImage := artifact.Builder, artifact.RunImage
	builderImageLocal, runImageLocal := false, false

	// We mimic pack's behaviour and always pull the images on the first build
	// (tracked via images.AreAlreadyPulled()), but we never pull on
	// subsequent builds. And if either the builder or the run image is a
	// dependent image, then we do not pull and use PullIfNotPresent.
	pullPolicy := packimg.PullAlways

	var found bool
	for _, d := range deps {
		if builderImage == d.Alias {
			builderImage, found = r.GetImageTag(d.ImageName)
			if !found {
				log.Entry(context.TODO()).Fatalf("failed to resolve build result for required artifact %q", d.ImageName)
			}
			builderImageLocal = true
		}
		if runImage == d.Alias {
			runImage, found = r.GetImageTag(d.ImageName)
			if !found {
				log.Entry(context.TODO()).Fatalf("failed to resolve build result for required artifact %q", d.ImageName)
			}
			runImageLocal = true
		}
	}

	if builderImageLocal && runImageLocal {
		// if both builder and run image are built locally, there's nothing to pull.
		pullPolicy = packimg.PullNever
	} else if builderImageLocal || runImageLocal {
		// if only one of the builder or run image is built locally, we can enable remote image pull only if that image is also pushed to a remote registry.
		pullPolicy = packimg.PullIfNotPresent

		// if remote image pull is disabled, the image that is not fetched from the required artifacts might not be the latest.
		if !pushImages && builderImageLocal {
			log.Entry(context.TODO()).Warn("Disabled remote image pull since builder image is built locally. Buildpacks run image may not be latest.")
		}
		if !pushImages && runImageLocal {
			log.Entry(context.TODO()).Warn("Disabled remote image pull since run image is built locally. Buildpacks builder image may not be latest.")
		}
	}

	// if remote pull is enabled, ensure that the same images aren't pulled twice.
	if pullPolicy == packimg.PullAlways && images.AreAlreadyPulled(builderImage, runImage) {
		pullPolicy = packimg.PullNever
	}

	return builderImage, runImage, pullPolicy
}

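// containerConfig translates the artifact's volume mounts into pack's
// ContainerConfig, validating that each volume has both a host and a target path.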
func containerConfig(artifact *latest.BuildpackArtifact) (pack.ContainerConfig, error) {
	var vols []string
	if artifact.Volumes != nil {
		for _, v := range *artifact.Volumes {
			if v.Host == "" || v.Target == "" {
				// in case these slip by the JSON schema
				return pack.ContainerConfig{}, errors.New("buildpacks volumes must have both host and target")
			}
			var spec string
			if v.Options == "" {
				spec = fmt.Sprintf("%s:%s", v.Host, v.Target)
			} else {
				spec = fmt.Sprintf("%s:%s:%s", v.Host, v.Target, v.Options)
			}
			vols = append(vols, spec)
		}
	}
	return pack.ContainerConfig{Volumes: vols}, nil
}