github.com/munnerz/test-infra@v0.0.0-20190108210205-ce3d181dc989/prow/pod-utils/decorate/podspec.go

/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package decorate

import (
	"fmt"
	"path"
	"path/filepath"
	"sort"
	"strconv"
	"strings"

	"github.com/sirupsen/logrus"
	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/validation"

	"k8s.io/test-infra/prow/clonerefs"
	"k8s.io/test-infra/prow/entrypoint"
	"k8s.io/test-infra/prow/gcsupload"
	"k8s.io/test-infra/prow/initupload"
	"k8s.io/test-infra/prow/kube"
	"k8s.io/test-infra/prow/pod-utils/clone"
	"k8s.io/test-infra/prow/pod-utils/downwardapi"
	"k8s.io/test-infra/prow/pod-utils/wrapper"
	"k8s.io/test-infra/prow/sidecar"
)

const (
	logMountName            = "logs"
	logMountPath            = "/logs"
	artifactsEnv            = "ARTIFACTS"
	artifactsPath           = logMountPath + "/artifacts"
	codeMountName           = "code"
	codeMountPath           = "/home/prow/go"
	gopathEnv               = "GOPATH"
	toolsMountName          = "tools"
	toolsMountPath          = "/tools"
	gcsCredentialsMountName = "gcs-credentials"
	gcsCredentialsMountPath = "/secrets/gcs"
)

// Labels returns a string slice with label consts from kube.
func Labels() []string {
	return []string{kube.ProwJobTypeLabel, kube.CreatedByProw, kube.ProwJobIDLabel}
}

// VolumeMounts returns a string slice with *MountName consts in it.
func VolumeMounts() []string {
	return []string{logMountName, codeMountName, toolsMountName, gcsCredentialsMountName}
}

// VolumeMountPaths returns a string slice with *MountPath consts in it.
func VolumeMountPaths() []string {
	return []string{logMountPath, codeMountPath, toolsMountPath, gcsCredentialsMountPath}
}

// LabelsAndAnnotationsForSpec returns a minimal set of labels and annotations to add to a prowjob or its owned resources.
//
// User-provided extraLabels and extraAnnotations values take precedence over the auto-provided values.
func LabelsAndAnnotationsForSpec(spec kube.ProwJobSpec, extraLabels, extraAnnotations map[string]string) (map[string]string, map[string]string) {
	jobNameForLabel := spec.Job
	if len(jobNameForLabel) > validation.LabelValueMaxLength {
		// TODO(fejta): consider truncating middle rather than end.
		jobNameForLabel = strings.TrimRight(spec.Job[:validation.LabelValueMaxLength], ".-")
		logrus.WithFields(logrus.Fields{
			"job":       spec.Job,
			"key":       kube.ProwJobAnnotation,
			"value":     spec.Job,
			"truncated": jobNameForLabel,
		}).Info("Cannot use full job name, will truncate.")
	}
	labels := map[string]string{
		kube.CreatedByProw:     "true",
		kube.ProwJobTypeLabel:  string(spec.Type),
		kube.ProwJobAnnotation: jobNameForLabel,
	}
	if spec.Type != kube.PeriodicJob && spec.Refs != nil {
		labels[kube.OrgLabel] = spec.Refs.Org
		labels[kube.RepoLabel] = spec.Refs.Repo
		if len(spec.Refs.Pulls) > 0 {
			labels[kube.PullLabel] = strconv.Itoa(spec.Refs.Pulls[0].Number)
		}
	}

	for k, v := range extraLabels {
		labels[k] = v
	}

	// let's validate labels
	for key, value := range labels {
		if errs := validation.IsValidLabelValue(value); len(errs) > 0 {
			// if the value is a path with invalid characters, try its basename, which may be a valid label value
			base := filepath.Base(value)
			if errs := validation.IsValidLabelValue(base); len(errs) == 0 {
				labels[key] = base
				continue
			}
			logrus.WithFields(logrus.Fields{
				"key":    key,
				"value":  value,
				"errors": errs,
			}).Warn("Removing invalid label")
			delete(labels, key)
		}
	}

	annotations := map[string]string{
		kube.ProwJobAnnotation: spec.Job,
	}
	for k, v := range extraAnnotations {
		annotations[k] = v
	}

	return labels, annotations
}
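
// exampleLabelsAndAnnotationsForSpec is an illustrative sketch added to this
// listing, not part of the upstream file; the "preset-foo" key is a made-up
// example. It shows how a caller can layer job-specific labels on top of the
// generated ones: extraLabels win on key collisions, and any label value that
// is not valid is replaced by its path basename or dropped by the loop above.
func exampleLabelsAndAnnotationsForSpec(spec kube.ProwJobSpec) (map[string]string, map[string]string) {
	extraLabels := map[string]string{"preset-foo": "true"}
	return LabelsAndAnnotationsForSpec(spec, extraLabels, nil)
}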

// LabelsAndAnnotationsForJob returns a standard set of labels and annotations to add to pod/build/etc resources.
func LabelsAndAnnotationsForJob(pj kube.ProwJob) (map[string]string, map[string]string) {
	var extraLabels map[string]string
	if extraLabels = pj.ObjectMeta.Labels; extraLabels == nil {
		extraLabels = map[string]string{}
	}
	extraLabels[kube.ProwJobIDLabel] = pj.ObjectMeta.Name
	return LabelsAndAnnotationsForSpec(pj.Spec, extraLabels, nil)
}

// ProwJobToPod converts a ProwJob to a Pod that will run the tests.
func ProwJobToPod(pj kube.ProwJob, buildID string) (*v1.Pod, error) {
	if pj.Spec.PodSpec == nil {
		return nil, fmt.Errorf("prowjob %q lacks a pod spec", pj.Name)
	}

	rawEnv, err := downwardapi.EnvForSpec(downwardapi.NewJobSpec(pj.Spec, buildID, pj.Name))
	if err != nil {
		return nil, err
	}

	spec := pj.Spec.PodSpec.DeepCopy()
	spec.RestartPolicy = "Never"
	spec.Containers[0].Name = kube.TestContainerName

	// if the user has not provided a serviceaccount to use or explicitly
	// requested mounting the default token, we treat the unset value as
	// false, while kubernetes treats it as true if it is unset because
	// it was added in v1.6
	if spec.AutomountServiceAccountToken == nil && spec.ServiceAccountName == "" {
		myFalse := false
		spec.AutomountServiceAccountToken = &myFalse
	}

	if pj.Spec.DecorationConfig == nil {
		spec.Containers[0].Env = append(spec.Containers[0].Env, kubeEnv(rawEnv)...)
	} else {
		if err := decorate(spec, &pj, rawEnv); err != nil {
			return nil, fmt.Errorf("error decorating podspec: %v", err)
		}
	}

	podLabels, annotations := LabelsAndAnnotationsForJob(pj)
	return &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:        pj.ObjectMeta.Name,
			Labels:      podLabels,
			Annotations: annotations,
		},
		Spec: *spec,
	}, nil
}
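
// examplePodFromJob is an illustrative sketch added to this listing, not part
// of the upstream file: a controller-style caller hands ProwJobToPod a job
// that already carries a PodSpec plus a freshly allocated build ID, then
// submits the returned Pod to the cluster. The error wrapping is only an
// assumption about how a caller might report failures.
func examplePodFromJob(pj kube.ProwJob, buildID string) (*v1.Pod, error) {
	pod, err := ProwJobToPod(pj, buildID)
	if err != nil {
		return nil, fmt.Errorf("generating pod for prowjob %q: %v", pj.ObjectMeta.Name, err)
	}
	return pod, nil
}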

const cloneLogPath = "clone.json"

// CloneLogPath returns the path to the clone log file in the volume mount.
func CloneLogPath(logMount kube.VolumeMount) string {
	return filepath.Join(logMount.MountPath, cloneLogPath)
}

// Exposed for testing
const (
	cloneRefsName    = "clonerefs"
	cloneRefsCommand = "/clonerefs"
)

// cloneEnv encodes clonerefs Options into json and puts it into an environment variable
func cloneEnv(opt clonerefs.Options) ([]v1.EnvVar, error) {
	// TODO(fejta): use flags
	cloneConfigEnv, err := clonerefs.Encode(opt)
	if err != nil {
		return nil, err
	}
	return kubeEnv(map[string]string{clonerefs.JSONConfigEnvVar: cloneConfigEnv}), nil
}

// sshVolume converts a secret holding ssh keys into the corresponding volume and mount.
//
// This is used by CloneRefs to attach the mount to the clonerefs container.
func sshVolume(secret string) (kube.Volume, kube.VolumeMount) {
	var sshKeyMode int32 = 0400 // this is octal, so symbolic ref is `u+r`
	name := strings.Join([]string{"ssh-keys", secret}, "-")
	mountPath := path.Join("/secrets/ssh", secret)
	v := kube.Volume{
		Name: name,
		VolumeSource: kube.VolumeSource{
			Secret: &kube.SecretSource{
				SecretName:  secret,
				DefaultMode: &sshKeyMode,
			},
		},
	}

	vm := kube.VolumeMount{
		Name:      name,
		MountPath: mountPath,
		ReadOnly:  true,
	}

	return v, vm
}
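
// exampleSSHVolume is an illustrative sketch added to this listing, not part
// of the upstream file; "deploy-key" is a made-up secret name. For that
// secret, sshVolume yields a read-only volume named "ssh-keys-deploy-key"
// mounted at /secrets/ssh/deploy-key with mode 0400, which is the path
// clonerefs is later told to use as a key file.
func exampleSSHVolume() (kube.Volume, kube.VolumeMount) {
	return sshVolume("deploy-key")
}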

// cookiefileVolume converts a secret holding cookies into the corresponding volume and mount.
//
// Secret can be of the form secret-name/base-name or just secret-name.
// Here secret-name refers to the Kubernetes secret volume to mount, and base-name refers to the key in the secret
// where the cookies are stored. The secret-name pattern is equivalent to secret-name/secret-name.
//
// This is used by CloneRefs to attach the mount to the clonerefs container.
// The returned string value is the path to the cookiefile for use with --cookiefile.
func cookiefileVolume(secret string) (kube.Volume, kube.VolumeMount, string) {
	// Separate secret-name/key-in-secret
	parts := strings.SplitN(secret, "/", 2)
	cookieSecret := parts[0]
	var base string
	if len(parts) == 1 {
		base = parts[0] // Assume key-in-secret == secret-name
	} else {
		base = parts[1]
	}
	var cookiefileMode int32 = 0400 // u+r
	vol := kube.Volume{
		Name: "cookiefile",
		VolumeSource: kube.VolumeSource{
			Secret: &kube.SecretSource{
				SecretName:  cookieSecret,
				DefaultMode: &cookiefileMode,
			},
		},
	}
	mount := kube.VolumeMount{
		Name:      vol.Name,
		MountPath: "/secrets/cookiefile", // append base to flag
		ReadOnly:  true,
	}
	return vol, mount, path.Join(mount.MountPath, base)
}
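
// exampleCookiefilePaths is an illustrative sketch added to this listing, not
// part of the upstream file; the secret names are made up. It shows both
// accepted spellings: "oauth-cookie/gerrit" mounts secret "oauth-cookie" and
// returns /secrets/cookiefile/gerrit for --cookiefile, while a bare
// "oauth-cookie" reuses the secret name as the key and returns
// /secrets/cookiefile/oauth-cookie.
func exampleCookiefilePaths() []string {
	_, _, withKey := cookiefileVolume("oauth-cookie/gerrit")
	_, _, bare := cookiefileVolume("oauth-cookie")
	return []string{withKey, bare}
}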

// CloneRefs constructs the container and volumes necessary to clone the refs requested by the ProwJob.
//
// The container checks out repositories specified by the ProwJob Refs to `codeMount`.
// A log of what it checked out is written to `clone.json` in `logMount`.
//
// The container may need to mount SSH keys and/or cookiefiles in order to access private refs.
// CloneRefs returns a list of volumes containing these secrets required by the container.
func CloneRefs(pj kube.ProwJob, codeMount, logMount kube.VolumeMount) (*kube.Container, []kube.Refs, []kube.Volume, error) {
	if pj.Spec.DecorationConfig == nil {
		return nil, nil, nil, nil
	}
	if skip := pj.Spec.DecorationConfig.SkipCloning; skip != nil && *skip {
		return nil, nil, nil, nil
	}
	var cloneVolumes []kube.Volume
	var refs []kube.Refs // Do not return []*kube.Refs which we do not own
	if pj.Spec.Refs != nil {
		refs = append(refs, *pj.Spec.Refs)
	}
	for _, r := range pj.Spec.ExtraRefs {
		refs = append(refs, r)
	}
	if len(refs) == 0 { // nothing to clone
		return nil, nil, nil, nil
	}
	if codeMount.Name == "" || codeMount.MountPath == "" {
		return nil, nil, nil, fmt.Errorf("codeMount must set Name and MountPath")
	}
	if logMount.Name == "" || logMount.MountPath == "" {
		return nil, nil, nil, fmt.Errorf("logMount must set Name and MountPath")
	}

	var cloneMounts []kube.VolumeMount
	var sshKeyPaths []string
	for _, secret := range pj.Spec.DecorationConfig.SSHKeySecrets {
		volume, mount := sshVolume(secret)
		cloneMounts = append(cloneMounts, mount)
		sshKeyPaths = append(sshKeyPaths, mount.MountPath)
		cloneVolumes = append(cloneVolumes, volume)
	}

	var cloneArgs []string
	var cookiefilePath string

	if cp := pj.Spec.DecorationConfig.CookiefileSecret; cp != "" {
		v, vm, vp := cookiefileVolume(cp)
		cloneMounts = append(cloneMounts, vm)
		cloneVolumes = append(cloneVolumes, v)
		cookiefilePath = vp
		cloneArgs = append(cloneArgs, "--cookiefile="+cookiefilePath)
	}

	env, err := cloneEnv(clonerefs.Options{
		CookiePath:       cookiefilePath,
		GitRefs:          refs,
		GitUserEmail:     clonerefs.DefaultGitUserEmail,
		GitUserName:      clonerefs.DefaultGitUserName,
		HostFingerprints: pj.Spec.DecorationConfig.SSHHostFingerprints,
		KeyFiles:         sshKeyPaths,
		Log:              CloneLogPath(logMount),
		SrcRoot:          codeMount.MountPath,
	})
	if err != nil {
		return nil, nil, nil, fmt.Errorf("clone env: %v", err)
	}

	container := kube.Container{
		Name:         cloneRefsName,
		Image:        pj.Spec.DecorationConfig.UtilityImages.CloneRefs,
		Command:      []string{cloneRefsCommand},
		Args:         cloneArgs,
		Env:          env,
		VolumeMounts: append([]kube.VolumeMount{logMount, codeMount}, cloneMounts...),
	}
	return &container, refs, cloneVolumes, nil
}
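
// exampleWireCloneRefs is an illustrative sketch added to this listing, not
// part of the upstream file. It mirrors what the decorate function below does
// with the CloneRefs results: the clone container runs before any existing
// init containers, and its secret volumes are added to the pod. A nil
// container means cloning was skipped (no decoration, SkipCloning, or no refs).
func exampleWireCloneRefs(pj kube.ProwJob, spec *kube.PodSpec, codeMount, logMount kube.VolumeMount) error {
	container, _, volumes, err := CloneRefs(pj, codeMount, logMount)
	if err != nil {
		return err
	}
	if container == nil {
		return nil
	}
	spec.InitContainers = append([]kube.Container{*container}, spec.InitContainers...)
	spec.Volumes = append(spec.Volumes, volumes...)
	return nil
}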
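
// decorate prepares a decorated pod spec in place: the clonerefs container
// returned by CloneRefs is prepended to the init containers, initupload (which
// uploads the clone log) and place-tools (which copies /entrypoint into the
// shared tools volume) are appended after it, the first test container is
// rewritten to run through the entrypoint wrapper, and a sidecar container is
// added to upload the process log and artifacts to GCS.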
func decorate(spec *kube.PodSpec, pj *kube.ProwJob, rawEnv map[string]string) error {
	rawEnv[artifactsEnv] = artifactsPath
	rawEnv[gopathEnv] = codeMountPath
	logMount := kube.VolumeMount{
		Name:      logMountName,
		MountPath: logMountPath,
	}
	logVolume := kube.Volume{
		Name: logMountName,
		VolumeSource: kube.VolumeSource{
			EmptyDir: &kube.EmptyDirVolumeSource{},
		},
	}

	codeMount := kube.VolumeMount{
		Name:      codeMountName,
		MountPath: codeMountPath,
	}
	codeVolume := kube.Volume{
		Name: codeMountName,
		VolumeSource: kube.VolumeSource{
			EmptyDir: &kube.EmptyDirVolumeSource{},
		},
	}

	toolsMount := kube.VolumeMount{
		Name:      toolsMountName,
		MountPath: toolsMountPath,
	}
	toolsVolume := kube.Volume{
		Name: toolsMountName,
		VolumeSource: kube.VolumeSource{
			EmptyDir: &kube.EmptyDirVolumeSource{},
		},
	}

	gcsCredentialsMount := kube.VolumeMount{
		Name:      gcsCredentialsMountName,
		MountPath: gcsCredentialsMountPath,
	}
	gcsCredentialsVolume := kube.Volume{
		Name: gcsCredentialsMountName,
		VolumeSource: kube.VolumeSource{
			Secret: &kube.SecretSource{
				SecretName: pj.Spec.DecorationConfig.GCSCredentialsSecret,
			},
		},
	}

	cloner, refs, cloneVolumes, err := CloneRefs(*pj, codeMount, logMount)
	if err != nil {
		return fmt.Errorf("could not create clonerefs container: %v", err)
	}
	if cloner != nil {
		spec.InitContainers = append([]kube.Container{*cloner}, spec.InitContainers...)
	}

	gcsOptions := gcsupload.Options{
		// TODO: pass the artifact dir here too once we figure that out
		GCSConfiguration:   pj.Spec.DecorationConfig.GCSConfiguration,
		GcsCredentialsFile: fmt.Sprintf("%s/service-account.json", gcsCredentialsMountPath),
		DryRun:             false,
	}

	initUploadOptions := initupload.Options{
		Options: &gcsOptions,
	}
	if cloner != nil {
		initUploadOptions.Log = CloneLogPath(logMount)
	}

	// TODO(fejta): use flags
	initUploadConfigEnv, err := initupload.Encode(initUploadOptions)
	if err != nil {
		return fmt.Errorf("could not encode initupload configuration as JSON: %v", err)
	}

	entrypointLocation := fmt.Sprintf("%s/entrypoint", toolsMountPath)

	spec.InitContainers = append(spec.InitContainers,
		kube.Container{
			Name:    "initupload",
			Image:   pj.Spec.DecorationConfig.UtilityImages.InitUpload,
			Command: []string{"/initupload"},
			Env: kubeEnv(map[string]string{
				initupload.JSONConfigEnvVar: initUploadConfigEnv,
				downwardapi.JobSpecEnv:      rawEnv[downwardapi.JobSpecEnv], // TODO: shouldn't need this?
			}),
			VolumeMounts: []kube.VolumeMount{logMount, gcsCredentialsMount},
		},
		kube.Container{
			Name:         "place-tools",
			Image:        pj.Spec.DecorationConfig.UtilityImages.Entrypoint,
			Command:      []string{"/bin/cp"},
			Args:         []string{"/entrypoint", entrypointLocation},
			VolumeMounts: []kube.VolumeMount{toolsMount},
		},
	)

	wrapperOptions := wrapper.Options{
		ProcessLog:   fmt.Sprintf("%s/process-log.txt", logMountPath),
		MarkerFile:   fmt.Sprintf("%s/marker-file.txt", logMountPath),
		MetadataFile: fmt.Sprintf("%s/metadata.json", artifactsPath),
	}
	// TODO(fejta): use flags
	entrypointConfigEnv, err := entrypoint.Encode(entrypoint.Options{
		Args:        append(spec.Containers[0].Command, spec.Containers[0].Args...),
		Options:     &wrapperOptions,
		Timeout:     pj.Spec.DecorationConfig.Timeout,
		GracePeriod: pj.Spec.DecorationConfig.GracePeriod,
		ArtifactDir: artifactsPath,
	})
	if err != nil {
		return fmt.Errorf("could not encode entrypoint configuration as JSON: %v", err)
	}
	allEnv := rawEnv
	allEnv[entrypoint.JSONConfigEnvVar] = entrypointConfigEnv

	spec.Containers[0].Command = []string{entrypointLocation}
	spec.Containers[0].Args = []string{}
	spec.Containers[0].Env = append(spec.Containers[0].Env, kubeEnv(allEnv)...)
	spec.Containers[0].VolumeMounts = append(spec.Containers[0].VolumeMounts, logMount, toolsMount)

	gcsOptions.Items = append(gcsOptions.Items, artifactsPath)
	// TODO(fejta): use flags
	sidecarConfigEnv, err := sidecar.Encode(sidecar.Options{
		GcsOptions:     &gcsOptions,
		WrapperOptions: &wrapperOptions,
	})
	if err != nil {
		return fmt.Errorf("could not encode sidecar configuration as JSON: %v", err)
	}

	spec.Containers = append(spec.Containers, kube.Container{
		Name:    "sidecar",
		Image:   pj.Spec.DecorationConfig.UtilityImages.Sidecar,
		Command: []string{"/sidecar"},
		Env: kubeEnv(map[string]string{
			sidecar.JSONConfigEnvVar: sidecarConfigEnv,
			downwardapi.JobSpecEnv:   rawEnv[downwardapi.JobSpecEnv], // TODO: shouldn't need this?
		}),
		VolumeMounts: []kube.VolumeMount{logMount, gcsCredentialsMount},
	})
	spec.Volumes = append(spec.Volumes, logVolume, toolsVolume, gcsCredentialsVolume)

	if len(refs) > 0 {
		spec.Containers[0].WorkingDir = clone.PathForRefs(codeMount.MountPath, refs[0])
		spec.Containers[0].VolumeMounts = append(spec.Containers[0].VolumeMounts, codeMount)
		spec.Volumes = append(spec.Volumes, append(cloneVolumes, codeVolume)...)
	}

	return nil
}

// kubeEnv transforms a mapping of environment variables
// into their serialized form for a PodSpec, sorting by
// the name of the env vars
func kubeEnv(environment map[string]string) []v1.EnvVar {
	var keys []string
	for key := range environment {
		keys = append(keys, key)
	}
	sort.Strings(keys)

	var kubeEnvironment []v1.EnvVar
	for _, key := range keys {
		kubeEnvironment = append(kubeEnvironment, v1.EnvVar{
			Name:  key,
			Value: environment[key],
		})
	}

	return kubeEnvironment
}
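
// exampleKubeEnv is an illustrative sketch added to this listing, not part of
// the upstream file; the variable names and values are made up. Map iteration
// order is random in Go, so kubeEnv sorts by key to keep generated pod specs
// deterministic: the input below always comes back as BUILD_ID before JOB_NAME.
func exampleKubeEnv() []v1.EnvVar {
	return kubeEnv(map[string]string{
		"JOB_NAME": "pull-test-infra-bazel",
		"BUILD_ID": "12345",
	})
}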