github.com/jenkins-x/jx/v2@v2.1.155/pkg/tekton/syntax/pipeline.go (about)

     1  package syntax
     2  
     3  import (
     4  	"bytes"
     5  	"context"
     6  	"encoding/json"
     7  	"fmt"
     8  	"os"
     9  	"path/filepath"
    10  	"regexp"
    11  	"sort"
    12  	"strconv"
    13  	"strings"
    14  	"time"
    15  
    16  	v1 "github.com/jenkins-x/jx-api/pkg/apis/jenkins.io/v1"
    17  	"github.com/jenkins-x/jx-logging/pkg/log"
    18  	"github.com/jenkins-x/jx/v2/pkg/kube/naming"
    19  	"github.com/jenkins-x/jx/v2/pkg/util"
    20  	"github.com/jenkins-x/jx/v2/pkg/versionstream"
    21  	"github.com/pkg/errors"
    22  	"github.com/tektoncd/pipeline/pkg/apis/pipeline"
    23  	tektonv1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1"
    24  	tektonv1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
    25  	corev1 "k8s.io/api/core/v1"
    26  	"k8s.io/apimachinery/pkg/api/equality"
    27  	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    28  	"k8s.io/apimachinery/pkg/util/strategicpatch"
    29  	"k8s.io/client-go/kubernetes"
    30  	"knative.dev/pkg/apis"
    31  )
    32  
const (
	// GitMergeImage is the default image name that is used in the git merge step of a pipeline
	GitMergeImage = "gcr.io/jenkinsxio/builder-jx"

	// WorkingDirRoot is the root directory for working directories.
	WorkingDirRoot = "/workspace"

	// braceMatchingRegex matches "${inputs.params.foo}" so we can replace it with "$(inputs.params.foo)"
	braceMatchingRegex = "(\\$(\\{(?P<var>inputs\\.params\\.[_a-zA-Z][_a-zA-Z0-9.-]*)\\}))"
)

var (
	// ipAddressRegistryRegex matches registry hosts written as raw IP addresses with an
	// optional ":port" suffix.
	// NOTE(review): the pattern has five "\d+" groups and the dot before the fifth one is
	// unescaped ("." matches any character). A plain IPv4 match would be
	// `\d+\.\d+\.\d+\.\d+(:\d+)?` — confirm intent before tightening, since existing
	// matches could change.
	ipAddressRegistryRegex = regexp.MustCompile(`\d+\.\d+\.\d+\.\d+.\d+(:\d+)?`)

	// commandIsSkaffoldRegex detects the conventional
	// "export VERSION=... && skaffold build..." command so it can be special-cased.
	commandIsSkaffoldRegex = regexp.MustCompile(`export VERSION=.*? && skaffold build.*`)
)
    49  
// ParsedPipeline is the internal representation of the Pipeline, used to validate and create CRDs
type ParsedPipeline struct {
	// Agent is the default agent for the whole pipeline; stages may override it.
	Agent *Agent `json:"agent,omitempty"`
	// Env holds pipeline-level environment variables. See GetEnv, which falls
	// back to the deprecated Environment field when Env is empty.
	Env     []corev1.EnvVar `json:"env,omitempty"`
	Options *RootOptions    `json:"options,omitempty"`
	// Stages is required — validation fails when no stages are declared.
	Stages []Stage `json:"stages"`
	Post   []Post  `json:"post,omitempty"`
	// WorkingDir optionally overrides the working directory for the pipeline.
	WorkingDir *string `json:"dir,omitempty"`

	// Replaced by Env, retained for backwards compatibility
	Environment []corev1.EnvVar `json:"environment,omitempty"`
}
    62  
// Agent defines where the pipeline, stage, or step should run.
type Agent struct {
	// One of label or image is required.
	Label string `json:"label,omitempty"`
	Image string `json:"image,omitempty"`

	// Legacy fields from jenkinsfile.PipelineAgent.
	// Both are rejected by validateAgent when used in jenkins-x.yml; Container
	// is migrated into Image by DeepCopyForParsedPipeline.
	Container string `json:"container,omitempty"`
	Dir       string `json:"dir,omitempty"`
}
    73  
    74  // TimeoutUnit is used for calculating timeout duration
    75  type TimeoutUnit string
    76  
    77  // The available time units.
    78  const (
    79  	TimeoutUnitSeconds TimeoutUnit = "seconds"
    80  	TimeoutUnitMinutes TimeoutUnit = "minutes"
    81  	TimeoutUnitHours   TimeoutUnit = "hours"
    82  	TimeoutUnitDays    TimeoutUnit = "days"
    83  )
    84  
    85  // All possible time units, used for validation
    86  var allTimeoutUnits = []TimeoutUnit{TimeoutUnitSeconds, TimeoutUnitMinutes, TimeoutUnitHours, TimeoutUnitDays}
    87  
    88  func allTimeoutUnitsAsStrings() []string {
    89  	tu := make([]string, len(allTimeoutUnits))
    90  
    91  	for i, u := range allTimeoutUnits {
    92  		tu[i] = string(u)
    93  	}
    94  
    95  	return tu
    96  }
    97  
// Timeout defines how long a stage or pipeline can run before timing out.
type Timeout struct {
	// Time is the number of Units to wait; interpreted by ToDuration.
	Time int64 `json:"time"`
	// Has some sane default - probably seconds
	Unit TimeoutUnit `json:"unit,omitempty"`
}
   104  
   105  // ToDuration generates a duration struct from a Timeout
   106  func (t *Timeout) ToDuration() (*metav1.Duration, error) {
   107  	durationStr := ""
   108  	// TODO: Populate a default timeout unit, most likely seconds.
   109  	if t.Unit != "" {
   110  		durationStr = fmt.Sprintf("%d%c", t.Time, t.Unit[0])
   111  	} else {
   112  		durationStr = fmt.Sprintf("%ds", t.Time)
   113  	}
   114  
   115  	d, err := time.ParseDuration(durationStr)
   116  	if err != nil {
   117  		return nil, err
   118  	}
   119  	return &metav1.Duration{Duration: d}, nil
   120  }
   121  
// RootOptions contains options that can be configured on either a pipeline or a stage
type RootOptions struct {
	Timeout *Timeout `json:"timeout,omitempty"`
	// Retry must be non-negative; enforced by validateRootOptions.
	Retry int8 `json:"retry,omitempty"`
	// ContainerOptions allows for advanced configuration of containers for a single stage or the whole
	// pipeline, adding to configuration that can be configured through the syntax already. This includes things
	// like CPU/RAM requests/limits, secrets, ports, etc. Some of these things will end up with native syntax approaches
	// down the road.
	ContainerOptions              *corev1.Container   `json:"containerOptions,omitempty"`
	Sidecars                      []*corev1.Container `json:"sidecars,omitempty"`
	Volumes                       []*corev1.Volume    `json:"volumes,omitempty"`
	DistributeParallelAcrossNodes bool                `json:"distributeParallelAcrossNodes,omitempty"`
	Tolerations                   []corev1.Toleration `json:"tolerations,omitempty"`
	PodLabels                     map[string]string   `json:"podLabels,omitempty"`
}

// Stash defines files to be saved for use in a later stage, marked with a name
type Stash struct {
	Name string `json:"name"`
	// Eventually make this optional so that you can do volumes instead
	Files string `json:"files"`
}

// Unstash defines a previously-defined stash to be copied into this stage's workspace
type Unstash struct {
	Name string `json:"name"`
	Dir  string `json:"dir,omitempty"`
}

// StageOptions contains both options that can be configured on either a pipeline or a stage, via
// RootOptions, or stage-specific options.
type StageOptions struct {
	*RootOptions `json:",inline"`

	// TODO: Not yet implemented in build-pipeline
	Stash   *Stash   `json:"stash,omitempty"`
	Unstash *Unstash `json:"unstash,omitempty"`

	// Workspace optionally names the workspace this stage operates in.
	Workspace *string `json:"workspace,omitempty"`
}
   162  
// Step defines a single step, from the author's perspective, to be executed within a stage.
type Step struct {
	// An optional name to give the step for reporting purposes
	Name string `json:"name,omitempty"`

	// One of command, step, or loop is required.
	Command string `json:"command,omitempty"`
	// args is optional, but only allowed with command
	Arguments []string `json:"args,omitempty"`
	// dir is optional, but only allowed with command. Refers to subdirectory of workspace
	Dir string `json:"dir,omitempty"`

	Step string `json:"step,omitempty"`
	// options is optional, but only allowed with step
	// Also, we'll need to do some magic to do type verification during translation - i.e., this step wants a number
	// for this option, so translate the string value for that option to a number.
	Options map[string]string `json:"options,omitempty"`

	Loop *Loop `json:"loop,omitempty"`

	// agent can be overridden on a step
	Agent *Agent `json:"agent,omitempty"`

	// Image allows the docker image for a step to be specified
	Image string `json:"image,omitempty"`

	// env allows defining per-step environment variables
	Env []corev1.EnvVar `json:"env,omitempty"`

	// Legacy fields from jenkinsfile.PipelineStep before it was eliminated.
	// These are rejected by validateStep when used in jenkins-x.yml.
	Comment   string  `json:"comment,omitempty"`
	Groovy    string  `json:"groovy,omitempty"`
	Steps     []*Step `json:"steps,omitempty"`
	When      string  `json:"when,omitempty"`
	Container string  `json:"container,omitempty"`
	Sh        string  `json:"sh,omitempty"`
}

// Loop is a special step that defines a variable, a list of possible values for that variable, and a set of steps to
// repeat for each value for the variable, with the variable set with that value in the environment for the execution of
// those steps.
type Loop struct {
	// The variable name.
	Variable string `json:"variable"`
	// The list of values to iterate over
	Values []string `json:"values"`
	// The steps to run
	Steps []Step `json:"steps"`
}
   212  
// Stage is a unit of work in a pipeline, corresponding either to a Task or a set of Tasks to be run sequentially or in
// parallel with common configuration.
type Stage struct {
	Name  string          `json:"name"`
	Agent *Agent          `json:"agent,omitempty"`
	Env   []corev1.EnvVar `json:"env,omitempty"`
	// Options may carry both shared RootOptions and stage-specific settings.
	Options *StageOptions `json:"options,omitempty"`
	// Exactly one of Steps, Stages, or Parallel must be set; see validateStage.
	Steps      []Step  `json:"steps,omitempty"`
	Stages     []Stage `json:"stages,omitempty"`
	Parallel   []Stage `json:"parallel,omitempty"`
	Post       []Post  `json:"post,omitempty"`
	WorkingDir *string `json:"dir,omitempty"`

	// Replaced by Env, retained for backwards compatibility
	Environment []corev1.EnvVar `json:"environment,omitempty"`
}

// PostCondition is used to specify under what condition a post action should be executed.
type PostCondition string

// Probably extensible down the road
const (
	PostConditionSuccess PostCondition = "success"
	PostConditionFailure PostCondition = "failure"
	PostConditionAlways  PostCondition = "always"
)

// Post contains a PostCondition and one more actions to be executed after a pipeline or stage if the condition is met.
type Post struct {
	// TODO: Conditional execution of something after a Task or Pipeline completes is not yet implemented
	Condition PostCondition `json:"condition"`
	Actions   []PostAction  `json:"actions"`
}

// PostAction contains the name of a built-in post action and options to pass to that action.
type PostAction struct {
	// TODO: Notifications are not yet supported in Build Pipeline per se.
	Name string `json:"name"`
	// Also, we'll need to do some magic to do type verification during translation - i.e., this action wants a number
	// for this option, so translate the string value for that option to a number.
	Options map[string]string `json:"options,omitempty"`
}
   255  
// StepOverrideType is used to specify whether the existing step should be replaced (default), new step(s) should be
// prepended before the existing step, or new step(s) should be appended after the existing step.
type StepOverrideType string

// The available override types
const (
	StepOverrideReplace StepOverrideType = "replace"
	StepOverrideBefore  StepOverrideType = "before"
	StepOverrideAfter   StepOverrideType = "after"
)

// PipelineOverride allows for overriding named steps, stages, or pipelines in the build pack or default pipeline
type PipelineOverride struct {
	Pipeline string `json:"pipeline,omitempty"`
	Stage    string `json:"stage,omitempty"`
	Name     string `json:"name,omitempty"`
	Step     *Step  `json:"step,omitempty"`
	Steps    []*Step `json:"steps,omitempty"`
	// Type defaults to replace when nil (see StepOverrideType docs above).
	Type             *StepOverrideType   `json:"type,omitempty"`
	Agent            *Agent              `json:"agent,omitempty"`
	ContainerOptions *corev1.Container   `json:"containerOptions,omitempty"`
	Sidecars         []*corev1.Container `json:"sidecars,omitempty"`
	Volumes          []*corev1.Volume    `json:"volumes,omitempty"`
}

// Compile-time assertion that ParsedPipeline satisfies knative's apis.Validatable.
var _ apis.Validatable = (*ParsedPipeline)(nil)
   282  
// stageLabelName replaces invalid characters in stage names for label usage.
// It delegates to MangleToRfc1035Label with no suffix, yielding an RFC 1035
// compliant DNS label derived from the stage name.
func (s *Stage) stageLabelName() string {
	return MangleToRfc1035Label(s.Name, "")
}
   287  
// GroovyBlock returns the groovy expression for this step
// Legacy code for Jenkinsfile generation
//
// An optional "// comment" line is emitted first, then exactly one of the
// step's forms, checked in priority order: container image, dir, shell
// command, raw groovy. Child steps are rendered recursively with two extra
// spaces of indentation.
// NOTE(review): the container('...')/dir('...') branches open a "{" block but
// no closing "}" is written here — presumably the caller balances it; confirm
// before reusing this in isolation.
func (s *Step) GroovyBlock(parentIndent string) string {
	var buffer bytes.Buffer
	indent := parentIndent
	if s.Comment != "" {
		buffer.WriteString(indent)
		buffer.WriteString("// ")
		buffer.WriteString(s.Comment)
		buffer.WriteString("\n")
	}
	if s.GetImage() != "" {
		buffer.WriteString(indent)
		buffer.WriteString("container('")
		buffer.WriteString(s.GetImage())
		buffer.WriteString("') {\n")
	} else if s.Dir != "" {
		buffer.WriteString(indent)
		buffer.WriteString("dir('")
		buffer.WriteString(s.Dir)
		buffer.WriteString("') {\n")
	} else if s.GetFullCommand() != "" {
		buffer.WriteString(indent)
		buffer.WriteString("sh \"")
		buffer.WriteString(s.GetFullCommand())
		buffer.WriteString("\"\n")
	} else if s.Groovy != "" {
		lines := strings.Split(s.Groovy, "\n")
		lastIdx := len(lines) - 1
		for i, line := range lines {
			buffer.WriteString(indent)
			buffer.WriteString(line)
			// Open a block on the last groovy line if there are child steps.
			if i >= lastIdx && len(s.Steps) > 0 {
				buffer.WriteString(" {")
			}
			buffer.WriteString("\n")
		}
	}
	childIndent := indent + "  "
	for _, child := range s.Steps {
		buffer.WriteString(child.GroovyBlock(childIndent))
	}
	return buffer.String()
}
   332  
// ToJenkinsfileStatements converts the step to one or more jenkinsfile statements
// Legacy code for Jenkinsfile generation
//
// Mirrors GroovyBlock's priority order (comment, container, dir, command,
// groovy) but produces util.Statement values instead of text. Child steps are
// attached as Children of the LAST statement emitted for this step, so they
// nest under the container/dir/etc. block when rendered.
func (s *Step) ToJenkinsfileStatements() []*util.Statement {
	statements := []*util.Statement{}
	if s.Comment != "" {
		// Blank line before the comment for readability in the output.
		statements = append(statements, &util.Statement{
			Statement: "",
		}, &util.Statement{
			Statement: "// " + s.Comment,
		})
	}
	if s.GetImage() != "" {
		statements = append(statements, &util.Statement{
			Function:  "container",
			Arguments: []string{s.GetImage()},
		})
	} else if s.Dir != "" {
		statements = append(statements, &util.Statement{
			Function:  "dir",
			Arguments: []string{s.Dir},
		})
	} else if s.GetFullCommand() != "" {
		statements = append(statements, &util.Statement{
			Statement: "sh \"" + s.GetFullCommand() + "\"",
		})
	} else if s.Groovy != "" {
		lines := strings.Split(s.Groovy, "\n")
		for _, line := range lines {
			statements = append(statements, &util.Statement{
				Statement: line,
			})
		}
	}
	if len(statements) > 0 {
		// last is a pointer into statements, so appending to last.Children
		// mutates the statement already in the slice.
		last := statements[len(statements)-1]
		for _, c := range s.Steps {
			last.Children = append(last.Children, c.ToJenkinsfileStatements()...)
		}
	}
	return statements
}
   374  
   375  // Validate validates the step is populated correctly
   376  // Legacy code for Jenkinsfile generation
   377  func (s *Step) Validate() error {
   378  	if len(s.Steps) > 0 || s.GetCommand() != "" {
   379  		return nil
   380  	}
   381  	return fmt.Errorf("invalid step %#v as no child steps or command", s)
   382  }
   383  
   384  // PutAllEnvVars puts all the defined environment variables in the given map
   385  // Legacy code for Jenkinsfile generation
   386  func (s *Step) PutAllEnvVars(m map[string]string) {
   387  	for _, step := range s.Steps {
   388  		step.PutAllEnvVars(m)
   389  	}
   390  }
   391  
   392  // GetCommand gets the step's command to execute, opting for Command if set, then Sh.
   393  func (s *Step) GetCommand() string {
   394  	if s.Command != "" {
   395  		return s.Command
   396  	}
   397  
   398  	return s.Sh
   399  }
   400  
   401  // GetFullCommand gets the full command to execute, including arguments.
   402  func (s *Step) GetFullCommand() string {
   403  	cmd := s.GetCommand()
   404  
   405  	// If GetCommand() was an empty string, don't deal with arguments, just return.
   406  	if len(s.Arguments) > 0 && cmd != "" {
   407  		cmd = fmt.Sprintf("%s %s", cmd, strings.Join(s.Arguments, " "))
   408  	}
   409  
   410  	return cmd
   411  }
   412  
   413  // GetImage gets the step's image to run on, opting for Image if set, then Container.
   414  func (s *Step) GetImage() string {
   415  	if s.Image != "" {
   416  		return s.Image
   417  	}
   418  	if s.Agent != nil && s.Agent.Image != "" {
   419  		return s.Agent.Image
   420  	}
   421  
   422  	return s.Container
   423  }
   424  
// DeepCopyForParsedPipeline returns a copy of the Agent with deprecated fields migrated to current ones.
// When the legacy Container field is set on the copy, its value is moved into
// Image (via GetImage, which prefers Image when both are present) and both
// Container and Label are cleared. The receiver itself is never mutated.
func (a *Agent) DeepCopyForParsedPipeline() *Agent {
	agent := a.DeepCopy()
	if agent.Container != "" {
		agent.Image = agent.GetImage()
		agent.Container = ""
		agent.Label = ""
	}

	return agent
}
   436  
   437  // Groovy returns the agent groovy expression for the agent or `any` if its blank
   438  // Legacy code for Jenkinsfile generation
   439  func (a *Agent) Groovy() string {
   440  	if a.Label != "" {
   441  		return fmt.Sprintf(`{
   442      label "%s"
   443    }`, a.Label)
   444  	}
   445  	// lets use any for Prow
   446  	return "any"
   447  }
   448  
   449  // GetImage gets the agent's image to run on, opting for Image if set, then Container.
   450  func (a *Agent) GetImage() string {
   451  	if a.Image != "" {
   452  		return a.Image
   453  	}
   454  
   455  	return a.Container
   456  }
   457  
   458  // MangleToRfc1035Label - Task/Step names need to be RFC 1035/1123 compliant DNS labels, so we mangle
   459  // them to make them compliant. Results should match the following regex and be
   460  // no more than 63 characters long:
   461  //     [a-z]([-a-z0-9]*[a-z0-9])?
   462  // cf. https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
   463  // body is assumed to have at least one ASCII letter.
   464  // suffix is assumed to be alphanumeric and non-empty.
   465  // TODO: Combine with kube.ToValidName (that function needs to handle lengths)
   466  func MangleToRfc1035Label(body string, suffix string) string {
   467  	const maxLabelLength = 63
   468  	maxBodyLength := maxLabelLength
   469  	if len(suffix) > 0 {
   470  		maxBodyLength = maxLabelLength - len(suffix) - 1 // Add an extra hyphen before the suffix
   471  	}
   472  	var sb strings.Builder
   473  	bufferedHyphen := false // Used to make sure we don't output consecutive hyphens.
   474  	for _, codepoint := range body {
   475  		toWrite := 0
   476  		if sb.Len() != 0 { // Digits and hyphens aren't allowed to be the first character
   477  			if codepoint == ' ' || codepoint == '-' || codepoint == '.' {
   478  				bufferedHyphen = true
   479  			} else if codepoint >= '0' && codepoint <= '9' {
   480  				toWrite = 1
   481  			}
   482  		}
   483  
   484  		if codepoint >= 'A' && codepoint <= 'Z' {
   485  			codepoint += ('a' - 'A') // Offset to make character lowercase
   486  			toWrite = 1
   487  		} else if codepoint >= 'a' && codepoint <= 'z' {
   488  			toWrite = 1
   489  		}
   490  
   491  		if toWrite > 0 {
   492  			if bufferedHyphen {
   493  				toWrite++
   494  			}
   495  			if sb.Len()+toWrite > maxBodyLength {
   496  				break
   497  			}
   498  			if bufferedHyphen {
   499  				sb.WriteRune('-')
   500  				bufferedHyphen = false
   501  			}
   502  			sb.WriteRune(codepoint)
   503  		}
   504  	}
   505  
   506  	if suffix != "" {
   507  		sb.WriteRune('-')
   508  		sb.WriteString(suffix)
   509  	}
   510  	return sb.String()
   511  }
   512  
   513  // GetEnv gets the environment for the ParsedPipeline, returning Env first and Environment if Env isn't populated.
   514  func (j *ParsedPipeline) GetEnv() []corev1.EnvVar {
   515  	if j != nil {
   516  		if len(j.Env) > 0 {
   517  			return j.Env
   518  		}
   519  
   520  		return j.Environment
   521  	}
   522  	return []corev1.EnvVar{}
   523  }
   524  
   525  // GetEnv gets the environment for the Stage, returning Env first and Environment if Env isn't populated.
   526  func (s *Stage) GetEnv() []corev1.EnvVar {
   527  	if len(s.Env) > 0 {
   528  		return s.Env
   529  	}
   530  
   531  	return s.Environment
   532  }
   533  
// Validate checks the ParsedPipeline to find any errors in it, without validating against the cluster.
// It delegates to ValidateInCluster with a nil client, which skips checks that
// need cluster access (such as secret/PVC existence for volumes).
func (j *ParsedPipeline) Validate(context context.Context) *apis.FieldError {
	return j.ValidateInCluster(context, nil, "")
}
   538  
   539  // ValidateInCluster checks the parsed ParsedPipeline to find any errors in it, including validation against the cluster.
   540  func (j *ParsedPipeline) ValidateInCluster(context context.Context, kubeClient kubernetes.Interface, ns string) *apis.FieldError {
   541  	if err := validateAgent(j.Agent).ViaField("agent"); err != nil {
   542  		return err
   543  	}
   544  
   545  	var volumes []*corev1.Volume
   546  	if j.Options != nil && len(j.Options.Volumes) > 0 {
   547  		volumes = append(volumes, j.Options.Volumes...)
   548  	}
   549  	if err := validateStages(j.Stages, j.Agent, volumes, kubeClient, ns); err != nil {
   550  		return err
   551  	}
   552  
   553  	if err := validateStageNames(j); err != nil {
   554  		return err
   555  	}
   556  
   557  	if err := validateRootOptions(j.Options, volumes, kubeClient, ns).ViaField("options"); err != nil {
   558  		return err
   559  	}
   560  
   561  	return nil
   562  }
   563  
   564  func validateAgent(a *Agent) *apis.FieldError {
   565  	// TODO: This is the same whether you specify an agent without label or image, or if you don't specify an agent
   566  	// at all, which is nonoptimal.
   567  	if a != nil {
   568  		if a.Container != "" {
   569  			return &apis.FieldError{
   570  				Message: "the container field is deprecated - please use image instead",
   571  				Paths:   []string{"container"},
   572  			}
   573  		}
   574  		if a.Dir != "" {
   575  			return &apis.FieldError{
   576  				Message: "the dir field is only valid in legacy build packs, not in jenkins-x.yml. Please remove it.",
   577  				Paths:   []string{"dir"},
   578  			}
   579  		}
   580  
   581  		if a.Image != "" && a.Label != "" {
   582  			return apis.ErrMultipleOneOf("label", "image")
   583  		}
   584  
   585  		if a.Image == "" && a.Label == "" {
   586  			return apis.ErrMissingOneOf("label", "image")
   587  		}
   588  	}
   589  
   590  	return nil
   591  }
   592  
   593  var containsASCIILetter = regexp.MustCompile(`[a-zA-Z]`).MatchString
   594  
   595  func validateStage(s Stage, parentAgent *Agent, parentVolumes []*corev1.Volume, kubeClient kubernetes.Interface, ns string) *apis.FieldError {
   596  	if len(s.Steps) == 0 && len(s.Stages) == 0 && len(s.Parallel) == 0 {
   597  		return apis.ErrMissingOneOf("steps", "stages", "parallel")
   598  	}
   599  
   600  	if !containsASCIILetter(s.Name) {
   601  		return &apis.FieldError{
   602  			Message: "Stage name must contain at least one ASCII letter",
   603  			Paths:   []string{"name"},
   604  		}
   605  	}
   606  
   607  	var volumes []*corev1.Volume
   608  
   609  	volumes = append(volumes, parentVolumes...)
   610  	if s.Options != nil && s.Options.RootOptions != nil && len(s.Options.Volumes) > 0 {
   611  		volumes = append(volumes, s.Options.Volumes...)
   612  	}
   613  
   614  	stageAgent := s.Agent.DeepCopy()
   615  	if stageAgent == nil {
   616  		stageAgent = parentAgent.DeepCopy()
   617  	}
   618  
   619  	if stageAgent == nil {
   620  		return &apis.FieldError{
   621  			Message: "No agent specified for stage or for its parent(s)",
   622  			Paths:   []string{"agent"},
   623  		}
   624  	}
   625  
   626  	if len(s.Steps) > 0 {
   627  		if len(s.Stages) > 0 || len(s.Parallel) > 0 {
   628  			return apis.ErrMultipleOneOf("steps", "stages", "parallel")
   629  		}
   630  		seenStepNames := make(map[string]int)
   631  		for i, step := range s.Steps {
   632  			if err := validateStep(step).ViaFieldIndex("steps", i); err != nil {
   633  				return err
   634  			}
   635  			if step.Name != "" {
   636  				if count, exists := seenStepNames[step.Name]; exists {
   637  					seenStepNames[step.Name] = count + 1
   638  				} else {
   639  					seenStepNames[step.Name] = 1
   640  				}
   641  			}
   642  		}
   643  
   644  		var duplicateSteps []string
   645  		for k, v := range seenStepNames {
   646  			if v > 1 {
   647  				duplicateSteps = append(duplicateSteps, k)
   648  			}
   649  		}
   650  		if len(duplicateSteps) > 0 {
   651  			sort.Strings(duplicateSteps)
   652  			return &apis.FieldError{
   653  				Message: "step names within a stage must be unique",
   654  				Details: fmt.Sprintf("The following step names in the stage %s are used more than once: %s", s.Name, strings.Join(duplicateSteps, ", ")),
   655  				Paths:   []string{"steps"},
   656  			}
   657  		}
   658  	}
   659  
   660  	if len(s.Stages) > 0 {
   661  		if len(s.Parallel) > 0 {
   662  			return apis.ErrMultipleOneOf("steps", "stages", "parallel")
   663  		}
   664  		for i, stage := range s.Stages {
   665  			if err := validateStage(stage, parentAgent, volumes, kubeClient, ns).ViaFieldIndex("stages", i); err != nil {
   666  				return err
   667  			}
   668  		}
   669  	}
   670  
   671  	if len(s.Parallel) > 0 {
   672  		for i, stage := range s.Parallel {
   673  			if err := validateStage(stage, parentAgent, volumes, kubeClient, ns).ViaFieldIndex("parallel", i); err != nil {
   674  				return nil
   675  			}
   676  		}
   677  	}
   678  
   679  	return validateStageOptions(s.Options, volumes, kubeClient, ns).ViaField("options")
   680  }
   681  
   682  func moreThanOneAreTrue(vals ...bool) bool {
   683  	count := 0
   684  
   685  	for _, v := range vals {
   686  		if v {
   687  			count++
   688  		}
   689  	}
   690  
   691  	return count > 1
   692  }
   693  
   694  func validateStep(s Step) *apis.FieldError {
   695  	// Special cases for when you use legacy build pack syntax inside a pipeline definition
   696  	if s.Container != "" {
   697  		return &apis.FieldError{
   698  			Message: "the container field is deprecated - please use image instead",
   699  			Paths:   []string{"container"},
   700  		}
   701  	}
   702  	if s.Groovy != "" {
   703  		return &apis.FieldError{
   704  			Message: "the groovy field is only valid in legacy build packs, not in jenkins-x.yml. Please remove it.",
   705  			Paths:   []string{"groovy"},
   706  		}
   707  	}
   708  	if s.Comment != "" {
   709  		return &apis.FieldError{
   710  			Message: "the comment field is only valid in legacy build packs, not in jenkins-x.yml. Please remove it.",
   711  			Paths:   []string{"comment"},
   712  		}
   713  	}
   714  	if s.When != "" {
   715  		return &apis.FieldError{
   716  			Message: "the when field is only valid in legacy build packs, not in jenkins-x.yml. Please remove it.",
   717  			Paths:   []string{"when"},
   718  		}
   719  	}
   720  	if len(s.Steps) > 0 {
   721  		return &apis.FieldError{
   722  			Message: "the steps field is only valid in legacy build packs, not in jenkins-x.yml. Please remove it and list the nested stages sequentially instead.",
   723  			Paths:   []string{"steps"},
   724  		}
   725  	}
   726  
   727  	if s.GetCommand() == "" && s.Step == "" && s.Loop == nil {
   728  		return apis.ErrMissingOneOf("command", "step", "loop")
   729  	}
   730  
   731  	if moreThanOneAreTrue(s.GetCommand() != "", s.Step != "", s.Loop != nil) {
   732  		return apis.ErrMultipleOneOf("command", "step", "loop")
   733  	}
   734  
   735  	if (s.GetCommand() != "" || s.Loop != nil) && len(s.Options) != 0 {
   736  		return &apis.FieldError{
   737  			Message: "Cannot set options for a command or a loop",
   738  			Paths:   []string{"options"},
   739  		}
   740  	}
   741  
   742  	if (s.Step != "" || s.Loop != nil) && len(s.Arguments) != 0 {
   743  		return &apis.FieldError{
   744  			Message: "Cannot set command-line arguments for a step or a loop",
   745  			Paths:   []string{"args"},
   746  		}
   747  	}
   748  
   749  	if err := validateLoop(s.Loop); err != nil {
   750  		return err.ViaField("loop")
   751  	}
   752  
   753  	if s.Agent != nil {
   754  		return validateAgent(s.Agent).ViaField("agent")
   755  	}
   756  	return nil
   757  }
   758  
   759  func validateLoop(l *Loop) *apis.FieldError {
   760  	if l != nil {
   761  		if l.Variable == "" {
   762  			return apis.ErrMissingField("variable")
   763  		}
   764  
   765  		if len(l.Steps) == 0 {
   766  			return apis.ErrMissingField("steps")
   767  		}
   768  
   769  		if len(l.Values) == 0 {
   770  			return apis.ErrMissingField("values")
   771  		}
   772  
   773  		for i, step := range l.Steps {
   774  			if err := validateStep(step).ViaFieldIndex("steps", i); err != nil {
   775  				return err
   776  			}
   777  		}
   778  	}
   779  
   780  	return nil
   781  }
   782  
   783  func validateStages(stages []Stage, parentAgent *Agent, parentVolumes []*corev1.Volume, kubeClient kubernetes.Interface, ns string) *apis.FieldError {
   784  	if len(stages) == 0 {
   785  		return apis.ErrMissingField("stages")
   786  	}
   787  
   788  	for i, s := range stages {
   789  		if err := validateStage(s, parentAgent, parentVolumes, kubeClient, ns).ViaFieldIndex("stages", i); err != nil {
   790  			return err
   791  		}
   792  	}
   793  
   794  	return nil
   795  }
   796  
   797  func validateRootOptions(o *RootOptions, volumes []*corev1.Volume, kubeClient kubernetes.Interface, ns string) *apis.FieldError {
   798  	if o != nil {
   799  		if o.Timeout != nil {
   800  			if err := validateTimeout(o.Timeout); err != nil {
   801  				return err.ViaField("timeout")
   802  			}
   803  		}
   804  
   805  		// TODO: retry will default to 0, so we're kinda stuck checking if it's less than zero here.
   806  		if o.Retry < 0 {
   807  			return &apis.FieldError{
   808  				Message: "Retry count cannot be negative",
   809  				Paths:   []string{"retry"},
   810  			}
   811  		}
   812  
   813  		for i, v := range o.Volumes {
   814  			if err := validateVolume(v, kubeClient, ns).ViaFieldIndex("volumes", i); err != nil {
   815  				return err
   816  			}
   817  		}
   818  
   819  		for i, s := range o.Sidecars {
   820  			if err := validateSidecarContainer(s, volumes).ViaFieldIndex("sidecars", i); err != nil {
   821  				return err
   822  			}
   823  		}
   824  
   825  		return validateContainerOptions(o.ContainerOptions, volumes).ViaField("containerOptions")
   826  	}
   827  
   828  	return nil
   829  }
   830  
   831  func validateVolume(v *corev1.Volume, kubeClient kubernetes.Interface, ns string) *apis.FieldError {
   832  	if v != nil {
   833  		if v.Name == "" {
   834  			return apis.ErrMissingField("name")
   835  		}
   836  		if kubeClient != nil {
   837  			if v.Secret != nil {
   838  				_, err := kubeClient.CoreV1().Secrets(ns).Get(v.Secret.SecretName, metav1.GetOptions{})
   839  				if err != nil {
   840  					return &apis.FieldError{
   841  						Message: fmt.Sprintf("Secret %s does not exist, so cannot be used as a volume", v.Secret.SecretName),
   842  						Paths:   []string{"secretName"},
   843  					}
   844  				}
   845  			} else if v.PersistentVolumeClaim != nil {
   846  				_, err := kubeClient.CoreV1().PersistentVolumeClaims(ns).Get(v.PersistentVolumeClaim.ClaimName, metav1.GetOptions{})
   847  				if err != nil {
   848  					return &apis.FieldError{
   849  						Message: fmt.Sprintf("PVC %s does not exist, so cannot be used as a volume", v.PersistentVolumeClaim.ClaimName),
   850  						Paths:   []string{"claimName"},
   851  					}
   852  				}
   853  			}
   854  		}
   855  	}
   856  
   857  	return nil
   858  }
   859  
   860  func validateContainerOptions(c *corev1.Container, volumes []*corev1.Volume) *apis.FieldError {
   861  	if c != nil {
   862  		if len(c.Command) != 0 {
   863  			return &apis.FieldError{
   864  				Message: "Command cannot be specified in containerOptions",
   865  				Paths:   []string{"command"},
   866  			}
   867  		}
   868  		if len(c.Args) != 0 {
   869  			return &apis.FieldError{
   870  				Message: "Arguments cannot be specified in containerOptions",
   871  				Paths:   []string{"args"},
   872  			}
   873  		}
   874  		if c.Image != "" {
   875  			return &apis.FieldError{
   876  				Message: "Image cannot be specified in containerOptions",
   877  				Paths:   []string{"image"},
   878  			}
   879  		}
   880  		if c.WorkingDir != "" {
   881  			return &apis.FieldError{
   882  				Message: "WorkingDir cannot be specified in containerOptions",
   883  				Paths:   []string{"workingDir"},
   884  			}
   885  		}
   886  		if c.Name != "" {
   887  			return &apis.FieldError{
   888  				Message: "Name cannot be specified in containerOptions",
   889  				Paths:   []string{"name"},
   890  			}
   891  		}
   892  		if c.Stdin {
   893  			return &apis.FieldError{
   894  				Message: "Stdin cannot be specified in containerOptions",
   895  				Paths:   []string{"stdin"},
   896  			}
   897  		}
   898  		if c.TTY {
   899  			return &apis.FieldError{
   900  				Message: "TTY cannot be specified in containerOptions",
   901  				Paths:   []string{"tty"},
   902  			}
   903  		}
   904  		if len(c.VolumeMounts) > 0 {
   905  			for i, m := range c.VolumeMounts {
   906  				if !isVolumeMountValid(m, volumes) {
   907  					fieldErr := &apis.FieldError{
   908  						Message: fmt.Sprintf("Volume mount name %s not found in volumes for stage or pipeline", m.Name),
   909  						Paths:   []string{"name"},
   910  					}
   911  
   912  					return fieldErr.ViaFieldIndex("volumeMounts", i)
   913  				}
   914  			}
   915  		}
   916  	}
   917  
   918  	return nil
   919  }
   920  
   921  func validateSidecarContainer(c *corev1.Container, volumes []*corev1.Volume) *apis.FieldError {
   922  	if c != nil {
   923  		if c.Name == "" {
   924  			return &apis.FieldError{
   925  				Message: "Name cannot be empty in sidecar",
   926  				Paths:   []string{"name"},
   927  			}
   928  		}
   929  		if c.Image == "" {
   930  			return &apis.FieldError{
   931  				Message: "Image cannot be empty in sidecar",
   932  				Paths:   []string{"image"},
   933  			}
   934  		}
   935  		if len(c.VolumeMounts) > 0 {
   936  			for i, m := range c.VolumeMounts {
   937  				if !isVolumeMountValid(m, volumes) {
   938  					fieldErr := &apis.FieldError{
   939  						Message: fmt.Sprintf("Volume mount name %s not found in volumes for stage or pipeline", m.Name),
   940  						Paths:   []string{"name"},
   941  					}
   942  
   943  					return fieldErr.ViaFieldIndex("volumeMounts", i)
   944  				}
   945  			}
   946  		}
   947  	}
   948  
   949  	return nil
   950  }
   951  
   952  func isVolumeMountValid(mount corev1.VolumeMount, volumes []*corev1.Volume) bool {
   953  	foundVolume := false
   954  
   955  	for _, v := range volumes {
   956  		if v.Name == mount.Name {
   957  			foundVolume = true
   958  			break
   959  		}
   960  	}
   961  
   962  	return foundVolume
   963  }
   964  
   965  func validateStageOptions(o *StageOptions, volumes []*corev1.Volume, kubeClient kubernetes.Interface, ns string) *apis.FieldError {
   966  	if o != nil {
   967  		if err := validateStash(o.Stash); err != nil {
   968  			return err.ViaField("stash")
   969  		}
   970  
   971  		if o.Unstash != nil {
   972  			if err := validateUnstash(o.Unstash); err != nil {
   973  				return err.ViaField("unstash")
   974  			}
   975  		}
   976  
   977  		if o.Workspace != nil {
   978  			if err := validateWorkspace(*o.Workspace); err != nil {
   979  				return err
   980  			}
   981  		}
   982  
   983  		if o.RootOptions != nil && o.RootOptions.DistributeParallelAcrossNodes {
   984  			return &apis.FieldError{
   985  				Message: "distributeParallelAcrossNodes cannot be used in a stage",
   986  				Paths:   []string{"distributeParallelAcrossNodes"},
   987  			}
   988  		}
   989  
   990  		return validateRootOptions(o.RootOptions, volumes, kubeClient, ns)
   991  	}
   992  
   993  	return nil
   994  }
   995  
   996  func validateTimeout(t *Timeout) *apis.FieldError {
   997  	if t != nil {
   998  		isAllowed := false
   999  		for _, allowed := range allTimeoutUnits {
  1000  			if t.Unit == allowed {
  1001  				isAllowed = true
  1002  			}
  1003  		}
  1004  
  1005  		if !isAllowed {
  1006  			return &apis.FieldError{
  1007  				Message: fmt.Sprintf("%s is not a valid time unit. Valid time units are %s", string(t.Unit),
  1008  					strings.Join(allTimeoutUnitsAsStrings(), ", ")),
  1009  				Paths: []string{"unit"},
  1010  			}
  1011  		}
  1012  
  1013  		if t.Time < 1 {
  1014  			return &apis.FieldError{
  1015  				Message: "Timeout must be greater than zero",
  1016  				Paths:   []string{"time"},
  1017  			}
  1018  		}
  1019  	}
  1020  
  1021  	return nil
  1022  }
  1023  
  1024  func validateUnstash(u *Unstash) *apis.FieldError {
  1025  	if u != nil {
  1026  		// TODO: Check to make sure the corresponding stash is defined somewhere
  1027  		if u.Name == "" {
  1028  			return &apis.FieldError{
  1029  				Message: "The unstash name must be provided",
  1030  				Paths:   []string{"name"},
  1031  			}
  1032  		}
  1033  	}
  1034  
  1035  	return nil
  1036  }
  1037  
  1038  func validateStash(s *Stash) *apis.FieldError {
  1039  	if s != nil {
  1040  		if s.Name == "" {
  1041  			return &apis.FieldError{
  1042  				Message: "The stash name must be provided",
  1043  				Paths:   []string{"name"},
  1044  			}
  1045  		}
  1046  		if s.Files == "" {
  1047  			return &apis.FieldError{
  1048  				Message: "files to stash must be provided",
  1049  				Paths:   []string{"files"},
  1050  			}
  1051  		}
  1052  	}
  1053  
  1054  	return nil
  1055  }
  1056  
  1057  func validateWorkspace(w string) *apis.FieldError {
  1058  	if w == "" {
  1059  		return &apis.FieldError{
  1060  			Message: "The workspace name must be unspecified or non-empty",
  1061  			Paths:   []string{"workspace"},
  1062  		}
  1063  	}
  1064  
  1065  	return nil
  1066  }
  1067  
  1068  // EnvMapToSlice transforms a map of environment variables into a slice that can be used in container configuration
  1069  func EnvMapToSlice(envMap map[string]corev1.EnvVar) []corev1.EnvVar {
  1070  	env := make([]corev1.EnvVar, 0, len(envMap))
  1071  
  1072  	// Avoid nondeterministic results by sorting the keys and appending vars in that order.
  1073  	var envVars []string
  1074  	for k := range envMap {
  1075  		envVars = append(envVars, k)
  1076  	}
  1077  	sort.Strings(envVars)
  1078  
  1079  	for _, envVar := range envVars {
  1080  		env = append(env, envMap[envVar])
  1081  	}
  1082  
  1083  	return env
  1084  }
  1085  
  1086  // GetPodLabels returns the optional additional labels to apply to all pods for this pipeline. The labels and their values
  1087  // will be converted to RFC1035-compliant strings.
  1088  func (j *ParsedPipeline) GetPodLabels() map[string]string {
  1089  	sanitizedLabels := make(map[string]string)
  1090  	if j.Options != nil {
  1091  		for k, v := range j.Options.PodLabels {
  1092  			sanitizedKey := MangleToRfc1035Label(k, "")
  1093  			sanitizedValue := MangleToRfc1035Label(v, "")
  1094  			if sanitizedKey != k || sanitizedValue != v {
  1095  				log.Logger().Infof("Converted custom label/value '%s' to '%s' to conform to Kubernetes label requirements",
  1096  					util.ColorInfo(k+"="+v), util.ColorInfo(sanitizedKey+"="+sanitizedValue))
  1097  			}
  1098  			sanitizedLabels[sanitizedKey] = sanitizedValue
  1099  		}
  1100  	}
  1101  	return sanitizedLabels
  1102  }
  1103  
  1104  // GetTolerations returns the tolerations configured in the root options for this pipeline, if any.
  1105  func (j *ParsedPipeline) GetTolerations() []corev1.Toleration {
  1106  	if j.Options != nil {
  1107  		return j.Options.Tolerations
  1108  	}
  1109  	return nil
  1110  }
  1111  
  1112  // GetPossibleAffinityPolicy takes the pipeline name and returns the appropriate affinity policy for pods in this
  1113  // pipeline given its configuration, specifically of options.distributeParallelAcrossNodes.
  1114  func (j *ParsedPipeline) GetPossibleAffinityPolicy(name string) *corev1.Affinity {
  1115  	if j.Options != nil && j.Options.DistributeParallelAcrossNodes {
  1116  
  1117  		antiAffinityLabels := make(map[string]string)
  1118  		if len(j.Options.PodLabels) > 0 {
  1119  			antiAffinityLabels = util.MergeMaps(j.GetPodLabels())
  1120  		} else {
  1121  			antiAffinityLabels[pipeline.GroupName+pipeline.PipelineRunLabelKey] = name
  1122  		}
  1123  		return &corev1.Affinity{
  1124  			PodAntiAffinity: &corev1.PodAntiAffinity{
  1125  				RequiredDuringSchedulingIgnoredDuringExecution: []corev1.PodAffinityTerm{{
  1126  					LabelSelector: &metav1.LabelSelector{
  1127  						MatchLabels: antiAffinityLabels,
  1128  					},
  1129  					TopologyKey: "kubernetes.io/hostname",
  1130  				}},
  1131  			},
  1132  		}
  1133  	}
  1134  	return nil
  1135  }
  1136  
// StepPlaceholderReplacementArgs specifies the arguments required for replacing placeholders in build pack directories.
type StepPlaceholderReplacementArgs struct {
	// WorkspaceDir is the root directory substituted for the buildpack path
	// placeholder and used to anchor relative step directories.
	WorkspaceDir      string
	// GitName is the repository name, substituted (lowercased) for the
	// app-name placeholder and used to build the kaniko destination image.
	GitName           string
	// GitOrg is the repository organization, substituted (lowercased) for the org placeholder.
	GitOrg            string
	// GitHost is the git provider host, substituted (lowercased) for the git-provider placeholder.
	GitHost           string
	// DockerRegistry is the registry host used when rewriting skaffold steps to kaniko.
	DockerRegistry    string
	// DockerRegistryOrg is the registry organization, substituted (lowercased)
	// for its placeholder and used in the kaniko destination image.
	DockerRegistryOrg string
	// ProjectID is used to build the kaniko cache repository path.
	ProjectID         string
	// KanikoImage is the container image used for rewritten kaniko build steps.
	KanikoImage       string
	// UseKaniko enables rewriting skaffold build steps into kaniko executor invocations.
	UseKaniko         bool
}
  1149  
  1150  func (p *StepPlaceholderReplacementArgs) workingDirAsPointer() *string {
  1151  	// TODO: Is there a better way to ensure that we're creating a pointer of a copy of the string?
  1152  	copyOfWorkingDir := p.WorkspaceDir
  1153  	return &copyOfWorkingDir
  1154  }
  1155  
  1156  // ReplacePlaceholdersInStepAndStageDirs traverses this pipeline's stages and any nested stages for any steps (and any nested steps)
  1157  // within the stages, and replaces "REPLACE_ME_..." placeholders in those steps' directories.
  1158  func (j *ParsedPipeline) ReplacePlaceholdersInStepAndStageDirs(args StepPlaceholderReplacementArgs) {
  1159  	var stages []Stage
  1160  	for _, s := range j.Stages {
  1161  		s.replacePlaceholdersInStage(j.WorkingDir, args)
  1162  		stages = append(stages, s)
  1163  	}
  1164  	j.Stages = stages
  1165  }
  1166  
  1167  func (s *Stage) replacePlaceholdersInStage(parentDir *string, args StepPlaceholderReplacementArgs) {
  1168  	var steps []Step
  1169  	var stages []Stage
  1170  	var parallel []Stage
  1171  	// If there's no working directory and this stage contains steps, we should set a stage directory
  1172  	if s.WorkingDir == nil && len(s.Steps) > 0 {
  1173  		// If there's no parent working directory, use the default provided.
  1174  		if parentDir == nil {
  1175  			s.WorkingDir = args.workingDirAsPointer()
  1176  		} else {
  1177  			s.WorkingDir = parentDir
  1178  		}
  1179  	}
  1180  	s.WorkingDir = replacePlaceholdersInDir(s.WorkingDir, args)
  1181  	for _, step := range s.Steps {
  1182  		step.replacePlaceholdersInStep(args)
  1183  		steps = append(steps, step)
  1184  	}
  1185  	for _, nested := range s.Stages {
  1186  		nested.replacePlaceholdersInStage(s.WorkingDir, args)
  1187  		stages = append(stages, nested)
  1188  	}
  1189  	for _, p := range s.Parallel {
  1190  		p.replacePlaceholdersInStage(s.WorkingDir, args)
  1191  		parallel = append(parallel, p)
  1192  	}
  1193  	s.Steps = steps
  1194  	s.Stages = stages
  1195  	s.Parallel = parallel
  1196  }
  1197  
  1198  func replacePlaceholdersInDir(originalDir *string, args StepPlaceholderReplacementArgs) *string {
  1199  	if originalDir == nil || *originalDir == "" {
  1200  		return originalDir
  1201  	}
  1202  	dir := *originalDir
  1203  	// Replace the Go buildpack path with the correct location for Tekton builds.
  1204  	dir = strings.Replace(dir, "/home/jenkins/go/src/REPLACE_ME_GIT_PROVIDER/REPLACE_ME_ORG/REPLACE_ME_APP_NAME", args.WorkspaceDir, -1)
  1205  
  1206  	dir = strings.Replace(dir, util.PlaceHolderAppName, strings.ToLower(args.GitName), -1)
  1207  	dir = strings.Replace(dir, util.PlaceHolderOrg, strings.ToLower(args.GitOrg), -1)
  1208  	dir = strings.Replace(dir, util.PlaceHolderGitProvider, strings.ToLower(args.GitHost), -1)
  1209  	dir = strings.Replace(dir, util.PlaceHolderDockerRegistryOrg, strings.ToLower(args.DockerRegistryOrg), -1)
  1210  
  1211  	if strings.HasPrefix(dir, "./") {
  1212  		dir = args.WorkspaceDir + strings.TrimPrefix(dir, ".")
  1213  	}
  1214  	if !filepath.IsAbs(dir) {
  1215  		dir = filepath.Join(args.WorkspaceDir, dir)
  1216  	}
  1217  	return &dir
  1218  }
  1219  
  1220  func (s *Step) replacePlaceholdersInStep(args StepPlaceholderReplacementArgs) {
  1221  	if s.GetCommand() != "" {
  1222  		s.modifyStep(args)
  1223  		s.Dir = *replacePlaceholdersInDir(&s.Dir, args)
  1224  	}
  1225  	var steps []*Step
  1226  	for _, nested := range s.Steps {
  1227  		nested.replacePlaceholdersInStep(args)
  1228  		steps = append(steps, nested)
  1229  	}
  1230  	s.Steps = steps
  1231  	if s.Loop != nil {
  1232  		var loopSteps []Step
  1233  		for _, nested := range s.Loop.Steps {
  1234  			nested.replacePlaceholdersInStep(args)
  1235  			loopSteps = append(loopSteps, nested)
  1236  		}
  1237  		s.Loop.Steps = loopSteps
  1238  	}
  1239  }
  1240  
// modifyStep allows a container step to be modified to do something different
//
// Its only current rewrite: when kaniko is enabled, a step that runs
// "skaffold build" (as the command itself, in the arguments after the first,
// or matching the "export VERSION=... && skaffold build..." form) is replaced
// with a direct /kaniko/executor invocation built from the registry details
// in params.
func (s *Step) modifyStep(params StepPlaceholderReplacementArgs) {
	if params.UseKaniko {
		if strings.HasPrefix(s.GetCommand(), "skaffold build") ||
			(len(s.Arguments) > 0 && strings.HasPrefix(strings.Join(s.Arguments[1:], " "), "skaffold build")) ||
			commandIsSkaffoldRegex.MatchString(s.GetCommand()) {

			sourceDir := params.WorkspaceDir
			dockerfile := filepath.Join(sourceDir, "Dockerfile")
			localRepo := params.DockerRegistry
			destination := params.DockerRegistry + "/" + params.DockerRegistryOrg + "/" + naming.ToValidName(params.GitName)

			// Build against the workspace Dockerfile, caching layers under
			// /workspace, and push to the destination image tagged with the
			// pipeline's version parameter.
			args := []string{"--cache=true", "--cache-dir=/workspace",
				"--context=" + sourceDir,
				"--dockerfile=" + dockerfile,
				"--destination=" + destination + ":${inputs.params.version}",
				"--cache-repo=" + localRepo + "/" + params.ProjectID + "/cache",
			}
			// Skip registry TLS verification for anything that isn't gcr.io.
			if localRepo != "gcr.io" {
				args = append(args, "--skip-tls-verify-registry="+localRepo)
			}

			// Registries addressed by raw IP (presumably in-cluster) are
			// treated as insecure.
			if ipAddressRegistryRegex.MatchString(localRepo) {
				args = append(args, "--insecure")
			}

			s.Command = "/kaniko/executor"
			s.Arguments = args

			s.Image = params.KanikoImage
		}
	}
}
  1274  
  1275  // AddContainerEnvVarsToPipeline allows for adding a slice of container environment variables directly to the
  1276  // pipeline, if they're not already defined.
  1277  func (j *ParsedPipeline) AddContainerEnvVarsToPipeline(origEnv []corev1.EnvVar) {
  1278  	if len(origEnv) > 0 {
  1279  		envMap := make(map[string]corev1.EnvVar)
  1280  
  1281  		// Add the container env vars first.
  1282  		for _, e := range origEnv {
  1283  			if e.ValueFrom == nil {
  1284  				envMap[e.Name] = corev1.EnvVar{
  1285  					Name:  e.Name,
  1286  					Value: e.Value,
  1287  				}
  1288  			}
  1289  		}
  1290  
  1291  		// Overwrite with the existing pipeline environment, if it exists
  1292  		for _, e := range j.GetEnv() {
  1293  			envMap[e.Name] = e
  1294  		}
  1295  
  1296  		env := make([]corev1.EnvVar, 0, len(envMap))
  1297  
  1298  		// Avoid nondeterministic results by sorting the keys and appending vars in that order.
  1299  		var envVars []string
  1300  		for k := range envMap {
  1301  			envVars = append(envVars, k)
  1302  		}
  1303  		sort.Strings(envVars)
  1304  
  1305  		for _, envVar := range envVars {
  1306  			env = append(env, envMap[envVar])
  1307  		}
  1308  
  1309  		j.Env = env
  1310  	}
  1311  }
  1312  
  1313  func scopedEnv(newEnv []corev1.EnvVar, parentEnv []corev1.EnvVar) []corev1.EnvVar {
  1314  	if len(parentEnv) == 0 && len(newEnv) == 0 {
  1315  		return nil
  1316  	}
  1317  	return CombineEnv(newEnv, parentEnv)
  1318  }
  1319  
  1320  // CombineEnv combines the two environments into a single unified slice where
  1321  // the `newEnv` overrides anything in the `parentEnv`
  1322  func CombineEnv(newEnv []corev1.EnvVar, parentEnv []corev1.EnvVar) []corev1.EnvVar {
  1323  	envMap := make(map[string]corev1.EnvVar)
  1324  
  1325  	for _, e := range parentEnv {
  1326  		envMap[e.Name] = e
  1327  	}
  1328  
  1329  	for _, e := range newEnv {
  1330  		envMap[e.Name] = e
  1331  	}
  1332  
  1333  	return EnvMapToSlice(envMap)
  1334  }
  1335  
// transformedStage is the intermediate representation of a Stage while the
// pipeline is being converted into Tekton resources.
type transformedStage struct {
	Stage Stage
	// Only one of Sequential, Parallel, and Task is non-empty
	Sequential []*transformedStage
	Parallel   []*transformedStage
	Task       *tektonv1alpha1.Task
	// PipelineTask is non-empty only if Task is non-empty, but it is populated
	// after Task is populated so the reverse is not true.
	PipelineTask *tektonv1alpha1.PipelineTask
	// The depth of this stage in the full tree of stages
	Depth int8
	// The parallel or sequential stage enclosing this stage, or nil if this stage is at top level
	EnclosingStage *transformedStage
	// The stage immediately before this stage at the same depth, or nil if there is no such stage
	PreviousSiblingStage *transformedStage
	// TODO: Add the equivalent reverse relationship
}
  1353  
  1354  func (ts transformedStage) toPipelineStructureStage() v1.PipelineStructureStage {
  1355  	s := v1.PipelineStructureStage{
  1356  		Name:  ts.Stage.Name,
  1357  		Depth: ts.Depth,
  1358  	}
  1359  
  1360  	if ts.EnclosingStage != nil {
  1361  		s.Parent = &ts.EnclosingStage.Stage.Name
  1362  	}
  1363  
  1364  	if ts.PreviousSiblingStage != nil {
  1365  		s.Previous = &ts.PreviousSiblingStage.Stage.Name
  1366  	}
  1367  	// TODO: Add the equivalent reverse relationship
  1368  
  1369  	if ts.PipelineTask != nil {
  1370  		s.TaskRef = &ts.PipelineTask.TaskRef.Name
  1371  	}
  1372  
  1373  	if len(ts.Parallel) > 0 {
  1374  		for _, n := range ts.Parallel {
  1375  			s.Parallel = append(s.Parallel, n.Stage.Name)
  1376  		}
  1377  	}
  1378  
  1379  	if len(ts.Sequential) > 0 {
  1380  		for _, n := range ts.Sequential {
  1381  			s.Stages = append(s.Stages, n.Stage.Name)
  1382  		}
  1383  	}
  1384  
  1385  	return s
  1386  }
  1387  
  1388  func (ts transformedStage) getAllAsPipelineStructureStages() []v1.PipelineStructureStage {
  1389  	var stages []v1.PipelineStructureStage
  1390  
  1391  	stages = append(stages, ts.toPipelineStructureStage())
  1392  
  1393  	if len(ts.Parallel) > 0 {
  1394  		for _, n := range ts.Parallel {
  1395  			stages = append(stages, n.getAllAsPipelineStructureStages()...)
  1396  		}
  1397  	}
  1398  
  1399  	if len(ts.Sequential) > 0 {
  1400  		for _, n := range ts.Sequential {
  1401  			stages = append(stages, n.getAllAsPipelineStructureStages()...)
  1402  		}
  1403  	}
  1404  
  1405  	return stages
  1406  }
  1407  
  1408  func (ts transformedStage) isSequential() bool {
  1409  	return len(ts.Sequential) > 0
  1410  }
  1411  
  1412  func (ts transformedStage) isParallel() bool {
  1413  	return len(ts.Parallel) > 0
  1414  }
  1415  
  1416  func (ts transformedStage) getLinearTasks() []*tektonv1alpha1.Task {
  1417  	if ts.isSequential() {
  1418  		var tasks []*tektonv1alpha1.Task
  1419  		for _, seqTs := range ts.Sequential {
  1420  			tasks = append(tasks, seqTs.getLinearTasks()...)
  1421  		}
  1422  		return tasks
  1423  	} else if ts.isParallel() {
  1424  		var tasks []*tektonv1alpha1.Task
  1425  		for _, parTs := range ts.Parallel {
  1426  			tasks = append(tasks, parTs.getLinearTasks()...)
  1427  		}
  1428  		return tasks
  1429  	} else {
  1430  		return []*tektonv1alpha1.Task{ts.Task}
  1431  	}
  1432  }
  1433  
  1434  // If the workspace is nil, sets it to the parent's workspace
  1435  func (ts *transformedStage) computeWorkspace(parentWorkspace string) {
  1436  	if ts.Stage.Options == nil {
  1437  		ts.Stage.Options = &StageOptions{
  1438  			RootOptions: &RootOptions{},
  1439  		}
  1440  	}
  1441  	if ts.Stage.Options.Workspace == nil {
  1442  		ts.Stage.Options.Workspace = &parentWorkspace
  1443  	}
  1444  }
  1445  
// stageToTaskParams bundles everything stageToTask needs to convert a single
// stage (and, recursively, its children) into a transformedStage.
type stageToTaskParams struct {
	// parentParams carries the pipeline-wide CRD-generation settings
	// (namespace, labels, default image, versions dir, source dir, etc.).
	parentParams         CRDsFromPipelineParams
	// stage is the stage being converted.
	stage                Stage
	// baseWorkingDir is the inherited working directory; overridden by the
	// stage's own WorkingDir when set.
	baseWorkingDir       *string
	// parentEnv, parentAgent, parentWorkspace, parentContainer,
	// parentSidecars, and parentVolumes are settings inherited from the
	// enclosing stage or pipeline.
	parentEnv            []corev1.EnvVar
	parentAgent          *Agent
	parentWorkspace      string
	parentContainer      *corev1.Container
	parentSidecars       []*corev1.Container
	parentVolumes        []*corev1.Volume
	// depth is this stage's depth in the stage tree.
	depth                int8
	// enclosingStage is the parent transformedStage, nil at top level.
	enclosingStage       *transformedStage
	// previousSiblingStage is the preceding stage at the same depth, if any.
	previousSiblingStage *transformedStage
}
  1460  
// stageToTask converts a single Stage into a transformedStage, recursing into
// any nested sequential or parallel stages. A stage with steps becomes one
// Tekton Task; a stage with nested/parallel stages becomes a wrapper around
// its converted children. A stage with none of these is an error.
func stageToTask(params stageToTaskParams) (*transformedStage, error) {
	// Post-stage actions are not supported in the Tekton conversion yet.
	if len(params.stage.Post) != 0 {
		return nil, errors.New("post on stages not yet supported")
	}

	stageContainer := &corev1.Container{}
	var stageSidecars []*corev1.Container
	var stageVolumes []*corev1.Volume

	// Pull container options, sidecars, and volumes out of the stage options,
	// rejecting option kinds that are not yet supported on stages.
	if params.stage.Options != nil {
		o := params.stage.Options
		if o.RootOptions == nil {
			o.RootOptions = &RootOptions{}
		} else {
			if o.Timeout != nil {
				return nil, errors.New("Timeout on stage not yet supported")
			}
			if o.ContainerOptions != nil {
				stageContainer = o.ContainerOptions
			}
			stageSidecars = o.Sidecars
			stageVolumes = o.Volumes
		}
		if o.Stash != nil {
			return nil, errors.New("Stash on stage not yet supported")
		}
		if o.Unstash != nil {
			return nil, errors.New("Unstash on stage not yet supported")
		}
	}

	// Don't overwrite the inherited working dir if we don't have one specified here.
	if params.stage.WorkingDir != nil {
		params.baseWorkingDir = params.stage.WorkingDir
	}

	// Merge the stage's container options on top of the parent's.
	if params.parentContainer != nil {
		merged, err := MergeContainers(params.parentContainer, stageContainer)
		if err != nil {
			return nil, errors.Wrapf(err, "Error merging stage and parent container overrides: %s", err)
		}
		stageContainer = merged
	}
	// Sidecars and volumes are inherited additively rather than merged.
	stageSidecars = append(stageSidecars, params.parentSidecars...)
	stageVolumes = append(stageVolumes, params.parentVolumes...)

	env := scopedEnv(params.stage.GetEnv(), params.parentEnv)

	// Use the stage's own agent if set, otherwise inherit the parent's.
	agent := params.stage.Agent.DeepCopy()

	if agent == nil {
		agent = params.parentAgent.DeepCopy()
	}

	stepCounter := 0
	defaultTaskSpec, err := getDefaultTaskSpec(env, stageContainer, params.parentParams.DefaultImage, params.parentParams.VersionsDir)
	if err != nil {
		return nil, err
	}

	// Case 1: the stage has steps, so it maps to a single Tekton Task.
	if len(params.stage.Steps) > 0 {
		t := &tektonv1alpha1.Task{
			TypeMeta: metav1.TypeMeta{
				APIVersion: TektonAPIVersion,
				Kind:       "Task",
			},
			ObjectMeta: metav1.ObjectMeta{
				Namespace: params.parentParams.Namespace,
				Name:      MangleToRfc1035Label(fmt.Sprintf("%s-%s", params.parentParams.PipelineIdentifier, params.stage.Name), params.parentParams.BuildIdentifier),
				Labels:    util.MergeMaps(params.parentParams.Labels, map[string]string{LabelStageName: params.stage.stageLabelName()}),
			},
		}
		// Only add the default git merge step if this is the first actual step stage - including if the stage is one of
		// N stages within a parallel stage, and that parallel stage is the first stage in the pipeline
		if params.previousSiblingStage == nil && isNestedFirstStepsStage(params.enclosingStage) {
			t.Spec = defaultTaskSpec
		}
		prependedSteps, err := builderHomeStep(env, stageContainer, params.parentParams.DefaultImage, params.parentParams.VersionsDir)
		if err != nil {
			return nil, err
		}
		// The builder-home step always runs before any other steps in the Task.
		t.Spec.Steps = append(prependedSteps, t.Spec.Steps...)
		t.SetDefaults(context.Background())

		// The same git workspace resource is declared as both input and
		// output so it can flow between Tasks.
		ws := &tektonv1alpha1.TaskResource{
			ResourceDeclaration: tektonv1alpha1.ResourceDeclaration{
				Name:       "workspace",
				TargetPath: params.parentParams.SourceDir,
				Type:       tektonv1alpha1.PipelineResourceTypeGit,
			},
		}

		t.Spec.Inputs = &tektonv1alpha1.Inputs{
			Resources: []tektonv1alpha1.TaskResource{*ws},
		}

		t.Spec.Outputs = &tektonv1alpha1.Outputs{
			Resources: []tektonv1alpha1.TaskResource{*ws},
		}

		for _, sidecar := range stageSidecars {
			if sidecar != nil {
				t.Spec.Sidecars = append(t.Spec.Sidecars, tektonv1beta1.Sidecar{
					Container: *sidecar,
				})
			}
		}

		// We don't want to dupe volumes for the Task if there are multiple steps
		volumes := make(map[string]corev1.Volume)

		for _, v := range stageVolumes {
			volumes[v.Name] = *v
		}

		// Generate the Tekton steps (and any volumes they need) for each
		// stage step, threading the step counter through for unique names.
		for _, step := range params.stage.Steps {
			actualSteps, stepVolumes, newCounter, err := generateSteps(generateStepsParams{
				stageParams:     params,
				step:            step,
				inheritedAgent:  agent.Image,
				env:             env,
				parentContainer: stageContainer,
				stepCounter:     stepCounter,
			})
			if err != nil {
				return nil, err
			}

			stepCounter = newCounter

			t.Spec.Steps = append(t.Spec.Steps, actualSteps...)
			for k, v := range stepVolumes {
				volumes[k] = v
			}
		}

		// Avoid nondeterministic results by sorting the keys and appending volumes in that order.
		var volNames []string
		for k := range volumes {
			volNames = append(volNames, k)
		}
		sort.Strings(volNames)

		for _, v := range volNames {
			t.Spec.Volumes = append(t.Spec.Volumes, volumes[v])
		}

		ts := transformedStage{Stage: params.stage, Task: t, Depth: params.depth, EnclosingStage: params.enclosingStage, PreviousSiblingStage: params.previousSiblingStage}
		ts.computeWorkspace(params.parentWorkspace)
		return &ts, nil
	}
	// Case 2: the stage wraps nested sequential stages; convert each child,
	// linking each to its previous sibling.
	if len(params.stage.Stages) > 0 {
		var tasks []*transformedStage
		ts := transformedStage{Stage: params.stage, Depth: params.depth, EnclosingStage: params.enclosingStage, PreviousSiblingStage: params.previousSiblingStage}
		ts.computeWorkspace(params.parentWorkspace)

		for i, nested := range params.stage.Stages {
			var nestedPreviousSibling *transformedStage
			if i > 0 {
				nestedPreviousSibling = tasks[i-1]
			}
			nestedTask, err := stageToTask(stageToTaskParams{
				parentParams:         params.parentParams,
				stage:                nested,
				baseWorkingDir:       params.baseWorkingDir,
				parentEnv:            env,
				parentAgent:          agent,
				parentWorkspace:      *ts.Stage.Options.Workspace,
				parentContainer:      stageContainer,
				parentSidecars:       stageSidecars,
				parentVolumes:        stageVolumes,
				depth:                params.depth + 1,
				enclosingStage:       &ts,
				previousSiblingStage: nestedPreviousSibling,
			})
			if err != nil {
				return nil, err
			}
			tasks = append(tasks, nestedTask)
		}
		ts.Sequential = tasks

		return &ts, nil
	}

	// Case 3: the stage wraps parallel stages; convert each child with no
	// previous-sibling link, since they run concurrently.
	if len(params.stage.Parallel) > 0 {
		var tasks []*transformedStage
		ts := transformedStage{Stage: params.stage, Depth: params.depth, EnclosingStage: params.enclosingStage, PreviousSiblingStage: params.previousSiblingStage}
		ts.computeWorkspace(params.parentWorkspace)

		for _, nested := range params.stage.Parallel {
			nestedTask, err := stageToTask(stageToTaskParams{
				parentParams:    params.parentParams,
				stage:           nested,
				baseWorkingDir:  params.baseWorkingDir,
				parentEnv:       env,
				parentAgent:     agent,
				parentWorkspace: *ts.Stage.Options.Workspace,
				parentContainer: stageContainer,
				parentSidecars:  stageSidecars,
				parentVolumes:   stageVolumes,
				depth:           params.depth + 1,
				enclosingStage:  &ts,
			})
			if err != nil {
				return nil, err
			}
			tasks = append(tasks, nestedTask)
		}
		ts.Parallel = tasks

		return &ts, nil
	}
	return nil, errors.New("no steps, sequential stages, or parallel stages")
}
  1676  
  1677  // MergeContainers combines parent and child container structs, with the child overriding the parent.
  1678  func MergeContainers(parentContainer, childContainer *corev1.Container) (*corev1.Container, error) {
  1679  	if parentContainer == nil {
  1680  		return childContainer, nil
  1681  	} else if childContainer == nil {
  1682  		return parentContainer, nil
  1683  	}
  1684  
  1685  	// We need JSON bytes to generate a patch to merge the child containers onto the parent container, so marshal the parent.
  1686  	parentAsJSON, err := json.Marshal(parentContainer)
  1687  	if err != nil {
  1688  		return nil, err
  1689  	}
  1690  	// We need to do a three-way merge to actually combine the parent and child containers, so we need an empty container
  1691  	// as the "original"
  1692  	emptyAsJSON, err := json.Marshal(&corev1.Container{})
  1693  	if err != nil {
  1694  		return nil, err
  1695  	}
  1696  	// Marshal the child to JSON
  1697  	childAsJSON, err := json.Marshal(childContainer)
  1698  	if err != nil {
  1699  		return nil, err
  1700  	}
  1701  
  1702  	// Get the patch meta for Container, which is needed for generating and applying the merge patch.
  1703  	patchSchema, err := strategicpatch.NewPatchMetaFromStruct(parentContainer)
  1704  
  1705  	if err != nil {
  1706  		return nil, err
  1707  	}
  1708  
  1709  	// Create a merge patch, with the empty JSON as the original, the child JSON as the modified, and the parent
  1710  	// JSON as the current - this lets us do a deep merge of the parent and child containers, with awareness of
  1711  	// the "patchMerge" tags.
  1712  	patch, err := strategicpatch.CreateThreeWayMergePatch(emptyAsJSON, childAsJSON, parentAsJSON, patchSchema, true)
  1713  	if err != nil {
  1714  		return nil, err
  1715  	}
  1716  
  1717  	// Actually apply the merge patch to the parent JSON.
  1718  	mergedAsJSON, err := strategicpatch.StrategicMergePatchUsingLookupPatchMeta(parentAsJSON, patch, patchSchema)
  1719  	if err != nil {
  1720  		return nil, err
  1721  	}
  1722  
  1723  	// Unmarshal the merged JSON to a Container pointer, and return it.
  1724  	merged := &corev1.Container{}
  1725  	err = json.Unmarshal(mergedAsJSON, merged)
  1726  	if err != nil {
  1727  		return nil, err
  1728  	}
  1729  
  1730  	return merged, nil
  1731  }
  1732  
  1733  func isNestedFirstStepsStage(enclosingStage *transformedStage) bool {
  1734  	if enclosingStage != nil {
  1735  		if enclosingStage.PreviousSiblingStage != nil {
  1736  			return false
  1737  		}
  1738  		return isNestedFirstStepsStage(enclosingStage.EnclosingStage)
  1739  	}
  1740  	return true
  1741  }
  1742  
// generateStepsParams bundles the inputs to generateSteps.
type generateStepsParams struct {
	// stageParams carries the parameters of the enclosing stage-to-task conversion.
	stageParams     stageToTaskParams
	// step is the parsed step to convert into one or more Tekton steps.
	step            Step
	// inheritedAgent is the image inherited from the enclosing agent; used
	// when the step does not specify its own image.
	inheritedAgent  string
	// env is the environment inherited from enclosing scopes.
	env             []corev1.EnvVar
	// parentContainer, if non-nil, supplies defaults merged into each step's container.
	parentContainer *corev1.Container
	// stepCounter tracks how many steps have been generated so far, for default step naming.
	stepCounter     int
}
  1751  
// generateSteps converts a single parsed step (or loop of steps) into Tekton
// steps. It returns the generated steps, any volumes required by matching pod
// templates, the updated step counter (used for default step names), and an
// error if the step kind is unsupported or merging containers fails.
func generateSteps(params generateStepsParams) ([]tektonv1alpha1.Step, map[string]corev1.Volume, int, error) {
	volumes := make(map[string]corev1.Volume)
	var steps []tektonv1alpha1.Step

	// The step's own image wins over the inherited agent image.
	stepImage := params.inheritedAgent
	if params.step.GetImage() != "" {
		stepImage = params.step.GetImage()
	}

	// Default to ${WorkingDirRoot}/${sourceDir}
	workingDir := filepath.Join(WorkingDirRoot, params.stageParams.parentParams.SourceDir)

	// Directory we will cd to if it differs from the working dir.
	targetDir := workingDir

	// Step-level dir wins over the stage's base working dir.
	if params.step.Dir != "" {
		targetDir = params.step.Dir
	} else if params.stageParams.baseWorkingDir != nil {
		targetDir = *(params.stageParams.baseWorkingDir)
	}
	// Relative working directories are always just added to /workspace/source, e.g.
	if !filepath.IsAbs(targetDir) {
		targetDir = filepath.Join(WorkingDirRoot, params.stageParams.parentParams.SourceDir, targetDir)
	}

	if params.step.GetCommand() != "" {
		// Outside interpret mode the working dir stays fixed, so a differing
		// target dir is reached by prefixing the command with "cd <dir> &&".
		var targetDirPrefix []string
		if targetDir != workingDir && !params.stageParams.parentParams.InterpretMode {
			targetDirPrefix = append(targetDirPrefix, "cd", targetDir, "&&")
		}
		c := &corev1.Container{}
		if params.parentContainer != nil {
			c = params.parentContainer.DeepCopy()
		}
		// If a pod template exists for the step image, inherit its first
		// container's configuration and record any volumes it declares.
		if params.stageParams.parentParams.PodTemplates != nil && params.stageParams.parentParams.PodTemplates[stepImage] != nil {
			podTemplate := params.stageParams.parentParams.PodTemplates[stepImage]
			containers := podTemplate.Spec.Containers
			for _, volume := range podTemplate.Spec.Volumes {
				volumes[volume.Name] = volume
			}
			// Merge the parent container (if any) over the pod template container.
			if !equality.Semantic.DeepEqual(c, &corev1.Container{}) {
				merged, err := MergeContainers(&containers[0], c)
				if err != nil {
					return nil, nil, params.stepCounter, errors.Wrapf(err, "Error merging pod template and parent container: %s", err)
				}
				c = merged
			} else {
				c = &containers[0]
			}
		} else {
			// No pod template: run the step image directly under a shell.
			c.Image = stepImage
			c.Command = []string{util.GetSh(), "-c"}
		}

		// Resolve the image through the version stream; on failure keep the
		// unresolved image and just warn.
		resolvedImage, err := versionstream.ResolveDockerImage(params.stageParams.parentParams.VersionsDir, c.Image)
		if err != nil {
			log.Logger().Warnf("failed to resolve step image version: %s due to %s", c.Image, err.Error())
		} else {
			c.Image = resolvedImage
		}
		// Special-casing for commands starting with /kaniko/warmer, which doesn't have sh at all
		if strings.HasPrefix(params.step.GetCommand(), "/kaniko/warmer") {
			c.Command = append(targetDirPrefix, params.step.GetCommand())
			c.Args = params.step.Arguments
		} else {
			// If it's /kaniko/executor, use /busybox/sh instead of /bin/sh, and use the debug image
			if strings.HasPrefix(params.step.GetCommand(), "/kaniko/executor") && strings.Contains(c.Image, "gcr.io/kaniko-project") {
				if !strings.Contains(c.Image, "debug") {
					c.Image = strings.Replace(c.Image, "/executor:", "/executor:debug-", 1)
				}
				c.Command = []string{"/busybox/sh", "-c"}
			}
			// Collapse command, arguments, and any "cd" prefix into a single
			// shell string passed as the sole argument.
			cmdStr := params.step.GetCommand()
			if len(params.step.Arguments) > 0 {
				cmdStr += " " + strings.Join(params.step.Arguments, " ")
			}
			if len(targetDirPrefix) > 0 {
				cmdStr = strings.Join(targetDirPrefix, " ") + " " + cmdStr
			}
			c.Args = []string{cmdStr}
		}
		if params.stageParams.parentParams.InterpretMode {
			c.WorkingDir = targetDir
		} else {
			// Convert legacy "${inputs.params...}" references to Tekton's
			// "$(...)" syntax in both command and args.
			var newCmd []string
			var newArgs []string
			for _, c := range c.Command {
				newCmd = append(newCmd, ReplaceCurlyWithParen(c))
			}
			c.Command = newCmd
			for _, a := range c.Args {
				newArgs = append(newArgs, ReplaceCurlyWithParen(a))
			}
			c.Args = newArgs
			c.WorkingDir = workingDir
		}
		params.stepCounter++
		// Use the step's declared name when present, otherwise a generated
		// "stepN" name from the counter.
		if params.step.Name != "" {
			c.Name = MangleToRfc1035Label(params.step.Name, "")
		} else {
			c.Name = "step" + strconv.Itoa(1+params.stepCounter)
		}

		c.Stdin = false
		c.TTY = false
		// Step-level env wins over inherited env, which wins over container env.
		c.Env = scopedEnv(params.step.Env, scopedEnv(params.env, c.Env))

		steps = append(steps, tektonv1alpha1.Step{
			Container: *c,
		})
	} else if params.step.Loop != nil {
		// Expand the loop: generate the nested steps once per loop value, with
		// the loop variable injected into the environment.
		for i, v := range params.step.Loop.Values {
			loopEnv := scopedEnv([]corev1.EnvVar{{Name: params.step.Loop.Variable, Value: v}}, params.env)

			for _, s := range params.step.Loop.Steps {
				// Suffix named steps with the 1-based iteration number to keep
				// names unique across iterations.
				if s.Name != "" {
					s.Name = s.Name + strconv.Itoa(1+i)
				}
				loopSteps, loopVolumes, loopCounter, loopErr := generateSteps(generateStepsParams{
					stageParams:     params.stageParams,
					step:            s,
					inheritedAgent:  stepImage,
					env:             loopEnv,
					parentContainer: params.parentContainer,
					stepCounter:     params.stepCounter,
				})
				if loopErr != nil {
					return nil, nil, loopCounter, loopErr
				}

				// Bump the step counter to what we got from the loop
				params.stepCounter = loopCounter

				// Add the loop-generated steps
				steps = append(steps, loopSteps...)

				// Add any new volumes that may have shown up
				for k, v := range loopVolumes {
					volumes[k] = v
				}
			}
		}
	} else {
		return nil, nil, params.stepCounter, errors.New("syntactic sugar steps not yet supported")
	}

	// lets make sure if we've overloaded any environment variables we remove any remaining valueFrom structs
	// to avoid creating bad Tasks
	for i, step := range steps {
		for j, e := range step.Env {
			if e.Value != "" {
				steps[i].Env[j].ValueFrom = nil
			}
		}
	}

	return steps, volumes, params.stepCounter, nil
}
  1910  
  1911  // PipelineRunName returns the pipeline name given the pipeline and build identifier
  1912  func PipelineRunName(pipelineIdentifier string, buildIdentifier string) string {
  1913  	return MangleToRfc1035Label(fmt.Sprintf("%s", pipelineIdentifier), buildIdentifier)
  1914  }
  1915  
// CRDsFromPipelineParams is how the parameters to GenerateCRDs are specified
type CRDsFromPipelineParams struct {
	// PipelineIdentifier names the pipeline; combined with BuildIdentifier to
	// form the generated resource names.
	PipelineIdentifier string
	// BuildIdentifier identifies the specific build.
	BuildIdentifier    string
	// Namespace is the Kubernetes namespace the generated CRDs are placed in.
	Namespace          string
	// PodTemplates maps image names to pod templates whose first container
	// configures steps that use that image.
	PodTemplates       map[string]*corev1.Pod
	// VersionsDir is the version stream directory used to resolve image versions.
	VersionsDir        string
	// TaskParams are default input params applied to generated tasks that declare none.
	TaskParams         []tektonv1alpha1.ParamSpec
	// SourceDir is the source directory name under /workspace.
	SourceDir          string
	// Labels are applied to the generated Pipeline and PipelineStructure.
	Labels             map[string]string
	// DefaultImage overrides the default builder image when non-empty.
	DefaultImage       string
	// InterpretMode, when true, sets each step's working dir directly instead
	// of prefixing commands with "cd".
	InterpretMode      bool
}
  1929  
// GenerateCRDs translates the Pipeline structure into the corresponding Pipeline and Task CRDs
func (j *ParsedPipeline) GenerateCRDs(params CRDsFromPipelineParams) (*tektonv1alpha1.Pipeline, []*tektonv1alpha1.Task, *v1.PipelineStructure, error) {
	if len(j.Post) != 0 {
		return nil, nil, nil, errors.New("Post at top level not yet supported")
	}

	// Top-level options become the parent configuration inherited by every stage.
	var parentContainer *corev1.Container
	var parentSidecars []*corev1.Container
	var parentVolumes []*corev1.Volume

	baseWorkingDir := j.WorkingDir

	if j.Options != nil {
		o := j.Options
		if o.Retry > 0 {
			return nil, nil, nil, errors.New("Retry at top level not yet supported")
		}
		parentContainer = o.ContainerOptions
		parentSidecars = o.Sidecars
		parentVolumes = o.Volumes
	}

	// The Pipeline declares a single git resource that all tasks share.
	p := &tektonv1alpha1.Pipeline{
		TypeMeta: metav1.TypeMeta{
			APIVersion: TektonAPIVersion,
			Kind:       "Pipeline",
		},
		ObjectMeta: metav1.ObjectMeta{
			Namespace: params.Namespace,
			Name:      PipelineRunName(params.PipelineIdentifier, params.BuildIdentifier),
		},
		Spec: tektonv1alpha1.PipelineSpec{
			Resources: []tektonv1alpha1.PipelineDeclaredResource{
				{
					Name: params.PipelineIdentifier,
					Type: tektonv1alpha1.PipelineResourceTypeGit,
				},
			},
		},
	}

	p.SetDefaults(context.Background())

	// The PipelineStructure mirrors the stage hierarchy alongside the Pipeline.
	structure := &v1.PipelineStructure{
		ObjectMeta: metav1.ObjectMeta{
			Name: p.Name,
		},
	}

	if len(params.Labels) > 0 {
		p.Labels = util.MergeMaps(params.Labels)
		structure.Labels = util.MergeMaps(params.Labels)
	}

	var previousStage *transformedStage

	var tasks []*tektonv1alpha1.Task

	baseEnv := j.GetEnv()

	for i, s := range j.Stages {
		isLastStage := i == len(j.Stages)-1

		// Convert each top-level stage (and anything nested in it) into tasks.
		stage, err := stageToTask(stageToTaskParams{
			parentParams:         params,
			stage:                s,
			baseWorkingDir:       baseWorkingDir,
			parentEnv:            baseEnv,
			parentAgent:          j.Agent,
			parentWorkspace:      "default",
			parentContainer:      parentContainer,
			parentSidecars:       parentSidecars,
			parentVolumes:        parentVolumes,
			depth:                0,
			previousSiblingStage: previousStage,
		})
		if err != nil {
			return nil, nil, nil, err
		}

		// Carry the stage's configured retry count onto the transformed stage.
		o := stage.Stage.Options
		if o.RootOptions != nil {
			if o.Retry > 0 {
				stage.Stage.Options.Retry = s.Options.Retry
				log.Logger().Infof("setting retries to %d for stage %s", stage.Stage.Options.Retry, stage.Stage.Name)
			}
		}
		previousStage = stage

		pipelineTasks := createPipelineTasks(stage, p.Spec.Resources[0].Name)

		linearTasks := stage.getLinearTasks()

		for index, lt := range linearTasks {
			// Drop unused workspace outputs, and apply default task params to
			// tasks that declare none.
			if shouldRemoveWorkspaceOutput(stage, lt.Name, index, len(linearTasks), isLastStage) {
				pipelineTasks[index].Resources.Outputs = nil
				lt.Spec.Outputs = nil
			}
			if len(lt.Spec.Inputs.Params) == 0 {
				lt.Spec.Inputs.Params = params.TaskParams
			}
		}

		tasks = append(tasks, linearTasks...)
		p.Spec.Tasks = append(p.Spec.Tasks, pipelineTasks...)
		structure.Stages = append(structure.Stages, stage.getAllAsPipelineStructureStages()...)
	}

	return p, tasks, structure, nil
}
  2040  
  2041  func shouldRemoveWorkspaceOutput(stage *transformedStage, taskName string, index int, tasksLen int, isLastStage bool) bool {
  2042  	if stage.isParallel() {
  2043  		parallelStages := stage.Parallel
  2044  		for _, ps := range parallelStages {
  2045  			if ps.Task != nil && ps.Task.Name == taskName {
  2046  				return true
  2047  			}
  2048  			seq := ps.Sequential
  2049  			if len(seq) > 0 {
  2050  				lastSeq := seq[len(seq)-1]
  2051  				if lastSeq.Task.Name == taskName {
  2052  					return true
  2053  				}
  2054  			}
  2055  
  2056  		}
  2057  	} else if index == tasksLen-1 && isLastStage {
  2058  		return true
  2059  	}
  2060  	return false
  2061  }
  2062  
  2063  func createPipelineTasks(stage *transformedStage, resourceName string) []tektonv1alpha1.PipelineTask {
  2064  	if stage.isSequential() {
  2065  		var pTasks []tektonv1alpha1.PipelineTask
  2066  		for _, nestedStage := range stage.Sequential {
  2067  			pTasks = append(pTasks, createPipelineTasks(nestedStage, resourceName)...)
  2068  		}
  2069  		return pTasks
  2070  	} else if stage.isParallel() {
  2071  		var pTasks []tektonv1alpha1.PipelineTask
  2072  		for _, nestedStage := range stage.Parallel {
  2073  			pTasks = append(pTasks, createPipelineTasks(nestedStage, resourceName)...)
  2074  		}
  2075  		return pTasks
  2076  	} else {
  2077  		pTask := tektonv1alpha1.PipelineTask{
  2078  			Name: stage.Stage.stageLabelName(),
  2079  			TaskRef: &tektonv1alpha1.TaskRef{
  2080  				Name: stage.Task.Name,
  2081  			},
  2082  			Retries: int(stage.Stage.Options.Retry),
  2083  		}
  2084  
  2085  		_, provider := findWorkspaceProvider(stage, stage.getEnclosing(0))
  2086  		var previousStageNames []string
  2087  		for _, previousStage := range findPreviousNonBlockStages(*stage) {
  2088  			previousStageNames = append(previousStageNames, previousStage.PipelineTask.Name)
  2089  		}
  2090  		pTask.Resources = &tektonv1alpha1.PipelineTaskResources{
  2091  			Inputs: []tektonv1alpha1.PipelineTaskInputResource{
  2092  				{
  2093  					Name:     "workspace",
  2094  					Resource: resourceName,
  2095  					From:     provider,
  2096  				},
  2097  			},
  2098  			Outputs: []tektonv1alpha1.PipelineTaskOutputResource{
  2099  				{
  2100  					Name:     "workspace",
  2101  					Resource: resourceName,
  2102  				},
  2103  			},
  2104  		}
  2105  		pTask.RunAfter = previousStageNames
  2106  		stage.PipelineTask = &pTask
  2107  
  2108  		return []tektonv1alpha1.PipelineTask{pTask}
  2109  	}
  2110  }
  2111  
// Looks for the most recent Task using the desired workspace that was not in the
// same parallel stage and returns the name of the corresponding Task.
// The boolean result is true when a provider decision was reached (including
// "no provider needed" for the empty workspace); the slice holds the provider
// task name(s), or nil when none is required.
func findWorkspaceProvider(stage, sibling *transformedStage) (bool, []string) {
	// The "empty" workspace needs no provider at all.
	if *stage.Stage.Options.Workspace == "empty" {
		return true, nil
	}

	// Walk backwards through earlier siblings until a provider is found.
	for sibling != nil {
		if sibling.isSequential() {
			// Only the last stage of a sequence can provide its workspace.
			found, provider := findWorkspaceProvider(stage, sibling.Sequential[len(sibling.Sequential)-1])
			if found {
				return true, provider
			}
		} else if sibling.isParallel() {
			// We don't want to use a workspace from a parallel stage outside of that stage,
			// but we do need to descend inwards in case stage is in that same stage.
			if stage.getEnclosing(sibling.Depth) == sibling {
				for _, nested := range sibling.Parallel {
					// Pick the parallel branch that has stage
					if stage.getEnclosing(nested.Depth) == nested {
						found, provider := findWorkspaceProvider(stage, nested)
						if found {
							return true, provider
						}
					}
				}
			}
			// TODO: What to do about custom workspaces? Check for erroneous uses specially?
			// Allow them if only one of the parallel tasks uses the same resource?
		} else if sibling.PipelineTask != nil {
			// A concrete task provides the workspace when it uses the same one.
			if *sibling.Stage.Options.Workspace == *stage.Stage.Options.Workspace {
				return true, []string{sibling.PipelineTask.Name}
			}
		} else {
			// We are in a sequential stage and sibling has not had its PipelineTask created.
			// Check the task before it so we don't use a workspace of a later task.
		}
		sibling = sibling.PreviousSiblingStage
	}

	return false, nil
}
  2154  
  2155  // Find the end tasks for this stage, traversing down to the end stages of any
  2156  // nested sequential or parallel stages as well.
  2157  func findEndStages(stage transformedStage) []*transformedStage {
  2158  	if stage.isSequential() {
  2159  		return findEndStages(*stage.Sequential[len(stage.Sequential)-1])
  2160  	} else if stage.isParallel() {
  2161  		var endTasks []*transformedStage
  2162  		for _, pStage := range stage.Parallel {
  2163  			endTasks = append(endTasks, findEndStages(*pStage)...)
  2164  		}
  2165  		return endTasks
  2166  	} else {
  2167  		return []*transformedStage{&stage}
  2168  	}
  2169  }
  2170  
  2171  // Find the tasks that run immediately before this stage, not including
  2172  // sequential or parallel wrapper stages.
  2173  func findPreviousNonBlockStages(stage transformedStage) []*transformedStage {
  2174  	if stage.PreviousSiblingStage != nil {
  2175  		return findEndStages(*stage.PreviousSiblingStage)
  2176  	} else if stage.EnclosingStage != nil {
  2177  		return findPreviousNonBlockStages(*stage.EnclosingStage)
  2178  	} else {
  2179  		return []*transformedStage{}
  2180  	}
  2181  }
  2182  
  2183  // Return the stage that encloses this stage at the given depth, or nil if there is no such stage.
  2184  // Depth must be >= 0. Returns the stage itself if depth == stage.Depth
  2185  func (ts *transformedStage) getEnclosing(depth int8) *transformedStage {
  2186  	if ts.Depth == depth {
  2187  		return ts
  2188  	} else if ts.EnclosingStage == nil {
  2189  		return nil
  2190  	} else {
  2191  		return ts.EnclosingStage.getEnclosing(depth)
  2192  	}
  2193  }
  2194  
  2195  func findDuplicates(names []string) *apis.FieldError {
  2196  	// Count members
  2197  	counts := make(map[string]int)
  2198  	mangled := make(map[string]string)
  2199  	for _, v := range names {
  2200  		counts[MangleToRfc1035Label(v, "")]++
  2201  		mangled[v] = MangleToRfc1035Label(v, "")
  2202  	}
  2203  
  2204  	var duplicateNames []string
  2205  	for k, v := range mangled {
  2206  		if counts[v] > 1 {
  2207  			duplicateNames = append(duplicateNames, "'"+k+"'")
  2208  		}
  2209  	}
  2210  
  2211  	if len(duplicateNames) > 0 {
  2212  		// Avoid nondeterminism in error messages
  2213  		sort.Strings(duplicateNames)
  2214  		return &apis.FieldError{
  2215  			Message: "Stage names must be unique",
  2216  			Details: "The following stage names are used more than once: " + strings.Join(duplicateNames, ", "),
  2217  		}
  2218  	}
  2219  	return nil
  2220  }
  2221  
  2222  func validateStageNames(j *ParsedPipeline) (err *apis.FieldError) {
  2223  	var validate func(stages []Stage, stageNames *[]string)
  2224  	validate = func(stages []Stage, stageNames *[]string) {
  2225  
  2226  		for _, stage := range stages {
  2227  			*stageNames = append(*stageNames, stage.Name)
  2228  			if len(stage.Stages) > 0 {
  2229  				validate(stage.Stages, stageNames)
  2230  			}
  2231  		}
  2232  
  2233  	}
  2234  	var names []string
  2235  
  2236  	validate(j.Stages, &names)
  2237  
  2238  	err = findDuplicates(names)
  2239  
  2240  	return
  2241  }
  2242  
  2243  func builderHomeStep(envs []corev1.EnvVar, parentContainer *corev1.Container, defaultImage string, versionsDir string) ([]tektonv1alpha1.Step, error) {
  2244  	var err error
  2245  	image := defaultImage
  2246  	if image == "" {
  2247  		image = os.Getenv("BUILDER_JX_IMAGE")
  2248  		if image == "" {
  2249  			image, err = versionstream.ResolveDockerImage(versionsDir, GitMergeImage)
  2250  			if err != nil {
  2251  				return []tektonv1alpha1.Step{}, err
  2252  			}
  2253  		}
  2254  	}
  2255  
  2256  	builderHomeContainer := &corev1.Container{
  2257  		Name:       "setup-builder-home",
  2258  		Image:      image,
  2259  		Command:    []string{util.GetSh(), "-c"},
  2260  		Args:       []string{`[ -d /builder/home ] || mkdir -p /builder && ln -s /tekton/home /builder/home`},
  2261  		WorkingDir: "/workspace/source",
  2262  		Env:        envs,
  2263  	}
  2264  
  2265  	if parentContainer != nil {
  2266  		mergedHome, err := MergeContainers(parentContainer, builderHomeContainer)
  2267  		if err != nil {
  2268  			return []tektonv1alpha1.Step{}, err
  2269  		}
  2270  		builderHomeContainer = mergedHome
  2271  	}
  2272  
  2273  	return []tektonv1alpha1.Step{{
  2274  		Container: *builderHomeContainer,
  2275  	}}, nil
  2276  }
  2277  
  2278  // todo JR lets remove this when we switch tekton to using git merge type pipelineresources
  2279  func getDefaultTaskSpec(envs []corev1.EnvVar, parentContainer *corev1.Container, defaultImage string, versionsDir string) (tektonv1alpha1.TaskSpec, error) {
  2280  	var err error
  2281  	image := defaultImage
  2282  	if image == "" {
  2283  		image = os.Getenv("BUILDER_JX_IMAGE")
  2284  		if image == "" {
  2285  			image, err = versionstream.ResolveDockerImage(versionsDir, GitMergeImage)
  2286  			if err != nil {
  2287  				return tektonv1alpha1.TaskSpec{}, err
  2288  			}
  2289  		}
  2290  	}
  2291  
  2292  	childContainer := &corev1.Container{
  2293  		Name:       "git-merge",
  2294  		Image:      image,
  2295  		Command:    []string{"jx"},
  2296  		Args:       []string{"step", "git", "merge", "--verbose"},
  2297  		WorkingDir: "/workspace/source",
  2298  		Env:        envs,
  2299  	}
  2300  
  2301  	if parentContainer != nil {
  2302  		mergedChild, err := MergeContainers(parentContainer, childContainer)
  2303  		if err != nil {
  2304  			return tektonv1alpha1.TaskSpec{}, err
  2305  		}
  2306  		childContainer = mergedChild
  2307  	}
  2308  
  2309  	return tektonv1alpha1.TaskSpec{
  2310  		TaskSpec: tektonv1beta1.TaskSpec{
  2311  			Steps: []tektonv1alpha1.Step{
  2312  				{
  2313  					Container: *childContainer,
  2314  				},
  2315  			},
  2316  		},
  2317  	}, nil
  2318  }
  2319  
  2320  // HasNonStepOverrides returns true if this override contains configuration like agent, containerOptions, or volumes.
  2321  func (p *PipelineOverride) HasNonStepOverrides() bool {
  2322  	return p.ContainerOptions != nil || p.Agent != nil || len(p.Volumes) > 0
  2323  }
  2324  
  2325  // AsStepsSlice returns a possibly empty slice of the step or steps in this override
  2326  func (p *PipelineOverride) AsStepsSlice() []*Step {
  2327  	if p.Step != nil {
  2328  		return []*Step{p.Step}
  2329  	}
  2330  	if len(p.Steps) > 0 {
  2331  		return p.Steps
  2332  	}
  2333  	return []*Step{}
  2334  }
  2335  
  2336  // MatchesPipeline returns true if the pipeline name is specified in the override or no pipeline is specified at all in the override
  2337  func (p *PipelineOverride) MatchesPipeline(name string) bool {
  2338  	if p.Pipeline == "" || strings.EqualFold(p.Pipeline, name) {
  2339  		return true
  2340  	}
  2341  	return false
  2342  }
  2343  
  2344  // MatchesStage returns true if the stage/lifecycle name is specified in the override or no stage/lifecycle is specified at all in the override
  2345  func (p *PipelineOverride) MatchesStage(name string) bool {
  2346  	if p.Stage == "" || p.Stage == name {
  2347  		return true
  2348  	}
  2349  	return false
  2350  }
  2351  
  2352  // ApplyStepOverridesToPipeline applies an individual override to the pipeline, replacing named steps in specified stages (or all stages if
  2353  // no stage name is specified).
  2354  func ApplyStepOverridesToPipeline(pipeline *ParsedPipeline, override *PipelineOverride) *ParsedPipeline {
  2355  	if pipeline == nil || override == nil {
  2356  		return pipeline
  2357  	}
  2358  
  2359  	var newStages []Stage
  2360  	for _, s := range pipeline.Stages {
  2361  		overriddenStage := ApplyStepOverridesToStage(s, override)
  2362  		if !equality.Semantic.DeepEqual(overriddenStage, Stage{}) {
  2363  			newStages = append(newStages, overriddenStage)
  2364  		}
  2365  	}
  2366  	pipeline.Stages = newStages
  2367  
  2368  	return pipeline
  2369  }
  2370  
  2371  func stepPointerSliceToStepSlice(orig []*Step) []Step {
  2372  	var newSteps []Step
  2373  	for _, s := range orig {
  2374  		if s != nil {
  2375  			newSteps = append(newSteps, *s)
  2376  		}
  2377  	}
  2378  
  2379  	return newSteps
  2380  }
  2381  
// ApplyNonStepOverridesToPipeline applies the non-step configuration from an individual override to the pipeline.
func ApplyNonStepOverridesToPipeline(pipeline *ParsedPipeline, override *PipelineOverride) *ParsedPipeline {
	if pipeline == nil || override == nil {
		return pipeline
	}

	// Only apply this override to the top-level pipeline if no stage is specified.
	if override.Stage == "" {
		if override.Agent != nil {
			pipeline.Agent = override.Agent
		}
		if override.ContainerOptions != nil {
			// Copy so later mutations of the override don't leak into the pipeline.
			containerOptionsCopy := *override.ContainerOptions
			if pipeline.Options == nil {
				pipeline.Options = &RootOptions{}
			}
			if pipeline.Options.ContainerOptions == nil {
				pipeline.Options.ContainerOptions = &containerOptionsCopy
			} else {
				// Merge with the existing options, the override winning; on
				// merge failure keep the existing options and just warn.
				mergedContainer, err := MergeContainers(pipeline.Options.ContainerOptions, &containerOptionsCopy)
				if err != nil {
					log.Logger().Warnf("couldn't merge override container options: %s", err)
				} else {
					pipeline.Options.ContainerOptions = mergedContainer
				}
			}
		}
		// Sidecars and volumes are appended, not replaced.
		if len(override.Sidecars) > 0 {
			if pipeline.Options == nil {
				pipeline.Options = &RootOptions{}
			}
			pipeline.Options.Sidecars = append(pipeline.Options.Sidecars, override.Sidecars...)
		}
		if len(override.Volumes) > 0 {
			if pipeline.Options == nil {
				pipeline.Options = &RootOptions{}
			}
			pipeline.Options.Volumes = append(pipeline.Options.Volumes, override.Volumes...)
		}
	}

	// Recurse into each stage, dropping any stage the override empties out.
	var newStages []Stage
	for _, s := range pipeline.Stages {
		overriddenStage := ApplyNonStepOverridesToStage(s, override)
		if !equality.Semantic.DeepEqual(overriddenStage, Stage{}) {
			newStages = append(newStages, overriddenStage)
		}
	}
	pipeline.Stages = newStages

	return pipeline
}
  2434  
// ApplyNonStepOverridesToStage applies non-step overrides, such as stage agent, containerOptions, and volumes, to this
// stage and its children.
func ApplyNonStepOverridesToStage(stage Stage, override *PipelineOverride) Stage {
	if override == nil {
		return stage
	}

	// Since a traditional build pack only has one stage at this point, treat anything that's stage-specific as valid here.
	if (override.MatchesStage(stage.Name) || stage.Name == DefaultStageNameForBuildPack) && override.Stage != "" {
		if override.Agent != nil {
			stage.Agent = override.Agent
		}
		if override.ContainerOptions != nil {
			// Copy so later mutations of the override don't leak into the stage.
			containerOptionsCopy := *override.ContainerOptions
			if stage.Options == nil {
				stage.Options = &StageOptions{
					RootOptions: &RootOptions{},
				}
			}
			if stage.Options.ContainerOptions == nil {
				stage.Options.ContainerOptions = &containerOptionsCopy
			} else {
				// Merge with the existing options, the override winning; on
				// merge failure keep the existing options and just warn.
				mergedContainer, err := MergeContainers(stage.Options.ContainerOptions, &containerOptionsCopy)
				if err != nil {
					log.Logger().Warnf("couldn't merge override container options: %s", err)
				} else {
					stage.Options.ContainerOptions = mergedContainer
				}
			}
		}

		// Sidecars and volumes are appended, not replaced.
		if len(override.Sidecars) > 0 {
			if stage.Options == nil {
				stage.Options = &StageOptions{
					RootOptions: &RootOptions{},
				}
			}
			stage.Options.Sidecars = append(stage.Options.Sidecars, override.Sidecars...)
		}

		if len(override.Volumes) > 0 {
			if stage.Options == nil {
				stage.Options = &StageOptions{
					RootOptions: &RootOptions{},
				}
			}
			stage.Options.Volumes = append(stage.Options.Volumes, override.Volumes...)
		}
	}
	// Recurse into nested sequential stages...
	if len(stage.Stages) > 0 {
		var newStages []Stage
		for _, s := range stage.Stages {
			newStages = append(newStages, ApplyNonStepOverridesToStage(s, override))
		}
		stage.Stages = newStages
	}
	// ...and into parallel branches.
	if len(stage.Parallel) > 0 {
		var newParallel []Stage
		for _, s := range stage.Parallel {
			newParallel = append(newParallel, ApplyNonStepOverridesToStage(s, override))
		}
		stage.Parallel = newParallel
	}

	return stage
}
  2501  
  2502  // ApplyStepOverridesToStage applies a set of overrides to named steps in this stage and its children
  2503  func ApplyStepOverridesToStage(stage Stage, override *PipelineOverride) Stage {
  2504  	if override == nil {
  2505  		return stage
  2506  	}
  2507  
  2508  	if override.MatchesStage(stage.Name) {
  2509  		if len(stage.Steps) > 0 {
  2510  			var newSteps []Step
  2511  			if override.Name != "" {
  2512  				for _, s := range stage.Steps {
  2513  					newSteps = append(newSteps, OverrideStep(s, override)...)
  2514  				}
  2515  			} else {
  2516  				// If no step name was specified but there are steps, just replace all steps in the stage/lifecycle,
  2517  				// or add the new steps before/after the existing steps in the stage/lifecycle
  2518  				if steps := override.AsStepsSlice(); len(steps) > 0 {
  2519  					if override.Type == nil || *override.Type == StepOverrideReplace {
  2520  						newSteps = append(newSteps, stepPointerSliceToStepSlice(steps)...)
  2521  					} else if *override.Type == StepOverrideBefore {
  2522  						newSteps = append(newSteps, stepPointerSliceToStepSlice(steps)...)
  2523  						newSteps = append(newSteps, stage.Steps...)
  2524  					} else if *override.Type == StepOverrideAfter {
  2525  						newSteps = append(newSteps, stage.Steps...)
  2526  						newSteps = append(newSteps, stepPointerSliceToStepSlice(steps)...)
  2527  					}
  2528  				}
  2529  				// If there aren't any steps as well as no step name, then we're removing all steps from this stage/lifecycle,
  2530  				// so just don't add anything to newSteps, and we'll end up returning an empty stage
  2531  			}
  2532  
  2533  			// If newSteps isn't empty, use it for the stage's steps list. Otherwise, if no agent override is specified,
  2534  			// we're removing this stage, so return an empty stage.
  2535  			if len(newSteps) > 0 {
  2536  				stage.Steps = newSteps
  2537  			} else if !override.HasNonStepOverrides() {
  2538  				return Stage{}
  2539  			}
  2540  		}
  2541  	}
  2542  	if len(stage.Stages) > 0 {
  2543  		var newStages []Stage
  2544  		for _, s := range stage.Stages {
  2545  			newStages = append(newStages, ApplyStepOverridesToStage(s, override))
  2546  		}
  2547  		stage.Stages = newStages
  2548  	}
  2549  	if len(stage.Parallel) > 0 {
  2550  		var newParallel []Stage
  2551  		for _, s := range stage.Parallel {
  2552  			newParallel = append(newParallel, ApplyStepOverridesToStage(s, override))
  2553  		}
  2554  		stage.Parallel = newParallel
  2555  	}
  2556  
  2557  	return stage
  2558  }
  2559  
  2560  // OverrideStep overrides an existing step, if it matches the override's name, with the contents of the override. It also
  2561  // recurses into child steps.
  2562  func OverrideStep(step Step, override *PipelineOverride) []Step {
  2563  	if override != nil {
  2564  		if step.Name == override.Name {
  2565  			var newSteps []Step
  2566  
  2567  			if override.Step != nil {
  2568  				if override.Step.Name == "" {
  2569  					override.Step.Name = step.Name
  2570  				}
  2571  				newSteps = append(newSteps, *override.Step)
  2572  			}
  2573  			if override.Steps != nil {
  2574  				for _, s := range override.Steps {
  2575  					newSteps = append(newSteps, *s)
  2576  				}
  2577  			}
  2578  
  2579  			if override.Type == nil || *override.Type == StepOverrideReplace {
  2580  				return newSteps
  2581  			} else if *override.Type == StepOverrideBefore {
  2582  				return append(newSteps, step)
  2583  			} else if *override.Type == StepOverrideAfter {
  2584  				return append([]Step{step}, newSteps...)
  2585  			}
  2586  
  2587  			// Fall back on just returning the original. We shouldn't ever get here.
  2588  			return []Step{step}
  2589  		}
  2590  
  2591  		if len(step.Steps) > 0 {
  2592  			var newSteps []*Step
  2593  			for _, s := range step.Steps {
  2594  				for _, o := range OverrideStep(*s, override) {
  2595  					stepCopy := o
  2596  					newSteps = append(newSteps, &stepCopy)
  2597  				}
  2598  			}
  2599  			step.Steps = newSteps
  2600  		}
  2601  	}
  2602  
  2603  	return []Step{step}
  2604  }
  2605  
  2606  // StringParamValue generates a Tekton ArrayOrString value for the given string
  2607  func StringParamValue(val string) tektonv1alpha1.ArrayOrString {
  2608  	return tektonv1alpha1.ArrayOrString{
  2609  		Type:      tektonv1alpha1.ParamTypeString,
  2610  		StringVal: val,
  2611  	}
  2612  }
  2613  
  2614  // ReplaceCurlyWithParen replaces legacy "${inputs.params.foo}" with "$(inputs.params.foo)"
  2615  func ReplaceCurlyWithParen(input string) string {
  2616  	re := regexp.MustCompile(braceMatchingRegex)
  2617  	matches := re.FindAllStringSubmatch(input, -1)
  2618  	for _, m := range matches {
  2619  		if len(m) >= 3 {
  2620  			input = strings.ReplaceAll(input, m[0], "$("+m[3]+")")
  2621  		}
  2622  	}
  2623  	return input
  2624  }