github.com/olli-ai/jx/v2@v2.0.400-0.20210921045218-14731b4dd448/pkg/tekton/syntax/pipeline.go (about)

     1  package syntax
     2  
     3  import (
     4  	"bytes"
     5  	"context"
     6  	"encoding/json"
     7  	"fmt"
     8  	"os"
     9  	"path/filepath"
    10  	"regexp"
    11  	"sort"
    12  	"strconv"
    13  	"strings"
    14  	"time"
    15  
    16  	v1 "github.com/jenkins-x/jx-api/pkg/apis/jenkins.io/v1"
    17  	"github.com/jenkins-x/jx-logging/pkg/log"
    18  	"github.com/olli-ai/jx/v2/pkg/kube/naming"
    19  	"github.com/olli-ai/jx/v2/pkg/util"
    20  	"github.com/olli-ai/jx/v2/pkg/versionstream"
    21  	"github.com/pkg/errors"
    22  	"github.com/tektoncd/pipeline/pkg/apis/pipeline"
    23  	tektonv1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1"
    24  	tektonv1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
    25  	corev1 "k8s.io/api/core/v1"
    26  	"k8s.io/apimachinery/pkg/api/equality"
    27  	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    28  	"k8s.io/apimachinery/pkg/util/strategicpatch"
    29  	"k8s.io/client-go/kubernetes"
    30  	"knative.dev/pkg/apis"
    31  )
    32  
const (
	// GitMergeImage is the default image name that is used in the git merge step of a pipeline
	GitMergeImage = "ghcr.io/jenkins-x/builder-jx"

	// WorkingDirRoot is the root directory for working directories.
	WorkingDirRoot = "/workspace"

	// braceMatchingRegex matches "${inputs.params.foo}" so we can replace it with "$(inputs.params.foo)"
	braceMatchingRegex = "(\\$(\\{(?P<var>inputs\\.params\\.[_a-zA-Z][_a-zA-Z0-9.-]*)\\}))"
)

var (
	// ipAddressRegistryRegex matches registry hosts given as an IP address, optionally with a port.
	// NOTE(review): the pattern contains an unescaped '.' and a fifth numeric group
	// (`\d+\.\d+\.\d+\.\d+.\d+`), which looks wrong for a plain IPv4[:port] matcher —
	// verify the intended format before tightening it.
	ipAddressRegistryRegex = regexp.MustCompile(`\d+\.\d+\.\d+\.\d+.\d+(:\d+)?`)

	// commandIsSkaffoldRegex detects step commands of the form "export VERSION=... && skaffold build...".
	commandIsSkaffoldRegex = regexp.MustCompile(`export VERSION=.*? && skaffold build.*`)
)
    49  
// ParsedPipeline is the internal representation of the Pipeline, used to validate and create CRDs
type ParsedPipeline struct {
	// Agent is the default agent for the whole pipeline; stages and steps may override it.
	Agent *Agent `json:"agent,omitempty"`
	// Env holds pipeline-wide environment variables.
	Env []corev1.EnvVar `json:"env,omitempty"`
	// Options carries pipeline-level configuration such as timeout, retry, and volumes.
	Options *RootOptions `json:"options,omitempty"`
	// Stages is the required list of top-level stages.
	Stages []Stage `json:"stages"`
	// Post lists actions to run after the pipeline completes, keyed by condition.
	Post []Post `json:"post,omitempty"`
	// WorkingDir is the base working directory, serialized as "dir".
	WorkingDir *string `json:"dir,omitempty"`

	// Replaced by Env, retained for backwards compatibility
	Environment []corev1.EnvVar `json:"environment,omitempty"`
}
    62  
// Agent defines where the pipeline, stage, or step should run.
type Agent struct {
	// One of label or image is required.
	Label string `json:"label,omitempty"`
	Image string `json:"image,omitempty"`

	// Legacy fields from jenkinsfile.PipelineAgent.
	// Container is deprecated in favor of Image (see validateAgent and GetImage).
	Container string `json:"container,omitempty"`
	// Dir is only valid in legacy build packs and is rejected by validateAgent.
	Dir string `json:"dir,omitempty"`
}
    73  
// TimeoutUnit is used for calculating timeout duration
type TimeoutUnit string

// The available time units.
const (
	TimeoutUnitSeconds TimeoutUnit = "seconds"
	TimeoutUnitMinutes TimeoutUnit = "minutes"
	TimeoutUnitHours   TimeoutUnit = "hours"
	TimeoutUnitDays    TimeoutUnit = "days"
)

// All possible time units, used for validation
var allTimeoutUnits = []TimeoutUnit{TimeoutUnitSeconds, TimeoutUnitMinutes, TimeoutUnitHours, TimeoutUnitDays}
    87  
    88  func allTimeoutUnitsAsStrings() []string {
    89  	tu := make([]string, len(allTimeoutUnits))
    90  
    91  	for i, u := range allTimeoutUnits {
    92  		tu[i] = string(u)
    93  	}
    94  
    95  	return tu
    96  }
    97  
// Timeout defines how long a stage or pipeline can run before timing out.
type Timeout struct {
	// Time is the number of Units to wait before timing out.
	Time int64 `json:"time"`
	// Has some sane default - probably seconds
	Unit TimeoutUnit `json:"unit,omitempty"`
}
   104  
   105  // ToDuration generates a duration struct from a Timeout
   106  func (t *Timeout) ToDuration() (*metav1.Duration, error) {
   107  	durationStr := ""
   108  	// TODO: Populate a default timeout unit, most likely seconds.
   109  	if t.Unit != "" {
   110  		durationStr = fmt.Sprintf("%d%c", t.Time, t.Unit[0])
   111  	} else {
   112  		durationStr = fmt.Sprintf("%ds", t.Time)
   113  	}
   114  
   115  	d, err := time.ParseDuration(durationStr)
   116  	if err != nil {
   117  		return nil, err
   118  	}
   119  	return &metav1.Duration{Duration: d}, nil
   120  }
   121  
// RootOptions contains options that can be configured on either a pipeline or a stage
type RootOptions struct {
	// Timeout bounds how long the pipeline or stage may run.
	Timeout *Timeout `json:"timeout,omitempty"`
	// Retry is the number of retries; validated to be non-negative.
	Retry int8 `json:"retry,omitempty"`
	// ContainerOptions allows for advanced configuration of containers for a single stage or the whole
	// pipeline, adding to configuration that can be configured through the syntax already. This includes things
	// like CPU/RAM requests/limits, secrets, ports, etc. Some of these things will end up with native syntax approaches
	// down the road.
	ContainerOptions *corev1.Container `json:"containerOptions,omitempty"`
	// Sidecars are additional containers run alongside the stage's containers.
	Sidecars []*corev1.Container `json:"sidecars,omitempty"`
	// Volumes are validated against the cluster (secrets/PVCs) when a kube client is available.
	Volumes                       []*corev1.Volume    `json:"volumes,omitempty"`
	DistributeParallelAcrossNodes bool                `json:"distributeParallelAcrossNodes,omitempty"`
	Tolerations                   []corev1.Toleration `json:"tolerations,omitempty"`
	PodLabels                     map[string]string   `json:"podLabels,omitempty"`
}
   137  
// Stash defines files to be saved for use in a later stage, marked with a name
type Stash struct {
	// Name identifies the stash so a later Unstash can reference it.
	Name string `json:"name"`
	// Eventually make this optional so that you can do volumes instead
	Files string `json:"files"`
}

// Unstash defines a previously-defined stash to be copied into this stage's workspace
type Unstash struct {
	// Name references a Stash defined in an earlier stage.
	Name string `json:"name"`
	// Dir is the optional target directory to copy the stash into.
	Dir string `json:"dir,omitempty"`
}
   150  
// StageOptions contains both options that can be configured on either a pipeline or a stage, via
// RootOptions, or stage-specific options.
type StageOptions struct {
	*RootOptions `json:",inline"`

	// TODO: Not yet implemented in build-pipeline
	Stash   *Stash   `json:"stash,omitempty"`
	Unstash *Unstash `json:"unstash,omitempty"`

	// Workspace optionally names the workspace this stage uses.
	Workspace *string `json:"workspace,omitempty"`
}
   162  
// Step defines a single step, from the author's perspective, to be executed within a stage.
type Step struct {
	// An optional name to give the step for reporting purposes
	Name string `json:"name,omitempty"`

	// One of command, step, or loop is required.
	Command string `json:"command,omitempty"`
	// args is optional, but only allowed with command
	Arguments []string `json:"args,omitempty"`
	// dir is optional, but only allowed with command. Refers to subdirectory of workspace
	Dir string `json:"dir,omitempty"`

	Step string `json:"step,omitempty"`
	// options is optional, but only allowed with step
	// Also, we'll need to do some magic to do type verification during translation - i.e., this step wants a number
	// for this option, so translate the string value for that option to a number.
	Options map[string]string `json:"options,omitempty"`

	Loop *Loop `json:"loop,omitempty"`

	// agent can be overridden on a step
	Agent *Agent `json:"agent,omitempty"`

	// Image allows the docker image for a step to be specified
	Image string `json:"image,omitempty"`

	// env allows defining per-step environment variables
	Env []corev1.EnvVar `json:"env,omitempty"`

	// Legacy fields from jenkinsfile.PipelineStep before it was eliminated.
	// These are all rejected by validateStep when used in jenkins-x.yml.
	Comment   string  `json:"comment,omitempty"`
	Groovy    string  `json:"groovy,omitempty"`
	Steps     []*Step `json:"steps,omitempty"`
	When      string  `json:"when,omitempty"`
	Container string  `json:"container,omitempty"`
	Sh        string  `json:"sh,omitempty"`
}
   200  
// Loop is a special step that defines a variable, a list of possible values for that variable, and a set of steps to
// repeat for each value for the variable, with the variable set with that value in the environment for the execution of
// those steps.
type Loop struct {
	// The variable name.
	Variable string `json:"variable"`
	// The list of values to iterate over
	Values []string `json:"values"`
	// The steps to run
	Steps []Step `json:"steps"`
}
   212  
// Stage is a unit of work in a pipeline, corresponding either to a Task or a set of Tasks to be run sequentially or in
// parallel with common configuration.
type Stage struct {
	// Name must contain at least one ASCII letter (see validateStage) and is mangled for label use.
	Name  string          `json:"name"`
	Agent *Agent          `json:"agent,omitempty"`
	Env   []corev1.EnvVar `json:"env,omitempty"`
	// Options carries stage-level configuration, including inherited RootOptions.
	Options *StageOptions `json:"options,omitempty"`
	// Exactly one of Steps, Stages, or Parallel must be set (see validateStage).
	Steps      []Step  `json:"steps,omitempty"`
	Stages     []Stage `json:"stages,omitempty"`
	Parallel   []Stage `json:"parallel,omitempty"`
	Post       []Post  `json:"post,omitempty"`
	WorkingDir *string `json:"dir,omitempty"`

	// Replaced by Env, retained for backwards compatibility
	Environment []corev1.EnvVar `json:"environment,omitempty"`
}
   229  
// PostCondition is used to specify under what condition a post action should be executed.
type PostCondition string

// Probably extensible down the road
const (
	PostConditionSuccess PostCondition = "success"
	PostConditionFailure PostCondition = "failure"
	PostConditionAlways  PostCondition = "always"
)
   239  
// Post contains a PostCondition and one more actions to be executed after a pipeline or stage if the condition is met.
type Post struct {
	// TODO: Conditional execution of something after a Task or Pipeline completes is not yet implemented
	Condition PostCondition `json:"condition"`
	// Actions to run when Condition is met.
	Actions []PostAction `json:"actions"`
}
   246  
// PostAction contains the name of a built-in post action and options to pass to that action.
type PostAction struct {
	// TODO: Notifications are not yet supported in Build Pipeline per se.
	Name string `json:"name"`
	// Also, we'll need to do some magic to do type verification during translation - i.e., this action wants a number
	// for this option, so translate the string value for that option to a number.
	Options map[string]string `json:"options,omitempty"`
}
   255  
// StepOverrideType is used to specify whether the existing step should be replaced (default), new step(s) should be
// prepended before the existing step, or new step(s) should be appended after the existing step.
type StepOverrideType string

// The available override types
const (
	StepOverrideReplace StepOverrideType = "replace"
	StepOverrideBefore  StepOverrideType = "before"
	StepOverrideAfter   StepOverrideType = "after"
)
   266  
// PipelineOverride allows for overriding named steps, stages, or pipelines in the build pack or default pipeline
type PipelineOverride struct {
	// Pipeline/Stage/Name select the target to override; empty fields widen the match.
	Pipeline string `json:"pipeline,omitempty"`
	Stage    string `json:"stage,omitempty"`
	Name     string `json:"name,omitempty"`
	// Step or Steps supply the replacement/additional step(s).
	Step  *Step   `json:"step,omitempty"`
	Steps []*Step `json:"steps,omitempty"`
	// Type selects replace/before/after semantics; nil presumably means replace — TODO confirm with the consumer.
	Type             *StepOverrideType   `json:"type,omitempty"`
	Agent            *Agent              `json:"agent,omitempty"`
	ContainerOptions *corev1.Container   `json:"containerOptions,omitempty"`
	Sidecars         []*corev1.Container `json:"sidecars,omitempty"`
	Volumes          []*corev1.Volume    `json:"volumes,omitempty"`
}
   280  
// Compile-time assertion that ParsedPipeline implements apis.Validatable.
var _ apis.Validatable = (*ParsedPipeline)(nil)
   282  
// stageLabelName replaces invalid characters in stage names for label usage.
// It mangles the stage name into an RFC 1035-compliant DNS label with no suffix.
func (s *Stage) stageLabelName() string {
	return MangleToRfc1035Label(s.Name, "")
}
   287  
   288  // GroovyBlock returns the groovy expression for this step
   289  // Legacy code for Jenkinsfile generation
   290  func (s *Step) GroovyBlock(parentIndent string) string {
   291  	var buffer bytes.Buffer
   292  	indent := parentIndent
   293  	if s.Comment != "" {
   294  		buffer.WriteString(indent)
   295  		buffer.WriteString("// ")
   296  		buffer.WriteString(s.Comment)
   297  		buffer.WriteString("\n")
   298  	}
   299  	if s.GetImage() != "" {
   300  		buffer.WriteString(indent)
   301  		buffer.WriteString("container('")
   302  		buffer.WriteString(s.GetImage())
   303  		buffer.WriteString("') {\n")
   304  	} else if s.Dir != "" {
   305  		buffer.WriteString(indent)
   306  		buffer.WriteString("dir('")
   307  		buffer.WriteString(s.Dir)
   308  		buffer.WriteString("') {\n")
   309  	} else if s.GetFullCommand() != "" {
   310  		buffer.WriteString(indent)
   311  		buffer.WriteString("sh \"")
   312  		buffer.WriteString(s.GetFullCommand())
   313  		buffer.WriteString("\"\n")
   314  	} else if s.Groovy != "" {
   315  		lines := strings.Split(s.Groovy, "\n")
   316  		lastIdx := len(lines) - 1
   317  		for i, line := range lines {
   318  			buffer.WriteString(indent)
   319  			buffer.WriteString(line)
   320  			if i >= lastIdx && len(s.Steps) > 0 {
   321  				buffer.WriteString(" {")
   322  			}
   323  			buffer.WriteString("\n")
   324  		}
   325  	}
   326  	childIndent := indent + "  "
   327  	for _, child := range s.Steps {
   328  		buffer.WriteString(child.GroovyBlock(childIndent))
   329  	}
   330  	return buffer.String()
   331  }
   332  
// ToJenkinsfileStatements converts the step to one or more jenkinsfile statements
// Legacy code for Jenkinsfile generation
//
// The step is rendered as (in precedence order) a container call, a dir call,
// an sh statement, or raw groovy lines. Child steps are attached as children of
// the LAST statement produced, which nests them inside that statement's block.
func (s *Step) ToJenkinsfileStatements() []*util.Statement {
	statements := []*util.Statement{}
	if s.Comment != "" {
		// Emit a blank line followed by the comment for readability.
		statements = append(statements, &util.Statement{
			Statement: "",
		}, &util.Statement{
			Statement: "// " + s.Comment,
		})
	}
	if s.GetImage() != "" {
		statements = append(statements, &util.Statement{
			Function:  "container",
			Arguments: []string{s.GetImage()},
		})
	} else if s.Dir != "" {
		statements = append(statements, &util.Statement{
			Function:  "dir",
			Arguments: []string{s.Dir},
		})
	} else if s.GetFullCommand() != "" {
		statements = append(statements, &util.Statement{
			Statement: "sh \"" + s.GetFullCommand() + "\"",
		})
	} else if s.Groovy != "" {
		lines := strings.Split(s.Groovy, "\n")
		for _, line := range lines {
			statements = append(statements, &util.Statement{
				Statement: line,
			})
		}
	}
	if len(statements) > 0 {
		// Nest all child-step statements under the last statement emitted above.
		last := statements[len(statements)-1]
		for _, c := range s.Steps {
			last.Children = append(last.Children, c.ToJenkinsfileStatements()...)
		}
	}
	return statements
}
   374  
   375  // Validate validates the step is populated correctly
   376  // Legacy code for Jenkinsfile generation
   377  func (s *Step) Validate() error {
   378  	if len(s.Steps) > 0 || s.GetCommand() != "" {
   379  		return nil
   380  	}
   381  	return fmt.Errorf("invalid step %#v as no child steps or command", s)
   382  }
   383  
   384  // PutAllEnvVars puts all the defined environment variables in the given map
   385  // Legacy code for Jenkinsfile generation
   386  func (s *Step) PutAllEnvVars(m map[string]string) {
   387  	for _, step := range s.Steps {
   388  		step.PutAllEnvVars(m)
   389  	}
   390  }
   391  
   392  // GetCommand gets the step's command to execute, opting for Command if set, then Sh.
   393  func (s *Step) GetCommand() string {
   394  	if s.Command != "" {
   395  		return s.Command
   396  	}
   397  
   398  	return s.Sh
   399  }
   400  
   401  // GetFullCommand gets the full command to execute, including arguments.
   402  func (s *Step) GetFullCommand() string {
   403  	cmd := s.GetCommand()
   404  
   405  	// If GetCommand() was an empty string, don't deal with arguments, just return.
   406  	if len(s.Arguments) > 0 && cmd != "" {
   407  		cmd = fmt.Sprintf("%s %s", cmd, strings.Join(s.Arguments, " "))
   408  	}
   409  
   410  	return cmd
   411  }
   412  
   413  // GetImage gets the step's image to run on, opting for Image if set, then Container.
   414  func (s *Step) GetImage() string {
   415  	if s.Image != "" {
   416  		return s.Image
   417  	}
   418  	if s.Agent != nil && s.Agent.Image != "" {
   419  		return s.Agent.Image
   420  	}
   421  
   422  	return s.Container
   423  }
   424  
   425  // DeepCopyForParsedPipeline returns a copy of the Agent with deprecated fields migrated to current ones.
   426  func (a *Agent) DeepCopyForParsedPipeline() *Agent {
   427  	agent := a.DeepCopy()
   428  	if agent.Container != "" {
   429  		agent.Image = agent.GetImage()
   430  		agent.Container = ""
   431  		agent.Label = ""
   432  	}
   433  
   434  	return agent
   435  }
   436  
   437  // Groovy returns the agent groovy expression for the agent or `any` if its blank
   438  // Legacy code for Jenkinsfile generation
   439  func (a *Agent) Groovy() string {
   440  	if a.Label != "" {
   441  		return fmt.Sprintf(`{
   442      label "%s"
   443    }`, a.Label)
   444  	}
   445  	// lets use any for Prow
   446  	return "any"
   447  }
   448  
   449  // GetImage gets the agent's image to run on, opting for Image if set, then Container.
   450  func (a *Agent) GetImage() string {
   451  	if a.Image != "" {
   452  		return a.Image
   453  	}
   454  
   455  	return a.Container
   456  }
   457  
   458  // MangleToRfc1035Label - Task/Step names need to be RFC 1035/1123 compliant DNS labels, so we mangle
   459  // them to make them compliant. Results should match the following regex and be
   460  // no more than 63 characters long:
   461  //     [a-z]([-a-z0-9]*[a-z0-9])?
   462  // cf. https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
   463  // body is assumed to have at least one ASCII letter.
   464  // suffix is assumed to be alphanumeric and non-empty.
   465  // TODO: Combine with kube.ToValidName (that function needs to handle lengths)
   466  func MangleToRfc1035Label(body string, suffix string) string {
   467  	const maxLabelLength = 63
   468  	maxBodyLength := maxLabelLength
   469  	if len(suffix) > 0 {
   470  		maxBodyLength = maxLabelLength - len(suffix) - 1 // Add an extra hyphen before the suffix
   471  	}
   472  	var sb strings.Builder
   473  	bufferedHyphen := false // Used to make sure we don't output consecutive hyphens.
   474  	for _, codepoint := range body {
   475  		toWrite := 0
   476  		if sb.Len() != 0 { // Digits and hyphens aren't allowed to be the first character
   477  			if codepoint == ' ' || codepoint == '-' || codepoint == '.' {
   478  				bufferedHyphen = true
   479  			} else if codepoint >= '0' && codepoint <= '9' {
   480  				toWrite = 1
   481  			}
   482  		}
   483  
   484  		if codepoint >= 'A' && codepoint <= 'Z' {
   485  			codepoint += ('a' - 'A') // Offset to make character lowercase
   486  			toWrite = 1
   487  		} else if codepoint >= 'a' && codepoint <= 'z' {
   488  			toWrite = 1
   489  		}
   490  
   491  		if toWrite > 0 {
   492  			if bufferedHyphen {
   493  				toWrite++
   494  			}
   495  			if sb.Len()+toWrite > maxBodyLength {
   496  				break
   497  			}
   498  			if bufferedHyphen {
   499  				sb.WriteRune('-')
   500  				bufferedHyphen = false
   501  			}
   502  			sb.WriteRune(codepoint)
   503  		}
   504  	}
   505  
   506  	if suffix != "" {
   507  		sb.WriteRune('-')
   508  		sb.WriteString(suffix)
   509  	}
   510  	return sb.String()
   511  }
   512  
   513  // GetEnv gets the environment for the ParsedPipeline, returning Env first and Environment if Env isn't populated.
   514  func (j *ParsedPipeline) GetEnv() []corev1.EnvVar {
   515  	if j != nil {
   516  		if len(j.Env) > 0 {
   517  			return j.Env
   518  		}
   519  
   520  		return j.Environment
   521  	}
   522  	return []corev1.EnvVar{}
   523  }
   524  
   525  // GetEnv gets the environment for the Stage, returning Env first and Environment if Env isn't populated.
   526  func (s *Stage) GetEnv() []corev1.EnvVar {
   527  	if len(s.Env) > 0 {
   528  		return s.Env
   529  	}
   530  
   531  	return s.Environment
   532  }
   533  
   534  // Validate checks the ParsedPipeline to find any errors in it, without validating against the cluster.
   535  func (j *ParsedPipeline) Validate(context context.Context) *apis.FieldError {
   536  	return j.ValidateInCluster(context, nil, "")
   537  }
   538  
   539  // ValidateInCluster checks the parsed ParsedPipeline to find any errors in it, including validation against the cluster.
   540  func (j *ParsedPipeline) ValidateInCluster(context context.Context, kubeClient kubernetes.Interface, ns string) *apis.FieldError {
   541  	if err := validateAgent(j.Agent).ViaField("agent"); err != nil {
   542  		return err
   543  	}
   544  
   545  	var volumes []*corev1.Volume
   546  	if j.Options != nil && len(j.Options.Volumes) > 0 {
   547  		volumes = append(volumes, j.Options.Volumes...)
   548  	}
   549  	if err := validateStages(j.Stages, j.Agent, volumes, kubeClient, ns); err != nil {
   550  		return err
   551  	}
   552  
   553  	if err := validateStageNames(j); err != nil {
   554  		return err
   555  	}
   556  
   557  	if err := validateRootOptions(j.Options, volumes, kubeClient, ns).ViaField("options"); err != nil {
   558  		return err
   559  	}
   560  
   561  	return nil
   562  }
   563  
   564  func validateAgent(a *Agent) *apis.FieldError {
   565  	// TODO: This is the same whether you specify an agent without label or image, or if you don't specify an agent
   566  	// at all, which is nonoptimal.
   567  	if a != nil {
   568  		if a.Container != "" {
   569  			return &apis.FieldError{
   570  				Message: "the container field is deprecated - please use image instead",
   571  				Paths:   []string{"container"},
   572  			}
   573  		}
   574  		if a.Dir != "" {
   575  			return &apis.FieldError{
   576  				Message: "the dir field is only valid in legacy build packs, not in jenkins-x.yml. Please remove it.",
   577  				Paths:   []string{"dir"},
   578  			}
   579  		}
   580  
   581  		if a.Image != "" && a.Label != "" {
   582  			return apis.ErrMultipleOneOf("label", "image")
   583  		}
   584  
   585  		if a.Image == "" && a.Label == "" {
   586  			return apis.ErrMissingOneOf("label", "image")
   587  		}
   588  	}
   589  
   590  	return nil
   591  }
   592  
// containsASCIILetter reports whether a string contains at least one ASCII letter ([a-zA-Z]).
var containsASCIILetter = regexp.MustCompile(`[a-zA-Z]`).MatchString
   594  
   595  func validateStage(s Stage, parentAgent *Agent, parentVolumes []*corev1.Volume, kubeClient kubernetes.Interface, ns string) *apis.FieldError {
   596  	if len(s.Steps) == 0 && len(s.Stages) == 0 && len(s.Parallel) == 0 {
   597  		return apis.ErrMissingOneOf("steps", "stages", "parallel")
   598  	}
   599  
   600  	if !containsASCIILetter(s.Name) {
   601  		return &apis.FieldError{
   602  			Message: "Stage name must contain at least one ASCII letter",
   603  			Paths:   []string{"name"},
   604  		}
   605  	}
   606  
   607  	var volumes []*corev1.Volume
   608  
   609  	volumes = append(volumes, parentVolumes...)
   610  	if s.Options != nil && s.Options.RootOptions != nil && len(s.Options.Volumes) > 0 {
   611  		volumes = append(volumes, s.Options.Volumes...)
   612  	}
   613  
   614  	stageAgent := s.Agent.DeepCopy()
   615  	if stageAgent == nil {
   616  		stageAgent = parentAgent.DeepCopy()
   617  	}
   618  
   619  	if stageAgent == nil {
   620  		return &apis.FieldError{
   621  			Message: "No agent specified for stage or for its parent(s)",
   622  			Paths:   []string{"agent"},
   623  		}
   624  	}
   625  
   626  	if len(s.Steps) > 0 {
   627  		if len(s.Stages) > 0 || len(s.Parallel) > 0 {
   628  			return apis.ErrMultipleOneOf("steps", "stages", "parallel")
   629  		}
   630  		seenStepNames := make(map[string]int)
   631  		for i, step := range s.Steps {
   632  			if err := validateStep(step).ViaFieldIndex("steps", i); err != nil {
   633  				return err
   634  			}
   635  			if step.Name != "" {
   636  				if count, exists := seenStepNames[step.Name]; exists {
   637  					seenStepNames[step.Name] = count + 1
   638  				} else {
   639  					seenStepNames[step.Name] = 1
   640  				}
   641  			}
   642  		}
   643  
   644  		var duplicateSteps []string
   645  		for k, v := range seenStepNames {
   646  			if v > 1 {
   647  				duplicateSteps = append(duplicateSteps, k)
   648  			}
   649  		}
   650  		if len(duplicateSteps) > 0 {
   651  			sort.Strings(duplicateSteps)
   652  			return &apis.FieldError{
   653  				Message: "step names within a stage must be unique",
   654  				Details: fmt.Sprintf("The following step names in the stage %s are used more than once: %s", s.Name, strings.Join(duplicateSteps, ", ")),
   655  				Paths:   []string{"steps"},
   656  			}
   657  		}
   658  	}
   659  
   660  	if len(s.Stages) > 0 {
   661  		if len(s.Parallel) > 0 {
   662  			return apis.ErrMultipleOneOf("steps", "stages", "parallel")
   663  		}
   664  		for i, stage := range s.Stages {
   665  			if err := validateStage(stage, parentAgent, volumes, kubeClient, ns).ViaFieldIndex("stages", i); err != nil {
   666  				return err
   667  			}
   668  		}
   669  	}
   670  
   671  	if len(s.Parallel) > 0 {
   672  		for i, stage := range s.Parallel {
   673  			if err := validateStage(stage, parentAgent, volumes, kubeClient, ns).ViaFieldIndex("parallel", i); err != nil {
   674  				return nil
   675  			}
   676  		}
   677  	}
   678  
   679  	return validateStageOptions(s.Options, volumes, kubeClient, ns).ViaField("options")
   680  }
   681  
   682  func moreThanOneAreTrue(vals ...bool) bool {
   683  	count := 0
   684  
   685  	for _, v := range vals {
   686  		if v {
   687  			count++
   688  		}
   689  	}
   690  
   691  	return count > 1
   692  }
   693  
// validateStep checks a single step for errors. Checks run in a fixed order,
// so the FIRST violated rule determines the returned error:
// legacy build-pack-only fields must be absent; exactly one of command/step/loop
// must be set; options are only valid with step; args only with command; then
// any nested loop and step-level agent are validated recursively.
func validateStep(s Step) *apis.FieldError {
	// Special cases for when you use legacy build pack syntax inside a pipeline definition
	if s.Container != "" {
		return &apis.FieldError{
			Message: "the container field is deprecated - please use image instead",
			Paths:   []string{"container"},
		}
	}
	if s.Groovy != "" {
		return &apis.FieldError{
			Message: "the groovy field is only valid in legacy build packs, not in jenkins-x.yml. Please remove it.",
			Paths:   []string{"groovy"},
		}
	}
	if s.Comment != "" {
		return &apis.FieldError{
			Message: "the comment field is only valid in legacy build packs, not in jenkins-x.yml. Please remove it.",
			Paths:   []string{"comment"},
		}
	}
	if s.When != "" {
		return &apis.FieldError{
			Message: "the when field is only valid in legacy build packs, not in jenkins-x.yml. Please remove it.",
			Paths:   []string{"when"},
		}
	}
	if len(s.Steps) > 0 {
		return &apis.FieldError{
			Message: "the steps field is only valid in legacy build packs, not in jenkins-x.yml. Please remove it and list the nested stages sequentially instead.",
			Paths:   []string{"steps"},
		}
	}

	// Exactly one of command (Command/Sh), step, or loop must be present.
	if s.GetCommand() == "" && s.Step == "" && s.Loop == nil {
		return apis.ErrMissingOneOf("command", "step", "loop")
	}

	if moreThanOneAreTrue(s.GetCommand() != "", s.Step != "", s.Loop != nil) {
		return apis.ErrMultipleOneOf("command", "step", "loop")
	}

	// Options only apply to "step"; args only apply to "command".
	if (s.GetCommand() != "" || s.Loop != nil) && len(s.Options) != 0 {
		return &apis.FieldError{
			Message: "Cannot set options for a command or a loop",
			Paths:   []string{"options"},
		}
	}

	if (s.Step != "" || s.Loop != nil) && len(s.Arguments) != 0 {
		return &apis.FieldError{
			Message: "Cannot set command-line arguments for a step or a loop",
			Paths:   []string{"args"},
		}
	}

	if err := validateLoop(s.Loop); err != nil {
		return err.ViaField("loop")
	}

	if s.Agent != nil {
		return validateAgent(s.Agent).ViaField("agent")
	}
	return nil
}
   758  
   759  func validateLoop(l *Loop) *apis.FieldError {
   760  	if l != nil {
   761  		if l.Variable == "" {
   762  			return apis.ErrMissingField("variable")
   763  		}
   764  
   765  		if len(l.Steps) == 0 {
   766  			return apis.ErrMissingField("steps")
   767  		}
   768  
   769  		if len(l.Values) == 0 {
   770  			return apis.ErrMissingField("values")
   771  		}
   772  
   773  		for i, step := range l.Steps {
   774  			if err := validateStep(step).ViaFieldIndex("steps", i); err != nil {
   775  				return err
   776  			}
   777  		}
   778  	}
   779  
   780  	return nil
   781  }
   782  
   783  func validateStages(stages []Stage, parentAgent *Agent, parentVolumes []*corev1.Volume, kubeClient kubernetes.Interface, ns string) *apis.FieldError {
   784  	if len(stages) == 0 {
   785  		return apis.ErrMissingField("stages")
   786  	}
   787  
   788  	for i, s := range stages {
   789  		if err := validateStage(s, parentAgent, parentVolumes, kubeClient, ns).ViaFieldIndex("stages", i); err != nil {
   790  			return err
   791  		}
   792  	}
   793  
   794  	return nil
   795  }
   796  
   797  func validateRootOptions(o *RootOptions, volumes []*corev1.Volume, kubeClient kubernetes.Interface, ns string) *apis.FieldError {
   798  	if o != nil {
   799  		if o.Timeout != nil {
   800  			if err := validateTimeout(o.Timeout); err != nil {
   801  				return err.ViaField("timeout")
   802  			}
   803  		}
   804  
   805  		// TODO: retry will default to 0, so we're kinda stuck checking if it's less than zero here.
   806  		if o.Retry < 0 {
   807  			return &apis.FieldError{
   808  				Message: "Retry count cannot be negative",
   809  				Paths:   []string{"retry"},
   810  			}
   811  		}
   812  
   813  		for i, v := range o.Volumes {
   814  			if err := validateVolume(v, kubeClient, ns).ViaFieldIndex("volumes", i); err != nil {
   815  				return err
   816  			}
   817  		}
   818  
   819  		for i, s := range o.Sidecars {
   820  			if err := validateSidecarContainer(s, volumes).ViaFieldIndex("sidecars", i); err != nil {
   821  				return err
   822  			}
   823  		}
   824  
   825  		return validateContainerOptions(o.ContainerOptions, volumes).ViaField("containerOptions")
   826  	}
   827  
   828  	return nil
   829  }
   830  
   831  func validateVolume(v *corev1.Volume, kubeClient kubernetes.Interface, ns string) *apis.FieldError {
   832  	if v != nil {
   833  		if v.Name == "" {
   834  			return apis.ErrMissingField("name")
   835  		}
   836  		if kubeClient != nil {
   837  			if v.Secret != nil {
   838  				_, err := kubeClient.CoreV1().Secrets(ns).Get(v.Secret.SecretName, metav1.GetOptions{})
   839  				if err != nil {
   840  					return &apis.FieldError{
   841  						Message: fmt.Sprintf("Secret %s does not exist, so cannot be used as a volume", v.Secret.SecretName),
   842  						Paths:   []string{"secretName"},
   843  					}
   844  				}
   845  			} else if v.PersistentVolumeClaim != nil {
   846  				_, err := kubeClient.CoreV1().PersistentVolumeClaims(ns).Get(v.PersistentVolumeClaim.ClaimName, metav1.GetOptions{})
   847  				if err != nil {
   848  					return &apis.FieldError{
   849  						Message: fmt.Sprintf("PVC %s does not exist, so cannot be used as a volume", v.PersistentVolumeClaim.ClaimName),
   850  						Paths:   []string{"claimName"},
   851  					}
   852  				}
   853  			}
   854  		}
   855  	}
   856  
   857  	return nil
   858  }
   859  
   860  func validateContainerOptions(c *corev1.Container, volumes []*corev1.Volume) *apis.FieldError {
   861  	if c != nil {
   862  		if len(c.Command) != 0 {
   863  			return &apis.FieldError{
   864  				Message: "Command cannot be specified in containerOptions",
   865  				Paths:   []string{"command"},
   866  			}
   867  		}
   868  		if len(c.Args) != 0 {
   869  			return &apis.FieldError{
   870  				Message: "Arguments cannot be specified in containerOptions",
   871  				Paths:   []string{"args"},
   872  			}
   873  		}
   874  		if c.Image != "" {
   875  			return &apis.FieldError{
   876  				Message: "Image cannot be specified in containerOptions",
   877  				Paths:   []string{"image"},
   878  			}
   879  		}
   880  		if c.WorkingDir != "" {
   881  			return &apis.FieldError{
   882  				Message: "WorkingDir cannot be specified in containerOptions",
   883  				Paths:   []string{"workingDir"},
   884  			}
   885  		}
   886  		if c.Name != "" {
   887  			return &apis.FieldError{
   888  				Message: "Name cannot be specified in containerOptions",
   889  				Paths:   []string{"name"},
   890  			}
   891  		}
   892  		if c.Stdin {
   893  			return &apis.FieldError{
   894  				Message: "Stdin cannot be specified in containerOptions",
   895  				Paths:   []string{"stdin"},
   896  			}
   897  		}
   898  		if c.TTY {
   899  			return &apis.FieldError{
   900  				Message: "TTY cannot be specified in containerOptions",
   901  				Paths:   []string{"tty"},
   902  			}
   903  		}
   904  		if len(c.VolumeMounts) > 0 {
   905  			for i, m := range c.VolumeMounts {
   906  				if !isVolumeMountValid(m, volumes) {
   907  					fieldErr := &apis.FieldError{
   908  						Message: fmt.Sprintf("Volume mount name %s not found in volumes for stage or pipeline", m.Name),
   909  						Paths:   []string{"name"},
   910  					}
   911  
   912  					return fieldErr.ViaFieldIndex("volumeMounts", i)
   913  				}
   914  			}
   915  		}
   916  	}
   917  
   918  	return nil
   919  }
   920  
   921  func validateSidecarContainer(c *corev1.Container, volumes []*corev1.Volume) *apis.FieldError {
   922  	if c != nil {
   923  		if c.Name == "" {
   924  			return &apis.FieldError{
   925  				Message: "Name cannot be empty in sidecar",
   926  				Paths:   []string{"name"},
   927  			}
   928  		}
   929  		if c.Image == "" {
   930  			return &apis.FieldError{
   931  				Message: "Image cannot be empty in sidecar",
   932  				Paths:   []string{"image"},
   933  			}
   934  		}
   935  		if len(c.VolumeMounts) > 0 {
   936  			for i, m := range c.VolumeMounts {
   937  				if !isVolumeMountValid(m, volumes) {
   938  					fieldErr := &apis.FieldError{
   939  						Message: fmt.Sprintf("Volume mount name %s not found in volumes for stage or pipeline", m.Name),
   940  						Paths:   []string{"name"},
   941  					}
   942  
   943  					return fieldErr.ViaFieldIndex("volumeMounts", i)
   944  				}
   945  			}
   946  		}
   947  	}
   948  
   949  	return nil
   950  }
   951  
   952  func isVolumeMountValid(mount corev1.VolumeMount, volumes []*corev1.Volume) bool {
   953  	foundVolume := false
   954  
   955  	for _, v := range volumes {
   956  		if v.Name == mount.Name {
   957  			foundVolume = true
   958  			break
   959  		}
   960  	}
   961  
   962  	return foundVolume
   963  }
   964  
   965  func validateStageOptions(o *StageOptions, volumes []*corev1.Volume, kubeClient kubernetes.Interface, ns string) *apis.FieldError {
   966  	if o != nil {
   967  		if err := validateStash(o.Stash); err != nil {
   968  			return err.ViaField("stash")
   969  		}
   970  
   971  		if o.Unstash != nil {
   972  			if err := validateUnstash(o.Unstash); err != nil {
   973  				return err.ViaField("unstash")
   974  			}
   975  		}
   976  
   977  		if o.Workspace != nil {
   978  			if err := validateWorkspace(*o.Workspace); err != nil {
   979  				return err
   980  			}
   981  		}
   982  
   983  		if o.RootOptions != nil && o.RootOptions.DistributeParallelAcrossNodes {
   984  			return &apis.FieldError{
   985  				Message: "distributeParallelAcrossNodes cannot be used in a stage",
   986  				Paths:   []string{"distributeParallelAcrossNodes"},
   987  			}
   988  		}
   989  
   990  		return validateRootOptions(o.RootOptions, volumes, kubeClient, ns)
   991  	}
   992  
   993  	return nil
   994  }
   995  
   996  func validateTimeout(t *Timeout) *apis.FieldError {
   997  	if t != nil {
   998  		isAllowed := false
   999  		for _, allowed := range allTimeoutUnits {
  1000  			if t.Unit == allowed {
  1001  				isAllowed = true
  1002  			}
  1003  		}
  1004  
  1005  		if !isAllowed {
  1006  			return &apis.FieldError{
  1007  				Message: fmt.Sprintf("%s is not a valid time unit. Valid time units are %s", string(t.Unit),
  1008  					strings.Join(allTimeoutUnitsAsStrings(), ", ")),
  1009  				Paths: []string{"unit"},
  1010  			}
  1011  		}
  1012  
  1013  		if t.Time < 1 {
  1014  			return &apis.FieldError{
  1015  				Message: "Timeout must be greater than zero",
  1016  				Paths:   []string{"time"},
  1017  			}
  1018  		}
  1019  	}
  1020  
  1021  	return nil
  1022  }
  1023  
  1024  func validateUnstash(u *Unstash) *apis.FieldError {
  1025  	if u != nil {
  1026  		// TODO: Check to make sure the corresponding stash is defined somewhere
  1027  		if u.Name == "" {
  1028  			return &apis.FieldError{
  1029  				Message: "The unstash name must be provided",
  1030  				Paths:   []string{"name"},
  1031  			}
  1032  		}
  1033  	}
  1034  
  1035  	return nil
  1036  }
  1037  
  1038  func validateStash(s *Stash) *apis.FieldError {
  1039  	if s != nil {
  1040  		if s.Name == "" {
  1041  			return &apis.FieldError{
  1042  				Message: "The stash name must be provided",
  1043  				Paths:   []string{"name"},
  1044  			}
  1045  		}
  1046  		if s.Files == "" {
  1047  			return &apis.FieldError{
  1048  				Message: "files to stash must be provided",
  1049  				Paths:   []string{"files"},
  1050  			}
  1051  		}
  1052  	}
  1053  
  1054  	return nil
  1055  }
  1056  
  1057  func validateWorkspace(w string) *apis.FieldError {
  1058  	if w == "" {
  1059  		return &apis.FieldError{
  1060  			Message: "The workspace name must be unspecified or non-empty",
  1061  			Paths:   []string{"workspace"},
  1062  		}
  1063  	}
  1064  
  1065  	return nil
  1066  }
  1067  
  1068  // EnvMapToSlice transforms a map of environment variables into a slice that can be used in container configuration
  1069  func EnvMapToSlice(envMap map[string]corev1.EnvVar) []corev1.EnvVar {
  1070  	env := make([]corev1.EnvVar, 0, len(envMap))
  1071  
  1072  	// Avoid nondeterministic results by sorting the keys and appending vars in that order.
  1073  	var envVars []string
  1074  	for k := range envMap {
  1075  		envVars = append(envVars, k)
  1076  	}
  1077  	sort.Strings(envVars)
  1078  
  1079  	for _, envVar := range envVars {
  1080  		env = append(env, envMap[envVar])
  1081  	}
  1082  
  1083  	return env
  1084  }
  1085  
  1086  // GetPodLabels returns the optional additional labels to apply to all pods for this pipeline. The labels and their values
  1087  // will be converted to RFC1035-compliant strings.
  1088  func (j *ParsedPipeline) GetPodLabels() map[string]string {
  1089  	sanitizedLabels := make(map[string]string)
  1090  	if j.Options != nil {
  1091  		for k, v := range j.Options.PodLabels {
  1092  			sanitizedKey := MangleToRfc1035Label(k, "")
  1093  			sanitizedValue := MangleToRfc1035Label(v, "")
  1094  			if sanitizedKey != k || sanitizedValue != v {
  1095  				log.Logger().Infof("Converted custom label/value '%s' to '%s' to conform to Kubernetes label requirements",
  1096  					util.ColorInfo(k+"="+v), util.ColorInfo(sanitizedKey+"="+sanitizedValue))
  1097  			}
  1098  			sanitizedLabels[sanitizedKey] = sanitizedValue
  1099  		}
  1100  	}
  1101  	return sanitizedLabels
  1102  }
  1103  
  1104  // GetTolerations returns the tolerations configured in the root options for this pipeline, if any.
  1105  func (j *ParsedPipeline) GetTolerations() []corev1.Toleration {
  1106  	if j.Options != nil {
  1107  		return j.Options.Tolerations
  1108  	}
  1109  	return nil
  1110  }
  1111  
  1112  // GetPossibleAffinityPolicy takes the pipeline name and returns the appropriate affinity policy for pods in this
  1113  // pipeline given its configuration, specifically of options.distributeParallelAcrossNodes.
  1114  func (j *ParsedPipeline) GetPossibleAffinityPolicy(name string) *corev1.Affinity {
  1115  	if j.Options != nil && j.Options.DistributeParallelAcrossNodes {
  1116  
  1117  		antiAffinityLabels := make(map[string]string)
  1118  		if len(j.Options.PodLabels) > 0 {
  1119  			antiAffinityLabels = util.MergeMaps(j.GetPodLabels())
  1120  		} else {
  1121  			antiAffinityLabels[pipeline.GroupName+pipeline.PipelineRunLabelKey] = name
  1122  		}
  1123  		return &corev1.Affinity{
  1124  			PodAntiAffinity: &corev1.PodAntiAffinity{
  1125  				RequiredDuringSchedulingIgnoredDuringExecution: []corev1.PodAffinityTerm{{
  1126  					LabelSelector: &metav1.LabelSelector{
  1127  						MatchLabels: antiAffinityLabels,
  1128  					},
  1129  					TopologyKey: "kubernetes.io/hostname",
  1130  				}},
  1131  			},
  1132  		}
  1133  	}
  1134  	return nil
  1135  }
  1136  
// StepPlaceholderReplacementArgs specifies the arguments required for replacing placeholders in build pack directories.
type StepPlaceholderReplacementArgs struct {
	// WorkspaceDir is the base directory substituted for relative step directories and path placeholders.
	WorkspaceDir         string
	// GitName is the repository name substituted for the app-name placeholder (lowercased).
	GitName              string
	// GitOrg is the organization substituted for the org placeholder (lowercased).
	GitOrg               string
	// GitHost is the git provider host substituted for the git-provider placeholder (lowercased).
	GitHost              string
	// DockerRegistry is the registry host used when rewriting image-build steps.
	DockerRegistry       string
	// DockerRegistryOrg is the registry organization substituted for its placeholder (lowercased).
	DockerRegistryOrg    string
	// DockerRegistryMirror, when non-empty, is passed to kaniko as --registry-mirror.
	DockerRegistryMirror string
	// ProjectID is used to build the kaniko cache repository path.
	ProjectID            string
	// KanikoImage is the image used for rewritten kaniko build steps.
	KanikoImage          string
	// UseKaniko enables rewriting skaffold/kaniko build steps in modifyStep.
	UseKaniko            bool
}
  1150  
  1151  func (p *StepPlaceholderReplacementArgs) workingDirAsPointer() *string {
  1152  	// TODO: Is there a better way to ensure that we're creating a pointer of a copy of the string?
  1153  	copyOfWorkingDir := p.WorkspaceDir
  1154  	return &copyOfWorkingDir
  1155  }
  1156  
  1157  // ReplacePlaceholdersInStepAndStageDirs traverses this pipeline's stages and any nested stages for any steps (and any nested steps)
  1158  // within the stages, and replaces "REPLACE_ME_..." placeholders in those steps' directories.
  1159  func (j *ParsedPipeline) ReplacePlaceholdersInStepAndStageDirs(args StepPlaceholderReplacementArgs) {
  1160  	var stages []Stage
  1161  	for _, s := range j.Stages {
  1162  		s.replacePlaceholdersInStage(j.WorkingDir, args)
  1163  		stages = append(stages, s)
  1164  	}
  1165  	j.Stages = stages
  1166  }
  1167  
  1168  func (s *Stage) replacePlaceholdersInStage(parentDir *string, args StepPlaceholderReplacementArgs) {
  1169  	var steps []Step
  1170  	var stages []Stage
  1171  	var parallel []Stage
  1172  	// If there's no working directory and this stage contains steps, we should set a stage directory
  1173  	if s.WorkingDir == nil && len(s.Steps) > 0 {
  1174  		// If there's no parent working directory, use the default provided.
  1175  		if parentDir == nil {
  1176  			s.WorkingDir = args.workingDirAsPointer()
  1177  		} else {
  1178  			s.WorkingDir = parentDir
  1179  		}
  1180  	}
  1181  	s.WorkingDir = replacePlaceholdersInDir(s.WorkingDir, args)
  1182  	for _, step := range s.Steps {
  1183  		step.replacePlaceholdersInStep(args)
  1184  		steps = append(steps, step)
  1185  	}
  1186  	for _, nested := range s.Stages {
  1187  		nested.replacePlaceholdersInStage(s.WorkingDir, args)
  1188  		stages = append(stages, nested)
  1189  	}
  1190  	for _, p := range s.Parallel {
  1191  		p.replacePlaceholdersInStage(s.WorkingDir, args)
  1192  		parallel = append(parallel, p)
  1193  	}
  1194  	s.Steps = steps
  1195  	s.Stages = stages
  1196  	s.Parallel = parallel
  1197  }
  1198  
  1199  func replacePlaceholdersInDir(originalDir *string, args StepPlaceholderReplacementArgs) *string {
  1200  	if originalDir == nil || *originalDir == "" {
  1201  		return originalDir
  1202  	}
  1203  	dir := *originalDir
  1204  	// Replace the Go buildpack path with the correct location for Tekton builds.
  1205  	dir = strings.Replace(dir, "/home/jenkins/go/src/REPLACE_ME_GIT_PROVIDER/REPLACE_ME_ORG/REPLACE_ME_APP_NAME", args.WorkspaceDir, -1)
  1206  
  1207  	dir = strings.Replace(dir, util.PlaceHolderAppName, strings.ToLower(args.GitName), -1)
  1208  	dir = strings.Replace(dir, util.PlaceHolderOrg, strings.ToLower(args.GitOrg), -1)
  1209  	dir = strings.Replace(dir, util.PlaceHolderGitProvider, strings.ToLower(args.GitHost), -1)
  1210  	dir = strings.Replace(dir, util.PlaceHolderDockerRegistryOrg, strings.ToLower(args.DockerRegistryOrg), -1)
  1211  
  1212  	if strings.HasPrefix(dir, "./") {
  1213  		dir = args.WorkspaceDir + strings.TrimPrefix(dir, ".")
  1214  	}
  1215  	if !filepath.IsAbs(dir) {
  1216  		dir = filepath.Join(args.WorkspaceDir, dir)
  1217  	}
  1218  	return &dir
  1219  }
  1220  
  1221  func (s *Step) replacePlaceholdersInStep(args StepPlaceholderReplacementArgs) {
  1222  	if s.GetCommand() != "" {
  1223  		s.modifyStep(args)
  1224  		s.Dir = *replacePlaceholdersInDir(&s.Dir, args)
  1225  	}
  1226  	var steps []*Step
  1227  	for _, nested := range s.Steps {
  1228  		nested.replacePlaceholdersInStep(args)
  1229  		steps = append(steps, nested)
  1230  	}
  1231  	s.Steps = steps
  1232  	if s.Loop != nil {
  1233  		var loopSteps []Step
  1234  		for _, nested := range s.Loop.Steps {
  1235  			nested.replacePlaceholdersInStep(args)
  1236  			loopSteps = append(loopSteps, nested)
  1237  		}
  1238  		s.Loop.Steps = loopSteps
  1239  	}
  1240  }
  1241  
  1242  // modifyStep allows a container step to be modified to do something different
  1243  func (s *Step) modifyStep(params StepPlaceholderReplacementArgs) {
  1244  	if params.UseKaniko {
  1245  		if strings.HasPrefix(s.GetCommand(), "skaffold build") ||
  1246  			(len(s.Arguments) > 0 && strings.HasPrefix(strings.Join(s.Arguments[1:], " "), "skaffold build")) ||
  1247  			commandIsSkaffoldRegex.MatchString(s.GetCommand()) {
  1248  
  1249  			sourceDir := params.WorkspaceDir
  1250  			dockerfile := filepath.Join(sourceDir, "Dockerfile")
  1251  			localRepo := params.DockerRegistry
  1252  			destination := params.DockerRegistry + "/" + params.DockerRegistryOrg + "/" + naming.ToValidName(params.GitName)
  1253  
  1254  			args := []string{"--cache=true", "--cache-dir=/workspace",
  1255  				"--context=" + sourceDir,
  1256  				"--dockerfile=" + dockerfile,
  1257  				"--destination=" + destination + ":${inputs.params.version}",
  1258  				"--cache-repo=" + localRepo + "/" + params.ProjectID + "/cache",
  1259  			}
  1260  			if localRepo != "gcr.io" {
  1261  				args = append(args, "--skip-tls-verify-registry="+localRepo)
  1262  			}
  1263  
  1264  			if ipAddressRegistryRegex.MatchString(localRepo) {
  1265  				args = append(args, "--insecure")
  1266  			}
  1267  
  1268  			if params.DockerRegistryMirror != "" {
  1269  				args = append(args, "--registry-mirror=" + params.DockerRegistryMirror)
  1270  			}
  1271  
  1272  			s.Command = "/kaniko/executor"
  1273  			s.Arguments = args
  1274  
  1275  			s.Image = params.KanikoImage
  1276  		} else if s.GetCommand() == "/kaniko/executor" {
  1277  			args := s.Arguments
  1278  			seen := make(map[string]bool, len(args))
  1279  			for _, arg := range args {
  1280  				if arg == "" || arg[0] != '-' {
  1281  					continue
  1282  				}
  1283  				arg = strings.SplitN(arg, "=", 2)[0]
  1284  				seen[arg] = true
  1285  			}
  1286  
  1287  			localRepo := params.DockerRegistry
  1288  
  1289  			if !seen["--cache-repo"] {
  1290  				args = append(args, "--cache-repo=" + params.DockerRegistry + "/" + params.ProjectID + "/cache")
  1291  			}
  1292  
  1293  			if localRepo != "gcr.io" && !seen["--skip-tls-verify-registry"] {
  1294  				args = append(args, "--skip-tls-verify-registry="+localRepo)
  1295  			}
  1296  
  1297  			if ipAddressRegistryRegex.MatchString(localRepo) && !seen["--insecure"] {
  1298  				args = append(args, "--insecure")
  1299  			}
  1300  
  1301  			if params.DockerRegistryMirror != ""  && !seen["--registry-mirror"] {
  1302  				args = append(args, "--registry-mirror=" + params.DockerRegistryMirror)
  1303  			}
  1304  
  1305  			s.Arguments = args
  1306  		}
  1307  	}
  1308  }
  1309  
  1310  // AddContainerEnvVarsToPipeline allows for adding a slice of container environment variables directly to the
  1311  // pipeline, if they're not already defined.
  1312  func (j *ParsedPipeline) AddContainerEnvVarsToPipeline(origEnv []corev1.EnvVar) {
  1313  	if len(origEnv) > 0 {
  1314  		envMap := make(map[string]corev1.EnvVar)
  1315  
  1316  		// Add the container env vars first.
  1317  		for _, e := range origEnv {
  1318  			if e.ValueFrom == nil {
  1319  				envMap[e.Name] = corev1.EnvVar{
  1320  					Name:  e.Name,
  1321  					Value: e.Value,
  1322  				}
  1323  			}
  1324  		}
  1325  
  1326  		// Overwrite with the existing pipeline environment, if it exists
  1327  		for _, e := range j.GetEnv() {
  1328  			envMap[e.Name] = e
  1329  		}
  1330  
  1331  		env := make([]corev1.EnvVar, 0, len(envMap))
  1332  
  1333  		// Avoid nondeterministic results by sorting the keys and appending vars in that order.
  1334  		var envVars []string
  1335  		for k := range envMap {
  1336  			envVars = append(envVars, k)
  1337  		}
  1338  		sort.Strings(envVars)
  1339  
  1340  		for _, envVar := range envVars {
  1341  			env = append(env, envMap[envVar])
  1342  		}
  1343  
  1344  		j.Env = env
  1345  	}
  1346  }
  1347  
  1348  func scopedEnv(newEnv []corev1.EnvVar, parentEnv []corev1.EnvVar) []corev1.EnvVar {
  1349  	if len(parentEnv) == 0 && len(newEnv) == 0 {
  1350  		return nil
  1351  	}
  1352  	return CombineEnv(newEnv, parentEnv)
  1353  }
  1354  
  1355  // CombineEnv combines the two environments into a single unified slice where
  1356  // the `newEnv` overrides anything in the `parentEnv`
  1357  func CombineEnv(newEnv []corev1.EnvVar, parentEnv []corev1.EnvVar) []corev1.EnvVar {
  1358  	envMap := make(map[string]corev1.EnvVar)
  1359  
  1360  	for _, e := range parentEnv {
  1361  		envMap[e.Name] = e
  1362  	}
  1363  
  1364  	for _, e := range newEnv {
  1365  		envMap[e.Name] = e
  1366  	}
  1367  
  1368  	return EnvMapToSlice(envMap)
  1369  }
  1370  
// transformedStage is the intermediate representation produced while turning a
// parsed Stage into Tekton Task/PipelineTask CRDs, preserving the stage tree.
type transformedStage struct {
	Stage Stage
	// Only one of Sequential, Parallel, and Task is non-empty
	Sequential []*transformedStage
	Parallel   []*transformedStage
	Task       *tektonv1alpha1.Task
	// PipelineTask is non-empty only if Task is non-empty, but it is populated
	// after Task is populated so the reverse is not true.
	PipelineTask *tektonv1alpha1.PipelineTask
	// The depth of this stage in the full tree of stages
	Depth int8
	// The parallel or sequential stage enclosing this stage, or nil if this stage is at top level
	EnclosingStage *transformedStage
	// The stage immediately before this stage at the same depth, or nil if there is no such stage
	PreviousSiblingStage *transformedStage
	// TODO: Add the equivalent reverse relationship
}
  1388  
  1389  func (ts transformedStage) toPipelineStructureStage() v1.PipelineStructureStage {
  1390  	s := v1.PipelineStructureStage{
  1391  		Name:  ts.Stage.Name,
  1392  		Depth: ts.Depth,
  1393  	}
  1394  
  1395  	if ts.EnclosingStage != nil {
  1396  		s.Parent = &ts.EnclosingStage.Stage.Name
  1397  	}
  1398  
  1399  	if ts.PreviousSiblingStage != nil {
  1400  		s.Previous = &ts.PreviousSiblingStage.Stage.Name
  1401  	}
  1402  	// TODO: Add the equivalent reverse relationship
  1403  
  1404  	if ts.PipelineTask != nil {
  1405  		s.TaskRef = &ts.PipelineTask.TaskRef.Name
  1406  	}
  1407  
  1408  	if len(ts.Parallel) > 0 {
  1409  		for _, n := range ts.Parallel {
  1410  			s.Parallel = append(s.Parallel, n.Stage.Name)
  1411  		}
  1412  	}
  1413  
  1414  	if len(ts.Sequential) > 0 {
  1415  		for _, n := range ts.Sequential {
  1416  			s.Stages = append(s.Stages, n.Stage.Name)
  1417  		}
  1418  	}
  1419  
  1420  	return s
  1421  }
  1422  
  1423  func (ts transformedStage) getAllAsPipelineStructureStages() []v1.PipelineStructureStage {
  1424  	var stages []v1.PipelineStructureStage
  1425  
  1426  	stages = append(stages, ts.toPipelineStructureStage())
  1427  
  1428  	if len(ts.Parallel) > 0 {
  1429  		for _, n := range ts.Parallel {
  1430  			stages = append(stages, n.getAllAsPipelineStructureStages()...)
  1431  		}
  1432  	}
  1433  
  1434  	if len(ts.Sequential) > 0 {
  1435  		for _, n := range ts.Sequential {
  1436  			stages = append(stages, n.getAllAsPipelineStructureStages()...)
  1437  		}
  1438  	}
  1439  
  1440  	return stages
  1441  }
  1442  
  1443  func (ts transformedStage) isSequential() bool {
  1444  	return len(ts.Sequential) > 0
  1445  }
  1446  
  1447  func (ts transformedStage) isParallel() bool {
  1448  	return len(ts.Parallel) > 0
  1449  }
  1450  
  1451  func (ts transformedStage) getLinearTasks() []*tektonv1alpha1.Task {
  1452  	if ts.isSequential() {
  1453  		var tasks []*tektonv1alpha1.Task
  1454  		for _, seqTs := range ts.Sequential {
  1455  			tasks = append(tasks, seqTs.getLinearTasks()...)
  1456  		}
  1457  		return tasks
  1458  	} else if ts.isParallel() {
  1459  		var tasks []*tektonv1alpha1.Task
  1460  		for _, parTs := range ts.Parallel {
  1461  			tasks = append(tasks, parTs.getLinearTasks()...)
  1462  		}
  1463  		return tasks
  1464  	} else {
  1465  		return []*tektonv1alpha1.Task{ts.Task}
  1466  	}
  1467  }
  1468  
  1469  // If the workspace is nil, sets it to the parent's workspace
  1470  func (ts *transformedStage) computeWorkspace(parentWorkspace string) {
  1471  	if ts.Stage.Options == nil {
  1472  		ts.Stage.Options = &StageOptions{
  1473  			RootOptions: &RootOptions{},
  1474  		}
  1475  	}
  1476  	if ts.Stage.Options.Workspace == nil {
  1477  		ts.Stage.Options.Workspace = &parentWorkspace
  1478  	}
  1479  }
  1480  
// stageToTaskParams carries everything stageToTask needs to convert one stage
// (and, recursively, its children) into Tekton Tasks: the shared pipeline-level
// parameters plus the state inherited from the enclosing stage.
type stageToTaskParams struct {
	// parentParams holds the pipeline-wide CRD-generation parameters.
	parentParams         CRDsFromPipelineParams
	// stage is the stage being converted.
	stage                Stage
	// baseWorkingDir is the working directory inherited from enclosing stages, if any.
	baseWorkingDir       *string
	// parentEnv/parentAgent are the environment and agent inherited from the parent scope.
	parentEnv            []corev1.EnvVar
	parentAgent          *Agent
	// parentWorkspace is the workspace name inherited when this stage declares none.
	parentWorkspace      string
	// parentContainer/parentSidecars/parentVolumes are container overrides,
	// sidecars, and volumes propagated from enclosing stages.
	parentContainer      *corev1.Container
	parentSidecars       []*corev1.Container
	parentVolumes        []*corev1.Volume
	// depth is this stage's depth in the stage tree.
	depth                int8
	// enclosingStage is the sequential/parallel stage containing this one, or nil at top level.
	enclosingStage       *transformedStage
	// previousSiblingStage is the preceding stage at the same depth, or nil.
	previousSiblingStage *transformedStage
}
  1495  
// stageToTask converts a stage into a transformedStage. A stage with steps
// becomes a single Tekton Task; a stage with nested sequential or parallel
// stages recurses into its children, propagating inherited environment, agent,
// workspace, container overrides, sidecars, and volumes. Unsupported features
// (post, stage-level timeout/stash/unstash) are rejected with errors.
func stageToTask(params stageToTaskParams) (*transformedStage, error) {
	if len(params.stage.Post) != 0 {
		return nil, errors.New("post on stages not yet supported")
	}

	stageContainer := &corev1.Container{}
	var stageSidecars []*corev1.Container
	var stageVolumes []*corev1.Volume

	if params.stage.Options != nil {
		o := params.stage.Options
		if o.RootOptions == nil {
			o.RootOptions = &RootOptions{}
		} else {
			if o.Timeout != nil {
				return nil, errors.New("Timeout on stage not yet supported")
			}
			if o.ContainerOptions != nil {
				stageContainer = o.ContainerOptions
			}
			stageSidecars = o.Sidecars
			stageVolumes = o.Volumes
		}
		if o.Stash != nil {
			return nil, errors.New("Stash on stage not yet supported")
		}
		if o.Unstash != nil {
			return nil, errors.New("Unstash on stage not yet supported")
		}
	}

	// Don't overwrite the inherited working dir if we don't have one specified here.
	if params.stage.WorkingDir != nil {
		params.baseWorkingDir = params.stage.WorkingDir
	}

	// Merge any parent container overrides underneath this stage's own overrides.
	if params.parentContainer != nil {
		merged, err := MergeContainers(params.parentContainer, stageContainer)
		if err != nil {
			return nil, errors.Wrapf(err, "Error merging stage and parent container overrides: %s", err)
		}
		stageContainer = merged
	}
	stageSidecars = append(stageSidecars, params.parentSidecars...)
	stageVolumes = append(stageVolumes, params.parentVolumes...)

	env := scopedEnv(params.stage.GetEnv(), params.parentEnv)

	// The stage's own agent wins; otherwise inherit the parent's.
	agent := params.stage.Agent.DeepCopy()

	if agent == nil {
		agent = params.parentAgent.DeepCopy()
	}

	stepCounter := 0
	defaultTaskSpec, err := getDefaultTaskSpec(env, stageContainer, params.parentParams.DefaultImage, params.parentParams.VersionsDir)
	if err != nil {
		return nil, err
	}

	// Case 1: a leaf stage with steps becomes a single Tekton Task.
	if len(params.stage.Steps) > 0 {
		t := &tektonv1alpha1.Task{
			TypeMeta: metav1.TypeMeta{
				APIVersion: TektonAPIVersion,
				Kind:       "Task",
			},
			ObjectMeta: metav1.ObjectMeta{
				Namespace: params.parentParams.Namespace,
				Name:      MangleToRfc1035Label(fmt.Sprintf("%s-%s", params.parentParams.PipelineIdentifier, params.stage.Name), params.parentParams.BuildIdentifier),
				Labels:    util.MergeMaps(params.parentParams.Labels, map[string]string{LabelStageName: params.stage.stageLabelName()}),
			},
		}
		// Only add the default git merge step if this is the first actual step stage - including if the stage is one of
		// N stages within a parallel stage, and that parallel stage is the first stage in the pipeline
		if params.previousSiblingStage == nil && isNestedFirstStepsStage(params.enclosingStage) {
			t.Spec = defaultTaskSpec
		}
		prependedSteps, err := builderHomeStep(env, stageContainer, params.parentParams.DefaultImage, params.parentParams.VersionsDir)
		if err != nil {
			return nil, err
		}
		t.Spec.Steps = append(prependedSteps, t.Spec.Steps...)
		t.SetDefaults(context.Background())

		// The git workspace resource is declared as both input and output so
		// changes flow between tasks.
		ws := &tektonv1alpha1.TaskResource{
			ResourceDeclaration: tektonv1alpha1.ResourceDeclaration{
				Name:       "workspace",
				TargetPath: params.parentParams.SourceDir,
				Type:       tektonv1alpha1.PipelineResourceTypeGit,
			},
		}

		t.Spec.Inputs = &tektonv1alpha1.Inputs{
			Resources: []tektonv1alpha1.TaskResource{*ws},
		}

		t.Spec.Outputs = &tektonv1alpha1.Outputs{
			Resources: []tektonv1alpha1.TaskResource{*ws},
		}

		for _, sidecar := range stageSidecars {
			if sidecar != nil {
				t.Spec.Sidecars = append(t.Spec.Sidecars, tektonv1beta1.Sidecar{
					Container: *sidecar,
				})
			}
		}

		// We don't want to dupe volumes for the Task if there are multiple steps
		volumes := make(map[string]corev1.Volume)

		for _, v := range stageVolumes {
			volumes[v.Name] = *v
		}

		for _, step := range params.stage.Steps {
			actualSteps, stepVolumes, newCounter, err := generateSteps(generateStepsParams{
				stageParams:     params,
				step:            step,
				inheritedAgent:  agent.Image,
				env:             env,
				parentContainer: stageContainer,
				stepCounter:     stepCounter,
			})
			if err != nil {
				return nil, err
			}

			stepCounter = newCounter

			t.Spec.Steps = append(t.Spec.Steps, actualSteps...)
			for k, v := range stepVolumes {
				volumes[k] = v
			}
		}

		// Avoid nondeterministic results by sorting the keys and appending volumes in that order.
		var volNames []string
		for k := range volumes {
			volNames = append(volNames, k)
		}
		sort.Strings(volNames)

		for _, v := range volNames {
			t.Spec.Volumes = append(t.Spec.Volumes, volumes[v])
		}

		ts := transformedStage{Stage: params.stage, Task: t, Depth: params.depth, EnclosingStage: params.enclosingStage, PreviousSiblingStage: params.previousSiblingStage}
		ts.computeWorkspace(params.parentWorkspace)
		return &ts, nil
	}
	// Case 2: a stage composed of sequential child stages; each child tracks
	// its previous sibling so only the first gets the default git merge step.
	if len(params.stage.Stages) > 0 {
		var tasks []*transformedStage
		ts := transformedStage{Stage: params.stage, Depth: params.depth, EnclosingStage: params.enclosingStage, PreviousSiblingStage: params.previousSiblingStage}
		ts.computeWorkspace(params.parentWorkspace)

		for i, nested := range params.stage.Stages {
			var nestedPreviousSibling *transformedStage
			if i > 0 {
				nestedPreviousSibling = tasks[i-1]
			}
			nestedTask, err := stageToTask(stageToTaskParams{
				parentParams:         params.parentParams,
				stage:                nested,
				baseWorkingDir:       params.baseWorkingDir,
				parentEnv:            env,
				parentAgent:          agent,
				parentWorkspace:      *ts.Stage.Options.Workspace,
				parentContainer:      stageContainer,
				parentSidecars:       stageSidecars,
				parentVolumes:        stageVolumes,
				depth:                params.depth + 1,
				enclosingStage:       &ts,
				previousSiblingStage: nestedPreviousSibling,
			})
			if err != nil {
				return nil, err
			}
			tasks = append(tasks, nestedTask)
		}
		ts.Sequential = tasks

		return &ts, nil
	}

	// Case 3: a stage composed of parallel child stages; no previous-sibling
	// links since the children run concurrently.
	if len(params.stage.Parallel) > 0 {
		var tasks []*transformedStage
		ts := transformedStage{Stage: params.stage, Depth: params.depth, EnclosingStage: params.enclosingStage, PreviousSiblingStage: params.previousSiblingStage}
		ts.computeWorkspace(params.parentWorkspace)

		for _, nested := range params.stage.Parallel {
			nestedTask, err := stageToTask(stageToTaskParams{
				parentParams:    params.parentParams,
				stage:           nested,
				baseWorkingDir:  params.baseWorkingDir,
				parentEnv:       env,
				parentAgent:     agent,
				parentWorkspace: *ts.Stage.Options.Workspace,
				parentContainer: stageContainer,
				parentSidecars:  stageSidecars,
				parentVolumes:   stageVolumes,
				depth:           params.depth + 1,
				enclosingStage:  &ts,
			})
			if err != nil {
				return nil, err
			}
			tasks = append(tasks, nestedTask)
		}
		ts.Parallel = tasks

		return &ts, nil
	}
	return nil, errors.New("no steps, sequential stages, or parallel stages")
}
  1711  
  1712  // MergeContainers combines parent and child container structs, with the child overriding the parent.
  1713  func MergeContainers(parentContainer, childContainer *corev1.Container) (*corev1.Container, error) {
  1714  	if parentContainer == nil {
  1715  		return childContainer, nil
  1716  	} else if childContainer == nil {
  1717  		return parentContainer, nil
  1718  	}
  1719  
  1720  	// We need JSON bytes to generate a patch to merge the child containers onto the parent container, so marshal the parent.
  1721  	parentAsJSON, err := json.Marshal(parentContainer)
  1722  	if err != nil {
  1723  		return nil, err
  1724  	}
  1725  	// We need to do a three-way merge to actually combine the parent and child containers, so we need an empty container
  1726  	// as the "original"
  1727  	emptyAsJSON, err := json.Marshal(&corev1.Container{})
  1728  	if err != nil {
  1729  		return nil, err
  1730  	}
  1731  	// Marshal the child to JSON
  1732  	childAsJSON, err := json.Marshal(childContainer)
  1733  	if err != nil {
  1734  		return nil, err
  1735  	}
  1736  
  1737  	// Get the patch meta for Container, which is needed for generating and applying the merge patch.
  1738  	patchSchema, err := strategicpatch.NewPatchMetaFromStruct(parentContainer)
  1739  
  1740  	if err != nil {
  1741  		return nil, err
  1742  	}
  1743  
  1744  	// Create a merge patch, with the empty JSON as the original, the child JSON as the modified, and the parent
  1745  	// JSON as the current - this lets us do a deep merge of the parent and child containers, with awareness of
  1746  	// the "patchMerge" tags.
  1747  	patch, err := strategicpatch.CreateThreeWayMergePatch(emptyAsJSON, childAsJSON, parentAsJSON, patchSchema, true)
  1748  	if err != nil {
  1749  		return nil, err
  1750  	}
  1751  
  1752  	// Actually apply the merge patch to the parent JSON.
  1753  	mergedAsJSON, err := strategicpatch.StrategicMergePatchUsingLookupPatchMeta(parentAsJSON, patch, patchSchema)
  1754  	if err != nil {
  1755  		return nil, err
  1756  	}
  1757  
  1758  	// Unmarshal the merged JSON to a Container pointer, and return it.
  1759  	merged := &corev1.Container{}
  1760  	err = json.Unmarshal(mergedAsJSON, merged)
  1761  	if err != nil {
  1762  		return nil, err
  1763  	}
  1764  
  1765  	return merged, nil
  1766  }
  1767  
  1768  func isNestedFirstStepsStage(enclosingStage *transformedStage) bool {
  1769  	if enclosingStage != nil {
  1770  		if enclosingStage.PreviousSiblingStage != nil {
  1771  			return false
  1772  		}
  1773  		return isNestedFirstStepsStage(enclosingStage.EnclosingStage)
  1774  	}
  1775  	return true
  1776  }
  1777  
// generateStepsParams bundles the inputs to generateSteps.
type generateStepsParams struct {
	// stageParams carries the parameters of the enclosing stage-to-task conversion.
	stageParams     stageToTaskParams
	// step is the pipeline step (or loop) to convert into container steps.
	step            Step
	// inheritedAgent is the image inherited from the surrounding agent, used
	// when the step does not declare its own image.
	inheritedAgent  string
	// env is the environment inherited from the enclosing scope.
	env             []corev1.EnvVar
	// parentContainer, if set, supplies defaults merged into each generated container.
	parentContainer *corev1.Container
	// stepCounter numbers generated steps that have no explicit name.
	stepCounter     int
}
  1786  
// generateSteps converts a single pipeline step (or loop of steps) into one or
// more Tekton container steps. It returns the generated steps, any volumes
// required by the pod templates used, the updated step counter used for naming
// anonymous steps, and an error if the step cannot be converted.
func generateSteps(params generateStepsParams) ([]tektonv1alpha1.Step, map[string]corev1.Volume, int, error) {
	volumes := make(map[string]corev1.Volume)
	var steps []tektonv1alpha1.Step

	// The step's own image wins over the image inherited from the agent.
	stepImage := params.inheritedAgent
	if params.step.GetImage() != "" {
		stepImage = params.step.GetImage()
	}

	// Default to ${WorkingDirRoot}/${sourceDir}
	workingDir := filepath.Join(WorkingDirRoot, params.stageParams.parentParams.SourceDir)

	// Directory we will cd to if it differs from the working dir.
	targetDir := workingDir

	if params.step.Dir != "" {
		targetDir = params.step.Dir
	} else if params.stageParams.baseWorkingDir != nil {
		targetDir = *(params.stageParams.baseWorkingDir)
	}
	// Relative working directories are always just added to /workspace/source, e.g.
	if !filepath.IsAbs(targetDir) {
		targetDir = filepath.Join(WorkingDirRoot, params.stageParams.parentParams.SourceDir, targetDir)
	}

	if params.step.GetCommand() != "" {
		// Outside interpret mode the working dir is fixed, so a differing target
		// dir is reached by prefixing the command with "cd <dir> &&" instead.
		var targetDirPrefix []string
		if targetDir != workingDir && !params.stageParams.parentParams.InterpretMode {
			targetDirPrefix = append(targetDirPrefix, "cd", targetDir, "&&")
		}
		c := &corev1.Container{}
		if params.parentContainer != nil {
			c = params.parentContainer.DeepCopy()
		}
		if params.stageParams.parentParams.PodTemplates != nil && params.stageParams.parentParams.PodTemplates[stepImage] != nil {
			// A pod template exists for this image: use its first container as the
			// base (merged with any parent container options) and adopt its volumes.
			podTemplate := params.stageParams.parentParams.PodTemplates[stepImage]
			containers := podTemplate.Spec.Containers
			for _, volume := range podTemplate.Spec.Volumes {
				volumes[volume.Name] = volume
			}
			if !equality.Semantic.DeepEqual(c, &corev1.Container{}) {
				merged, err := MergeContainers(&containers[0], c)
				if err != nil {
					return nil, nil, params.stepCounter, errors.Wrapf(err, "Error merging pod template and parent container: %s", err)
				}
				c = merged
			} else {
				c = &containers[0]
			}
		} else {
			// No pod template: run the command via a shell in the step image.
			c.Image = stepImage
			c.Command = []string{util.GetSh(), "-c"}
		}

		// Pin the image to the version stream's resolved tag when possible;
		// resolution failure is non-fatal and keeps the original image.
		resolvedImage, err := versionstream.ResolveDockerImage(params.stageParams.parentParams.VersionsDir, c.Image)
		if err != nil {
			log.Logger().Warnf("failed to resolve step image version: %s due to %s", c.Image, err.Error())
		} else {
			c.Image = resolvedImage
		}
		// Special-casing for commands starting with /kaniko/warmer, which doesn't have sh at all
		if strings.HasPrefix(params.step.GetCommand(), "/kaniko/warmer") {
			c.Command = append(targetDirPrefix, params.step.GetCommand())
			c.Args = params.step.Arguments
		} else {
			// If it's /kaniko/executor, use /busybox/sh instead of /bin/sh, and use the debug image
			if strings.HasPrefix(params.step.GetCommand(), "/kaniko/executor") && strings.Contains(c.Image, "gcr.io/kaniko-project") {
				if !strings.Contains(c.Image, "debug") {
					c.Image = strings.Replace(c.Image, "/executor:", "/executor:debug-", 1)
				}
				c.Command = []string{"/busybox/sh", "-c"}
			}
			// Collapse command, arguments, and the optional cd-prefix into a
			// single shell argument string.
			cmdStr := params.step.GetCommand()
			if len(params.step.Arguments) > 0 {
				cmdStr += " " + strings.Join(params.step.Arguments, " ")
			}
			if len(targetDirPrefix) > 0 {
				cmdStr = strings.Join(targetDirPrefix, " ") + " " + cmdStr
			}
			c.Args = []string{cmdStr}
		}
		if params.stageParams.parentParams.InterpretMode {
			c.WorkingDir = targetDir
		} else {
			// Rewrite ${inputs.params.foo} to $(inputs.params.foo) for Tekton.
			var newCmd []string
			var newArgs []string
			for _, c := range c.Command {
				newCmd = append(newCmd, ReplaceCurlyWithParen(c))
			}
			c.Command = newCmd
			for _, a := range c.Args {
				newArgs = append(newArgs, ReplaceCurlyWithParen(a))
			}
			c.Args = newArgs
			c.WorkingDir = workingDir
		}
		params.stepCounter++
		if params.step.Name != "" {
			c.Name = MangleToRfc1035Label(params.step.Name, "")
		} else {
			// NOTE(review): the counter was already incremented above, so the
			// first anonymous step is named "step2" — preserved for compatibility.
			c.Name = "step" + strconv.Itoa(1+params.stepCounter)
		}

		// Steps never need an interactive terminal.
		c.Stdin = false
		c.TTY = false
		// Step-level env wins over inherited env, which wins over container env.
		c.Env = scopedEnv(params.step.Env, scopedEnv(params.env, c.Env))

		steps = append(steps, tektonv1alpha1.Step{
			Container: *c,
		})
	} else if params.step.Loop != nil {
		// Expand a loop by generating the nested steps once per value, with the
		// loop variable injected into the environment for each iteration.
		for i, v := range params.step.Loop.Values {
			loopEnv := scopedEnv([]corev1.EnvVar{{Name: params.step.Loop.Variable, Value: v}}, params.env)

			for _, s := range params.step.Loop.Steps {
				if s.Name != "" {
					// Suffix named steps with the iteration number to keep names unique.
					s.Name = s.Name + strconv.Itoa(1+i)
				}
				loopSteps, loopVolumes, loopCounter, loopErr := generateSteps(generateStepsParams{
					stageParams:     params.stageParams,
					step:            s,
					inheritedAgent:  stepImage,
					env:             loopEnv,
					parentContainer: params.parentContainer,
					stepCounter:     params.stepCounter,
				})
				if loopErr != nil {
					return nil, nil, loopCounter, loopErr
				}

				// Bump the step counter to what we got from the loop
				params.stepCounter = loopCounter

				// Add the loop-generated steps
				steps = append(steps, loopSteps...)

				// Add any new volumes that may have shown up
				for k, v := range loopVolumes {
					volumes[k] = v
				}
			}
		}
	} else {
		return nil, nil, params.stepCounter, errors.New("syntactic sugar steps not yet supported")
	}

	// lets make sure if we've overloaded any environment variables we remove any remaining valueFrom structs
	// to avoid creating bad Tasks
	for i, step := range steps {
		for j, e := range step.Env {
			if e.Value != "" {
				steps[i].Env[j].ValueFrom = nil
			}
		}
	}

	return steps, volumes, params.stepCounter, nil
}
  1945  
  1946  // PipelineRunName returns the pipeline name given the pipeline and build identifier
  1947  func PipelineRunName(pipelineIdentifier string, buildIdentifier string) string {
  1948  	return MangleToRfc1035Label(fmt.Sprintf("%s", pipelineIdentifier), buildIdentifier)
  1949  }
  1950  
// CRDsFromPipelineParams is how the parameters to GenerateCRDs are specified
type CRDsFromPipelineParams struct {
	// PipelineIdentifier names the pipeline and its declared git resource.
	PipelineIdentifier string
	// BuildIdentifier distinguishes this build when generating resource names.
	BuildIdentifier    string
	// Namespace is the namespace the generated CRDs are created in.
	Namespace          string
	// PodTemplates maps image names to pod templates whose first container and
	// volumes seed the generated step containers.
	PodTemplates       map[string]*corev1.Pod
	// VersionsDir is the version stream directory used to resolve image tags.
	VersionsDir        string
	// TaskParams are default input params applied to tasks that declare none.
	TaskParams         []tektonv1alpha1.ParamSpec
	// SourceDir is the source directory name under /workspace.
	SourceDir          string
	// Labels are copied onto both the generated Pipeline and PipelineStructure.
	Labels             map[string]string
	// DefaultImage overrides the default builder image when set.
	DefaultImage       string
	// InterpretMode alters working-dir handling and skips the Tekton parameter
	// substitution rewriting (see generateSteps).
	InterpretMode      bool
}
  1964  
// GenerateCRDs translates the Pipeline structure into the corresponding Pipeline and Task CRDs
// along with a PipelineStructure mirroring the stage hierarchy. It returns an
// error if the pipeline uses features that are not yet supported.
func (j *ParsedPipeline) GenerateCRDs(params CRDsFromPipelineParams) (*tektonv1alpha1.Pipeline, []*tektonv1alpha1.Task, *v1.PipelineStructure, error) {
	// Top-level post steps are not supported yet; fail fast rather than dropping them.
	if len(j.Post) != 0 {
		return nil, nil, nil, errors.New("Post at top level not yet supported")
	}

	// Top-level container options, sidecars, and volumes are inherited by every stage.
	var parentContainer *corev1.Container
	var parentSidecars []*corev1.Container
	var parentVolumes []*corev1.Volume

	baseWorkingDir := j.WorkingDir

	if j.Options != nil {
		o := j.Options
		// Retry is only honored per stage (applied further down), not pipeline-wide.
		if o.Retry > 0 {
			return nil, nil, nil, errors.New("Retry at top level not yet supported")
		}
		parentContainer = o.ContainerOptions
		parentSidecars = o.Sidecars
		parentVolumes = o.Volumes
	}

	// The Pipeline CRD declares a single git resource that every task consumes
	// as its workspace input and republishes as its output.
	p := &tektonv1alpha1.Pipeline{
		TypeMeta: metav1.TypeMeta{
			APIVersion: TektonAPIVersion,
			Kind:       "Pipeline",
		},
		ObjectMeta: metav1.ObjectMeta{
			Namespace: params.Namespace,
			Name:      PipelineRunName(params.PipelineIdentifier, params.BuildIdentifier),
		},
		Spec: tektonv1alpha1.PipelineSpec{
			Resources: []tektonv1alpha1.PipelineDeclaredResource{
				{
					Name: params.PipelineIdentifier,
					Type: tektonv1alpha1.PipelineResourceTypeGit,
				},
			},
		},
	}

	p.SetDefaults(context.Background())

	// The PipelineStructure records the stage hierarchy for consumers of the jx API.
	structure := &v1.PipelineStructure{
		ObjectMeta: metav1.ObjectMeta{
			Name: p.Name,
		},
	}

	if len(params.Labels) > 0 {
		// MergeMaps returns a fresh map each time, so the two CRDs don't share one.
		p.Labels = util.MergeMaps(params.Labels)
		structure.Labels = util.MergeMaps(params.Labels)
	}

	var previousStage *transformedStage

	var tasks []*tektonv1alpha1.Task

	baseEnv := j.GetEnv()

	for i, s := range j.Stages {
		isLastStage := i == len(j.Stages)-1

		// Convert each top-level stage (and recursively its children) into Tasks.
		stage, err := stageToTask(stageToTaskParams{
			parentParams:         params,
			stage:                s,
			baseWorkingDir:       baseWorkingDir,
			parentEnv:            baseEnv,
			parentAgent:          j.Agent,
			parentWorkspace:      "default",
			parentContainer:      parentContainer,
			parentSidecars:       parentSidecars,
			parentVolumes:        parentVolumes,
			depth:                0,
			previousSiblingStage: previousStage,
		})
		if err != nil {
			return nil, nil, nil, err
		}

		o := stage.Stage.Options
		if o.RootOptions != nil {
			if o.Retry > 0 {
				// Stage-level retries are carried onto the transformed stage so
				// they end up on the generated PipelineTask (see createPipelineTasks).
				stage.Stage.Options.Retry = s.Options.Retry
				log.Logger().Infof("setting retries to %d for stage %s", stage.Stage.Options.Retry, stage.Stage.Name)
			}
		}
		previousStage = stage

		pipelineTasks := createPipelineTasks(stage, p.Spec.Resources[0].Name)

		linearTasks := stage.getLinearTasks()

		for index, lt := range linearTasks {
			// Drop the workspace output on tasks nothing runs after, and default
			// the task input params when the stage didn't define its own.
			if shouldRemoveWorkspaceOutput(stage, lt.Name, index, len(linearTasks), isLastStage) {
				pipelineTasks[index].Resources.Outputs = nil
				lt.Spec.Outputs = nil
			}
			if len(lt.Spec.Inputs.Params) == 0 {
				lt.Spec.Inputs.Params = params.TaskParams
			}
		}

		tasks = append(tasks, linearTasks...)
		p.Spec.Tasks = append(p.Spec.Tasks, pipelineTasks...)
		structure.Stages = append(structure.Stages, stage.getAllAsPipelineStructureStages()...)
	}

	return p, tasks, structure, nil
}
  2075  
  2076  func shouldRemoveWorkspaceOutput(stage *transformedStage, taskName string, index int, tasksLen int, isLastStage bool) bool {
  2077  	if stage.isParallel() {
  2078  		parallelStages := stage.Parallel
  2079  		for _, ps := range parallelStages {
  2080  			if ps.Task != nil && ps.Task.Name == taskName {
  2081  				return true
  2082  			}
  2083  			seq := ps.Sequential
  2084  			if len(seq) > 0 {
  2085  				lastSeq := seq[len(seq)-1]
  2086  				if lastSeq.Task.Name == taskName {
  2087  					return true
  2088  				}
  2089  			}
  2090  
  2091  		}
  2092  	} else if index == tasksLen-1 && isLastStage {
  2093  		return true
  2094  	}
  2095  	return false
  2096  }
  2097  
  2098  func createPipelineTasks(stage *transformedStage, resourceName string) []tektonv1alpha1.PipelineTask {
  2099  	if stage.isSequential() {
  2100  		var pTasks []tektonv1alpha1.PipelineTask
  2101  		for _, nestedStage := range stage.Sequential {
  2102  			pTasks = append(pTasks, createPipelineTasks(nestedStage, resourceName)...)
  2103  		}
  2104  		return pTasks
  2105  	} else if stage.isParallel() {
  2106  		var pTasks []tektonv1alpha1.PipelineTask
  2107  		for _, nestedStage := range stage.Parallel {
  2108  			pTasks = append(pTasks, createPipelineTasks(nestedStage, resourceName)...)
  2109  		}
  2110  		return pTasks
  2111  	} else {
  2112  		pTask := tektonv1alpha1.PipelineTask{
  2113  			Name: stage.Stage.stageLabelName(),
  2114  			TaskRef: &tektonv1alpha1.TaskRef{
  2115  				Name: stage.Task.Name,
  2116  			},
  2117  			Retries: int(stage.Stage.Options.Retry),
  2118  		}
  2119  
  2120  		_, provider := findWorkspaceProvider(stage, stage.getEnclosing(0))
  2121  		var previousStageNames []string
  2122  		for _, previousStage := range findPreviousNonBlockStages(*stage) {
  2123  			previousStageNames = append(previousStageNames, previousStage.PipelineTask.Name)
  2124  		}
  2125  		pTask.Resources = &tektonv1alpha1.PipelineTaskResources{
  2126  			Inputs: []tektonv1alpha1.PipelineTaskInputResource{
  2127  				{
  2128  					Name:     "workspace",
  2129  					Resource: resourceName,
  2130  					From:     provider,
  2131  				},
  2132  			},
  2133  			Outputs: []tektonv1alpha1.PipelineTaskOutputResource{
  2134  				{
  2135  					Name:     "workspace",
  2136  					Resource: resourceName,
  2137  				},
  2138  			},
  2139  		}
  2140  		pTask.RunAfter = previousStageNames
  2141  		stage.PipelineTask = &pTask
  2142  
  2143  		return []tektonv1alpha1.PipelineTask{pTask}
  2144  	}
  2145  }
  2146  
// findWorkspaceProvider looks for the most recent Task using the desired workspace that was not in the
// same parallel stage, walking backwards through sibling stages starting from
// the given sibling. It returns whether a provider was found plus the name of
// the corresponding Task (nil for the "empty" workspace, which needs none).
func findWorkspaceProvider(stage, sibling *transformedStage) (bool, []string) {
	// The "empty" workspace never needs a provider task.
	if *stage.Stage.Options.Workspace == "empty" {
		return true, nil
	}

	for sibling != nil {
		if sibling.isSequential() {
			// For a sequential wrapper, the last child is the most recent task.
			found, provider := findWorkspaceProvider(stage, sibling.Sequential[len(sibling.Sequential)-1])
			if found {
				return true, provider
			}
		} else if sibling.isParallel() {
			// We don't want to use a workspace from a parallel stage outside of that stage,
			// but we do need to descend inwards in case stage is in that same stage.
			if stage.getEnclosing(sibling.Depth) == sibling {
				for _, nested := range sibling.Parallel {
					// Pick the parallel branch that has stage
					if stage.getEnclosing(nested.Depth) == nested {
						found, provider := findWorkspaceProvider(stage, nested)
						if found {
							return true, provider
						}
					}
				}
			}
			// TODO: What to do about custom workspaces? Check for erroneous uses specially?
			// Allow them if only one of the parallel tasks uses the same resource?
		} else if sibling.PipelineTask != nil {
			// Leaf stage with a created PipelineTask: match on workspace name.
			if *sibling.Stage.Options.Workspace == *stage.Stage.Options.Workspace {
				return true, []string{sibling.PipelineTask.Name}
			}
		} else {
			// We are in a sequential stage and sibling has not had its PipelineTask created.
			// Check the task before it so we don't use a workspace of a later task.
		}
		sibling = sibling.PreviousSiblingStage
	}

	return false, nil
}
  2189  
  2190  // Find the end tasks for this stage, traversing down to the end stages of any
  2191  // nested sequential or parallel stages as well.
  2192  func findEndStages(stage transformedStage) []*transformedStage {
  2193  	if stage.isSequential() {
  2194  		return findEndStages(*stage.Sequential[len(stage.Sequential)-1])
  2195  	} else if stage.isParallel() {
  2196  		var endTasks []*transformedStage
  2197  		for _, pStage := range stage.Parallel {
  2198  			endTasks = append(endTasks, findEndStages(*pStage)...)
  2199  		}
  2200  		return endTasks
  2201  	} else {
  2202  		return []*transformedStage{&stage}
  2203  	}
  2204  }
  2205  
  2206  // Find the tasks that run immediately before this stage, not including
  2207  // sequential or parallel wrapper stages.
  2208  func findPreviousNonBlockStages(stage transformedStage) []*transformedStage {
  2209  	if stage.PreviousSiblingStage != nil {
  2210  		return findEndStages(*stage.PreviousSiblingStage)
  2211  	} else if stage.EnclosingStage != nil {
  2212  		return findPreviousNonBlockStages(*stage.EnclosingStage)
  2213  	} else {
  2214  		return []*transformedStage{}
  2215  	}
  2216  }
  2217  
  2218  // Return the stage that encloses this stage at the given depth, or nil if there is no such stage.
  2219  // Depth must be >= 0. Returns the stage itself if depth == stage.Depth
  2220  func (ts *transformedStage) getEnclosing(depth int8) *transformedStage {
  2221  	if ts.Depth == depth {
  2222  		return ts
  2223  	} else if ts.EnclosingStage == nil {
  2224  		return nil
  2225  	} else {
  2226  		return ts.EnclosingStage.getEnclosing(depth)
  2227  	}
  2228  }
  2229  
  2230  func findDuplicates(names []string) *apis.FieldError {
  2231  	// Count members
  2232  	counts := make(map[string]int)
  2233  	mangled := make(map[string]string)
  2234  	for _, v := range names {
  2235  		counts[MangleToRfc1035Label(v, "")]++
  2236  		mangled[v] = MangleToRfc1035Label(v, "")
  2237  	}
  2238  
  2239  	var duplicateNames []string
  2240  	for k, v := range mangled {
  2241  		if counts[v] > 1 {
  2242  			duplicateNames = append(duplicateNames, "'"+k+"'")
  2243  		}
  2244  	}
  2245  
  2246  	if len(duplicateNames) > 0 {
  2247  		// Avoid nondeterminism in error messages
  2248  		sort.Strings(duplicateNames)
  2249  		return &apis.FieldError{
  2250  			Message: "Stage names must be unique",
  2251  			Details: "The following stage names are used more than once: " + strings.Join(duplicateNames, ", "),
  2252  		}
  2253  	}
  2254  	return nil
  2255  }
  2256  
  2257  func validateStageNames(j *ParsedPipeline) (err *apis.FieldError) {
  2258  	var validate func(stages []Stage, stageNames *[]string)
  2259  	validate = func(stages []Stage, stageNames *[]string) {
  2260  
  2261  		for _, stage := range stages {
  2262  			*stageNames = append(*stageNames, stage.Name)
  2263  			if len(stage.Stages) > 0 {
  2264  				validate(stage.Stages, stageNames)
  2265  			}
  2266  		}
  2267  
  2268  	}
  2269  	var names []string
  2270  
  2271  	validate(j.Stages, &names)
  2272  
  2273  	err = findDuplicates(names)
  2274  
  2275  	return
  2276  }
  2277  
  2278  func builderHomeStep(envs []corev1.EnvVar, parentContainer *corev1.Container, defaultImage string, versionsDir string) ([]tektonv1alpha1.Step, error) {
  2279  	var err error
  2280  	image := defaultImage
  2281  	if image == "" {
  2282  		image = os.Getenv("BUILDER_JX_IMAGE")
  2283  		if image == "" {
  2284  			image, err = versionstream.ResolveDockerImage(versionsDir, GitMergeImage)
  2285  			if err != nil {
  2286  				return []tektonv1alpha1.Step{}, err
  2287  			}
  2288  		}
  2289  	}
  2290  
  2291  	builderHomeContainer := &corev1.Container{
  2292  		Name:       "setup-builder-home",
  2293  		Image:      image,
  2294  		Command:    []string{util.GetSh(), "-c"},
  2295  		Args:       []string{`[ -d /builder/home ] || mkdir -p /builder && ln -s /tekton/home /builder/home`},
  2296  		WorkingDir: "/workspace/source",
  2297  		Env:        envs,
  2298  	}
  2299  
  2300  	if parentContainer != nil {
  2301  		mergedHome, err := MergeContainers(parentContainer, builderHomeContainer)
  2302  		if err != nil {
  2303  			return []tektonv1alpha1.Step{}, err
  2304  		}
  2305  		builderHomeContainer = mergedHome
  2306  	}
  2307  
  2308  	return []tektonv1alpha1.Step{{
  2309  		Container: *builderHomeContainer,
  2310  	}}, nil
  2311  }
  2312  
  2313  // todo JR lets remove this when we switch tekton to using git merge type pipelineresources
  2314  func getDefaultTaskSpec(envs []corev1.EnvVar, parentContainer *corev1.Container, defaultImage string, versionsDir string) (tektonv1alpha1.TaskSpec, error) {
  2315  	var err error
  2316  	image := defaultImage
  2317  	if image == "" {
  2318  		image = os.Getenv("BUILDER_JX_IMAGE")
  2319  		if image == "" {
  2320  			image, err = versionstream.ResolveDockerImage(versionsDir, GitMergeImage)
  2321  			if err != nil {
  2322  				return tektonv1alpha1.TaskSpec{}, err
  2323  			}
  2324  		}
  2325  	}
  2326  
  2327  	childContainer := &corev1.Container{
  2328  		Name:       "git-merge",
  2329  		Image:      image,
  2330  		Command:    []string{"jx"},
  2331  		Args:       []string{"step", "git", "merge", "--verbose"},
  2332  		WorkingDir: "/workspace/source",
  2333  		Env:        envs,
  2334  	}
  2335  
  2336  	if parentContainer != nil {
  2337  		mergedChild, err := MergeContainers(parentContainer, childContainer)
  2338  		if err != nil {
  2339  			return tektonv1alpha1.TaskSpec{}, err
  2340  		}
  2341  		childContainer = mergedChild
  2342  	}
  2343  
  2344  	return tektonv1alpha1.TaskSpec{
  2345  		TaskSpec: tektonv1beta1.TaskSpec{
  2346  			Steps: []tektonv1alpha1.Step{
  2347  				{
  2348  					Container: *childContainer,
  2349  				},
  2350  			},
  2351  		},
  2352  	}, nil
  2353  }
  2354  
  2355  // HasNonStepOverrides returns true if this override contains configuration like agent, containerOptions, or volumes.
  2356  func (p *PipelineOverride) HasNonStepOverrides() bool {
  2357  	return p.ContainerOptions != nil || p.Agent != nil || len(p.Volumes) > 0
  2358  }
  2359  
  2360  // AsStepsSlice returns a possibly empty slice of the step or steps in this override
  2361  func (p *PipelineOverride) AsStepsSlice() []*Step {
  2362  	if p.Step != nil {
  2363  		return []*Step{p.Step}
  2364  	}
  2365  	if len(p.Steps) > 0 {
  2366  		return p.Steps
  2367  	}
  2368  	return []*Step{}
  2369  }
  2370  
  2371  // MatchesPipeline returns true if the pipeline name is specified in the override or no pipeline is specified at all in the override
  2372  func (p *PipelineOverride) MatchesPipeline(name string) bool {
  2373  	if p.Pipeline == "" || strings.EqualFold(p.Pipeline, name) {
  2374  		return true
  2375  	}
  2376  	return false
  2377  }
  2378  
  2379  // MatchesStage returns true if the stage/lifecycle name is specified in the override or no stage/lifecycle is specified at all in the override
  2380  func (p *PipelineOverride) MatchesStage(name string) bool {
  2381  	if p.Stage == "" || p.Stage == name {
  2382  		return true
  2383  	}
  2384  	return false
  2385  }
  2386  
  2387  // ApplyStepOverridesToPipeline applies an individual override to the pipeline, replacing named steps in specified stages (or all stages if
  2388  // no stage name is specified).
  2389  func ApplyStepOverridesToPipeline(pipeline *ParsedPipeline, override *PipelineOverride) *ParsedPipeline {
  2390  	if pipeline == nil || override == nil {
  2391  		return pipeline
  2392  	}
  2393  
  2394  	var newStages []Stage
  2395  	for _, s := range pipeline.Stages {
  2396  		overriddenStage := ApplyStepOverridesToStage(s, override)
  2397  		if !equality.Semantic.DeepEqual(overriddenStage, Stage{}) {
  2398  			newStages = append(newStages, overriddenStage)
  2399  		}
  2400  	}
  2401  	pipeline.Stages = newStages
  2402  
  2403  	return pipeline
  2404  }
  2405  
  2406  func stepPointerSliceToStepSlice(orig []*Step) []Step {
  2407  	var newSteps []Step
  2408  	for _, s := range orig {
  2409  		if s != nil {
  2410  			newSteps = append(newSteps, *s)
  2411  		}
  2412  	}
  2413  
  2414  	return newSteps
  2415  }
  2416  
// ApplyNonStepOverridesToPipeline applies the non-step configuration from an individual override to the pipeline.
// Agent, container options, sidecars, and volumes are applied at the top level
// when the override names no stage, then the override is recursively applied
// to every stage.
func ApplyNonStepOverridesToPipeline(pipeline *ParsedPipeline, override *PipelineOverride) *ParsedPipeline {
	if pipeline == nil || override == nil {
		return pipeline
	}

	// Only apply this override to the top-level pipeline if no stage is specified.
	if override.Stage == "" {
		if override.Agent != nil {
			pipeline.Agent = override.Agent
		}
		if override.ContainerOptions != nil {
			// Copy so later mutations of the override don't leak into the pipeline.
			containerOptionsCopy := *override.ContainerOptions
			if pipeline.Options == nil {
				pipeline.Options = &RootOptions{}
			}
			if pipeline.Options.ContainerOptions == nil {
				pipeline.Options.ContainerOptions = &containerOptionsCopy
			} else {
				// Merge onto the existing options; the override's fields win.
				// On merge failure keep the existing options (best-effort).
				mergedContainer, err := MergeContainers(pipeline.Options.ContainerOptions, &containerOptionsCopy)
				if err != nil {
					log.Logger().Warnf("couldn't merge override container options: %s", err)
				} else {
					pipeline.Options.ContainerOptions = mergedContainer
				}
			}
		}
		if len(override.Sidecars) > 0 {
			if pipeline.Options == nil {
				pipeline.Options = &RootOptions{}
			}
			pipeline.Options.Sidecars = append(pipeline.Options.Sidecars, override.Sidecars...)
		}
		if len(override.Volumes) > 0 {
			if pipeline.Options == nil {
				pipeline.Options = &RootOptions{}
			}
			pipeline.Options.Volumes = append(pipeline.Options.Volumes, override.Volumes...)
		}
	}

	// Recurse into every stage; stages that come back empty are dropped.
	var newStages []Stage
	for _, s := range pipeline.Stages {
		overriddenStage := ApplyNonStepOverridesToStage(s, override)
		if !equality.Semantic.DeepEqual(overriddenStage, Stage{}) {
			newStages = append(newStages, overriddenStage)
		}
	}
	pipeline.Stages = newStages

	return pipeline
}
  2469  
// ApplyNonStepOverridesToStage applies non-step overrides, such as stage agent, containerOptions, and volumes, to this
// stage and its children. The stage is modified by value and the updated copy returned.
func ApplyNonStepOverridesToStage(stage Stage, override *PipelineOverride) Stage {
	if override == nil {
		return stage
	}

	// Since a traditional build pack only has one stage at this point, treat anything that's stage-specific as valid here.
	if (override.MatchesStage(stage.Name) || stage.Name == DefaultStageNameForBuildPack) && override.Stage != "" {
		if override.Agent != nil {
			stage.Agent = override.Agent
		}
		if override.ContainerOptions != nil {
			// Copy so later mutations of the override don't leak into the stage.
			containerOptionsCopy := *override.ContainerOptions
			if stage.Options == nil {
				stage.Options = &StageOptions{
					RootOptions: &RootOptions{},
				}
			}
			if stage.Options.ContainerOptions == nil {
				stage.Options.ContainerOptions = &containerOptionsCopy
			} else {
				// Merge onto the existing options; the override's fields win.
				// On merge failure keep the existing options (best-effort).
				mergedContainer, err := MergeContainers(stage.Options.ContainerOptions, &containerOptionsCopy)
				if err != nil {
					log.Logger().Warnf("couldn't merge override container options: %s", err)
				} else {
					stage.Options.ContainerOptions = mergedContainer
				}
			}
		}

		if len(override.Sidecars) > 0 {
			if stage.Options == nil {
				stage.Options = &StageOptions{
					RootOptions: &RootOptions{},
				}
			}
			stage.Options.Sidecars = append(stage.Options.Sidecars, override.Sidecars...)
		}

		if len(override.Volumes) > 0 {
			if stage.Options == nil {
				stage.Options = &StageOptions{
					RootOptions: &RootOptions{},
				}
			}
			stage.Options.Volumes = append(stage.Options.Volumes, override.Volumes...)
		}
	}
	// Recurse into nested sequential stages.
	if len(stage.Stages) > 0 {
		var newStages []Stage
		for _, s := range stage.Stages {
			newStages = append(newStages, ApplyNonStepOverridesToStage(s, override))
		}
		stage.Stages = newStages
	}
	// Recurse into nested parallel stages.
	if len(stage.Parallel) > 0 {
		var newParallel []Stage
		for _, s := range stage.Parallel {
			newParallel = append(newParallel, ApplyNonStepOverridesToStage(s, override))
		}
		stage.Parallel = newParallel
	}

	return stage
}
  2536  
  2537  // ApplyStepOverridesToStage applies a set of overrides to named steps in this stage and its children
  2538  func ApplyStepOverridesToStage(stage Stage, override *PipelineOverride) Stage {
  2539  	if override == nil {
  2540  		return stage
  2541  	}
  2542  
  2543  	if override.MatchesStage(stage.Name) {
  2544  		if len(stage.Steps) > 0 {
  2545  			var newSteps []Step
  2546  			if override.Name != "" {
  2547  				for _, s := range stage.Steps {
  2548  					newSteps = append(newSteps, OverrideStep(s, override)...)
  2549  				}
  2550  			} else {
  2551  				// If no step name was specified but there are steps, just replace all steps in the stage/lifecycle,
  2552  				// or add the new steps before/after the existing steps in the stage/lifecycle
  2553  				if steps := override.AsStepsSlice(); len(steps) > 0 {
  2554  					if override.Type == nil || *override.Type == StepOverrideReplace {
  2555  						newSteps = append(newSteps, stepPointerSliceToStepSlice(steps)...)
  2556  					} else if *override.Type == StepOverrideBefore {
  2557  						newSteps = append(newSteps, stepPointerSliceToStepSlice(steps)...)
  2558  						newSteps = append(newSteps, stage.Steps...)
  2559  					} else if *override.Type == StepOverrideAfter {
  2560  						newSteps = append(newSteps, stage.Steps...)
  2561  						newSteps = append(newSteps, stepPointerSliceToStepSlice(steps)...)
  2562  					}
  2563  				}
  2564  				// If there aren't any steps as well as no step name, then we're removing all steps from this stage/lifecycle,
  2565  				// so just don't add anything to newSteps, and we'll end up returning an empty stage
  2566  			}
  2567  
  2568  			// If newSteps isn't empty, use it for the stage's steps list. Otherwise, if no agent override is specified,
  2569  			// we're removing this stage, so return an empty stage.
  2570  			if len(newSteps) > 0 {
  2571  				stage.Steps = newSteps
  2572  			} else if !override.HasNonStepOverrides() {
  2573  				return Stage{}
  2574  			}
  2575  		}
  2576  	}
  2577  	if len(stage.Stages) > 0 {
  2578  		var newStages []Stage
  2579  		for _, s := range stage.Stages {
  2580  			newStages = append(newStages, ApplyStepOverridesToStage(s, override))
  2581  		}
  2582  		stage.Stages = newStages
  2583  	}
  2584  	if len(stage.Parallel) > 0 {
  2585  		var newParallel []Stage
  2586  		for _, s := range stage.Parallel {
  2587  			newParallel = append(newParallel, ApplyStepOverridesToStage(s, override))
  2588  		}
  2589  		stage.Parallel = newParallel
  2590  	}
  2591  
  2592  	return stage
  2593  }
  2594  
  2595  // OverrideStep overrides an existing step, if it matches the override's name, with the contents of the override. It also
  2596  // recurses into child steps.
  2597  func OverrideStep(step Step, override *PipelineOverride) []Step {
  2598  	if override != nil {
  2599  		if step.Name == override.Name {
  2600  			var newSteps []Step
  2601  
  2602  			if override.Step != nil {
  2603  				if override.Step.Name == "" {
  2604  					override.Step.Name = step.Name
  2605  				}
  2606  				newSteps = append(newSteps, *override.Step)
  2607  			}
  2608  			if override.Steps != nil {
  2609  				for _, s := range override.Steps {
  2610  					newSteps = append(newSteps, *s)
  2611  				}
  2612  			}
  2613  
  2614  			if override.Type == nil || *override.Type == StepOverrideReplace {
  2615  				return newSteps
  2616  			} else if *override.Type == StepOverrideBefore {
  2617  				return append(newSteps, step)
  2618  			} else if *override.Type == StepOverrideAfter {
  2619  				return append([]Step{step}, newSteps...)
  2620  			}
  2621  
  2622  			// Fall back on just returning the original. We shouldn't ever get here.
  2623  			return []Step{step}
  2624  		}
  2625  
  2626  		if len(step.Steps) > 0 {
  2627  			var newSteps []*Step
  2628  			for _, s := range step.Steps {
  2629  				for _, o := range OverrideStep(*s, override) {
  2630  					stepCopy := o
  2631  					newSteps = append(newSteps, &stepCopy)
  2632  				}
  2633  			}
  2634  			step.Steps = newSteps
  2635  		}
  2636  	}
  2637  
  2638  	return []Step{step}
  2639  }
  2640  
  2641  // StringParamValue generates a Tekton ArrayOrString value for the given string
  2642  func StringParamValue(val string) tektonv1alpha1.ArrayOrString {
  2643  	return tektonv1alpha1.ArrayOrString{
  2644  		Type:      tektonv1alpha1.ParamTypeString,
  2645  		StringVal: val,
  2646  	}
  2647  }
  2648  
  2649  // ReplaceCurlyWithParen replaces legacy "${inputs.params.foo}" with "$(inputs.params.foo)"
  2650  func ReplaceCurlyWithParen(input string) string {
  2651  	re := regexp.MustCompile(braceMatchingRegex)
  2652  	matches := re.FindAllStringSubmatch(input, -1)
  2653  	for _, m := range matches {
  2654  		if len(m) >= 3 {
  2655  			input = strings.ReplaceAll(input, m[0], "$("+m[3]+")")
  2656  		}
  2657  	}
  2658  	return input
  2659  }