gitlab.com/jfprevost/gitlab-runner-notlscheck@v11.11.4+incompatible/common/build.go

     1  package common
     2  
     3  import (
     4  	"context"
     5  	"errors"
     6  	"fmt"
     7  	"net/url"
     8  	"os"
     9  	"path"
    10  	"path/filepath"
    11  	"strconv"
    12  	"strings"
    13  	"time"
    14  
    15  	"github.com/sirupsen/logrus"
    16  
    17  	"gitlab.com/gitlab-org/gitlab-runner/helpers"
    18  	"gitlab.com/gitlab-org/gitlab-runner/helpers/featureflags"
    19  	"gitlab.com/gitlab-org/gitlab-runner/helpers/tls"
    20  	"gitlab.com/gitlab-org/gitlab-runner/session"
    21  	"gitlab.com/gitlab-org/gitlab-runner/session/terminal"
    22  )
    23  
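        // GitStrategy describes how the job obtains the repository: a fresh
        // clone, a fetch into an existing working copy, or no Git operations
        // at all. It is selected through the GIT_STRATEGY variable.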
    24  type GitStrategy int
    25  
    26  const (
    27  	GitClone GitStrategy = iota
    28  	GitFetch
    29  	GitNone
    30  )
    31  
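        // Default flags applied when GIT_CLEAN_FLAGS is unset, and the
        // sentinel value that disables git clean entirely.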
    32  const (
    33  	gitCleanFlagsDefault = "-ffdx"
    34  	gitCleanFlagsNone    = "none"
    35  )
    36  
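        // SubmoduleStrategy describes whether and how Git submodules are
        // initialized and updated, selected through GIT_SUBMODULE_STRATEGY.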
    37  type SubmoduleStrategy int
    38  
    39  const (
    40  	SubmoduleInvalid SubmoduleStrategy = iota
    41  	SubmoduleNone
    42  	SubmoduleNormal
    43  	SubmoduleRecursive
    44  )
    45  
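        // BuildRuntimeState tracks the lifecycle of a job, from pending
        // through running to one of the terminal states below.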
    46  type BuildRuntimeState string
    47  
    48  const (
    49  	BuildRunStatePending      BuildRuntimeState = "pending"
    50  	BuildRunRuntimeRunning    BuildRuntimeState = "running"
    51  	BuildRunRuntimeFinished   BuildRuntimeState = "finished"
    52  	BuildRunRuntimeCanceled   BuildRuntimeState = "canceled"
    53  	BuildRunRuntimeTerminated BuildRuntimeState = "terminated"
    54  	BuildRunRuntimeTimedout   BuildRuntimeState = "timedout"
    55  )
    56  
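        // BuildStage names a single step of job execution; each stage is
        // rendered into a shell script and run by the executor.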
    57  type BuildStage string
    58  
    59  const (
    60  	BuildStagePrepareExecutor          BuildStage = "prepare_executor"
    61  	BuildStagePrepare                  BuildStage = "prepare_script"
    62  	BuildStageGetSources               BuildStage = "get_sources"
    63  	BuildStageRestoreCache             BuildStage = "restore_cache"
    64  	BuildStageDownloadArtifacts        BuildStage = "download_artifacts"
    65  	BuildStageUserScript               BuildStage = "build_script"
    66  	BuildStageAfterScript              BuildStage = "after_script"
    67  	BuildStageArchiveCache             BuildStage = "archive_cache"
    68  	BuildStageUploadOnSuccessArtifacts BuildStage = "upload_artifacts_on_success"
    69  	BuildStageUploadOnFailureArtifacts BuildStage = "upload_artifacts_on_failure"
    70  )
    71  
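        // Build holds everything needed to execute a single CI job: the job
        // payload received from GitLab, the runner configuration, the working
        // directories and the current execution state.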
    72  type Build struct {
    73  	JobResponse `yaml:",inline"`
    74  
    75  	SystemInterrupt  chan os.Signal `json:"-" yaml:"-"`
    76  	RootDir          string         `json:"-" yaml:"-"`
    77  	BuildDir         string         `json:"-" yaml:"-"`
    78  	CacheDir         string         `json:"-" yaml:"-"`
    79  	Hostname         string         `json:"-" yaml:"-"`
    80  	Runner           *RunnerConfig  `json:"runner"`
    81  	ExecutorData     ExecutorData
    82  	ExecutorFeatures FeaturesInfo `json:"-" yaml:"-"`
    83  
    84  	// Unique ID for all running builds on this runner
    85  	RunnerID int `json:"runner_id"`
    86  
    87  	// Unique ID for all running builds on this runner and this project
    88  	ProjectRunnerID int `json:"project_runner_id"`
    89  
    90  	CurrentStage BuildStage
    91  	CurrentState BuildRuntimeState
    92  
    93  	Session *session.Session
    94  
    95  	executorStageResolver func() ExecutorStage
    96  	logger                BuildLogger
    97  	allVariables          JobVariables
    98  
    99  	createdAt time.Time
   100  }
   101  
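        // Log returns a logrus entry scoped to this job and its project.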
   102  func (b *Build) Log() *logrus.Entry {
   103  	return b.Runner.Log().WithField("job", b.ID).WithField("project", b.JobInfo.ProjectID)
   104  }
   105  
   106  func (b *Build) ProjectUniqueName() string {
   107  	return fmt.Sprintf("runner-%s-project-%d-concurrent-%d",
   108  		b.Runner.ShortDescription(), b.JobInfo.ProjectID, b.ProjectRunnerID)
   109  }
   110  
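        // ProjectSlug derives a "group/project" style path from the repository
        // URL, rejecting URLs without a host and cleaned paths that are empty
        // or still contain "..".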
   111  func (b *Build) ProjectSlug() (string, error) {
   112  	url, err := url.Parse(b.GitInfo.RepoURL)
   113  	if err != nil {
   114  		return "", err
   115  	}
   116  	if url.Host == "" {
   117  		return "", errors.New("only URI reference supported")
   118  	}
   119  
   120  	slug := url.Path
   121  	slug = strings.TrimSuffix(slug, ".git")
   122  	slug = path.Clean(slug)
   123  	if slug == "." {
   124  		return "", errors.New("invalid path")
   125  	}
   126  	if strings.Contains(slug, "..") {
   127  		return "", errors.New("it doesn't look like a valid path")
   128  	}
   129  	return slug, nil
   130  }
   131  
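        // ProjectUniqueDir returns the per-project checkout directory, falling
        // back to a project-ID based name when no slug can be derived; for
        // shared directories the path is additionally prefixed with the
        // runner's short description and the project concurrency ID.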
   132  func (b *Build) ProjectUniqueDir(sharedDir bool) string {
   133  	dir, err := b.ProjectSlug()
   134  	if err != nil {
   135  		dir = fmt.Sprintf("project-%d", b.JobInfo.ProjectID)
   136  	}
   137  
   138  	// For shared dirs the path is constructed like this:
   139  	// <some-path>/runner-short-id/concurrent-id/group-name/project-name/
   140  	// e.g. <some-path>/01234567/0/group/repo/
   141  	if sharedDir {
   142  		dir = path.Join(
   143  			b.Runner.ShortDescription(),
   144  			fmt.Sprintf("%d", b.ProjectRunnerID),
   145  			dir,
   146  		)
   147  	}
   148  	return dir
   149  }
   150  
   151  func (b *Build) FullProjectDir() string {
   152  	return helpers.ToSlash(b.BuildDir)
   153  }
   154  
   155  func (b *Build) TmpProjectDir() string {
   156  	return helpers.ToSlash(b.BuildDir) + ".tmp"
   157  }
   158  
   159  func (b *Build) getCustomBuildDir(rootDir, overrideKey string, customBuildDirEnabled, sharedDir bool) (string, error) {
   160  	dir := b.GetAllVariables().Get(overrideKey)
   161  	if dir == "" {
   162  		return path.Join(rootDir, b.ProjectUniqueDir(sharedDir)), nil
   163  	}
   164  
   165  	if !customBuildDirEnabled {
   166  		return "", MakeBuildError("setting %s is not allowed, enable `custom_build_dir` feature", overrideKey)
   167  	}
   168  
   169  	if !strings.HasPrefix(dir, rootDir) {
   170  		return "", MakeBuildError("the %s=%q has to be within %q",
   171  			overrideKey, dir, rootDir)
   172  	}
   173  
   174  	return dir, nil
   175  }
   176  
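        // StartBuild resolves the root, cache and build directories (honoring
        // GIT_CLONE_PATH when custom build directories are enabled) and
        // refreshes the cached variables so the CI_*_DIR values expand
        // correctly.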
   177  func (b *Build) StartBuild(rootDir, cacheDir string, customBuildDirEnabled, sharedDir bool) error {
   178  	var err error
   179  
   180  	// We set RootDir and invalidate variables
   181  	// to be able to use CI_BUILDS_DIR
   182  	b.RootDir = rootDir
   183  	b.CacheDir = path.Join(cacheDir, b.ProjectUniqueDir(false))
   184  	b.refreshAllVariables()
   185  
   186  	b.BuildDir, err = b.getCustomBuildDir(b.RootDir, "GIT_CLONE_PATH", customBuildDirEnabled, sharedDir)
   187  	if err != nil {
   188  		return err
   189  	}
   190  
   191  	// We invalidate variables to be able to use
   192  	// CI_CACHE_DIR and CI_PROJECT_DIR
   193  	b.refreshAllVariables()
   194  	return nil
   195  }
   196  
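        // executeStage generates the shell script for the given stage and runs
        // it through the executor inside a trace section; the build_script and
        // after_script stages run in the custom build environment, all other
        // stages use the predefined one.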
   197  func (b *Build) executeStage(ctx context.Context, buildStage BuildStage, executor Executor) error {
   198  	b.CurrentStage = buildStage
   199  
   200  	b.Log().WithField("build_stage", buildStage).Debug("Executing build stage")
   201  
   202  	shell := executor.Shell()
   203  	if shell == nil {
   204  		return errors.New("No shell defined")
   205  	}
   206  
   207  	script, err := GenerateShellScript(buildStage, *shell)
   208  	if err != nil {
   209  		return err
   210  	}
   211  
   212  	// Nothing to execute
   213  	if script == "" {
   214  		return nil
   215  	}
   216  
   217  	cmd := ExecutorCommand{
   218  		Context: ctx,
   219  		Script:  script,
   220  		Stage:   buildStage,
   221  	}
   222  
   223  	switch buildStage {
   224  	case BuildStageUserScript, BuildStageAfterScript: // use custom build environment
   225  		cmd.Predefined = false
   226  	default: // all other stages use a predefined build environment
   227  		cmd.Predefined = true
   228  	}
   229  
   230  	section := helpers.BuildSection{
   231  		Name:        string(buildStage),
   232  		SkipMetrics: !b.JobResponse.Features.TraceSections,
   233  		Run:         func() error { return executor.Run(cmd) },
   234  	}
   235  	return section.Execute(&b.logger)
   236  }
   237  
   238  func (b *Build) executeUploadArtifacts(ctx context.Context, state error, executor Executor) (err error) {
   239  	if state == nil {
   240  		return b.executeStage(ctx, BuildStageUploadOnSuccessArtifacts, executor)
   241  	}
   242  
   243  	return b.executeStage(ctx, BuildStageUploadOnFailureArtifacts, executor)
   244  }
   245  
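        // executeScript runs the job stages in order: prepare, get sources,
        // restore cache, download artifacts, the user script, after_script,
        // cache archiving and artifact upload. The job's own error takes
        // precedence over any artifact upload error.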
   246  func (b *Build) executeScript(ctx context.Context, executor Executor) error {
   247  	// Prepare stage
   248  	err := b.executeStage(ctx, BuildStagePrepare, executor)
   249  
   250  	if err == nil {
   251  		err = b.attemptExecuteStage(ctx, BuildStageGetSources, executor, b.GetGetSourcesAttempts())
   252  	}
   253  	if err == nil {
   254  		err = b.attemptExecuteStage(ctx, BuildStageRestoreCache, executor, b.GetRestoreCacheAttempts())
   255  	}
   256  	if err == nil {
   257  		err = b.attemptExecuteStage(ctx, BuildStageDownloadArtifacts, executor, b.GetDownloadArtifactsAttempts())
   258  	}
   259  
   260  	if err == nil {
   261  		// Execute user build script (before_script + script)
   262  		err = b.executeStage(ctx, BuildStageUserScript, executor)
   263  
   264  		// Execute after script (after_script)
   265  		timeoutContext, timeoutCancel := context.WithTimeout(ctx, AfterScriptTimeout)
   266  		defer timeoutCancel()
   267  
   268  		b.executeStage(timeoutContext, BuildStageAfterScript, executor)
   269  	}
   270  
   271  	// Execute post-build steps (cache archiving, artifact upload)
   272  	if err == nil {
   273  		err = b.executeStage(ctx, BuildStageArchiveCache, executor)
   274  	}
   275  
   276  	uploadError := b.executeUploadArtifacts(ctx, err, executor)
   277  
   278  	// The job's error takes precedence over any artifact upload error
   279  	if err != nil {
   280  		return err
   281  	}
   282  
   283  	// Otherwise, use uploadError
   284  	return uploadError
   285  }
   286  
   287  func (b *Build) attemptExecuteStage(ctx context.Context, buildStage BuildStage, executor Executor, attempts int) (err error) {
   288  	if attempts < 1 || attempts > 10 {
   289  		return fmt.Errorf("Number of attempts out of the range [1, 10] for stage: %s", buildStage)
   290  	}
   291  	for attempt := 0; attempt < attempts; attempt++ {
   292  		if err = b.executeStage(ctx, buildStage, executor); err == nil {
   293  			return
   294  		}
   295  	}
   296  	return
   297  }
   298  
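        // GetBuildTimeout returns the job timeout from the job's runner info,
        // or DefaultTimeout when it is not set to a positive value.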
   299  func (b *Build) GetBuildTimeout() time.Duration {
   300  	buildTimeout := b.RunnerInfo.Timeout
   301  	if buildTimeout <= 0 {
   302  		buildTimeout = DefaultTimeout
   303  	}
   304  	return time.Duration(buildTimeout) * time.Second
   305  }
   306  
   307  func (b *Build) handleError(err error) error {
   308  	switch err {
   309  	case context.Canceled:
   310  		b.CurrentState = BuildRunRuntimeCanceled
   311  		return &BuildError{Inner: errors.New("canceled")}
   312  
   313  	case context.DeadlineExceeded:
   314  		b.CurrentState = BuildRunRuntimeTimedout
   315  		return &BuildError{
   316  			Inner:         fmt.Errorf("execution took longer than %v seconds", b.GetBuildTimeout()),
   317  			FailureReason: JobExecutionTimeout,
   318  		}
   319  
   320  	default:
   321  		b.CurrentState = BuildRunRuntimeFinished
   322  		return err
   323  	}
   324  }
   325  
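        // run executes the build script in a goroutine and waits for whichever
        // comes first: context cancellation or timeout, a system interrupt, or
        // normal completion.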
   326  func (b *Build) run(ctx context.Context, executor Executor) (err error) {
   327  	b.CurrentState = BuildRunRuntimeRunning
   328  
   329  	buildFinish := make(chan error, 1)
   330  
   331  	runContext, runCancel := context.WithCancel(context.Background())
   332  	defer runCancel()
   333  
   334  	if term, ok := executor.(terminal.InteractiveTerminal); b.Session != nil && ok {
   335  		b.Session.SetInteractiveTerminal(term)
   336  	}
   337  
   338  	// Run build script
   339  	go func() {
   340  		buildFinish <- b.executeScript(runContext, executor)
   341  	}()
   342  
   343  	// Wait for signals: cancel, timeout, abort or finish
   344  	b.Log().Debugln("Waiting for signals...")
   345  	select {
   346  	case <-ctx.Done():
   347  		err = b.handleError(ctx.Err())
   348  
   349  	case signal := <-b.SystemInterrupt:
   350  		err = fmt.Errorf("aborted: %v", signal)
   351  		b.CurrentState = BuildRunRuntimeTerminated
   352  
   353  	case err = <-buildFinish:
   354  		b.CurrentState = BuildRunRuntimeFinished
   355  		return err
   356  	}
   357  
   358  	b.Log().WithError(err).Debugln("Waiting for build to finish...")
   359  
   360  	// Wait until the build goroutine reports that it has finished
   361  	runCancel()
   362  	<-buildFinish
   363  	return err
   364  }
   365  
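        // retryCreateExecutor creates and prepares an executor, retrying the
        // preparation up to PreparationRetries times unless the failure is a
        // BuildError or the context has been canceled.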
   366  func (b *Build) retryCreateExecutor(options ExecutorPrepareOptions, provider ExecutorProvider, logger BuildLogger) (executor Executor, err error) {
   367  	for tries := 0; tries < PreparationRetries; tries++ {
   368  		executor = provider.Create()
   369  		if executor == nil {
   370  			err = errors.New("failed to create executor")
   371  			return
   372  		}
   373  
   374  		b.executorStageResolver = executor.GetCurrentStage
   375  
   376  		err = executor.Prepare(options)
   377  		if err == nil {
   378  			break
   379  		}
   380  		if executor != nil {
   381  			executor.Cleanup()
   382  			executor = nil
   383  		}
   384  		if _, ok := err.(*BuildError); ok {
   385  			break
   386  		} else if options.Context.Err() != nil {
   387  			return nil, b.handleError(options.Context.Err())
   388  		}
   389  
   390  		logger.SoftErrorln("Preparation failed:", err)
   391  		logger.Infoln("Will be retried in", PreparationRetryInterval, "...")
   392  		time.Sleep(PreparationRetryInterval)
   393  	}
   394  	return
   395  }
   396  
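        // waitForTerminal blocks while an interactive terminal session is
        // connected, returning when the job context ends, the terminal timeout
        // elapses, the session disconnects, or a system interrupt arrives.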
   397  func (b *Build) waitForTerminal(ctx context.Context, timeout time.Duration) error {
   398  	if b.Session == nil || !b.Session.Connected() {
   399  		return nil
   400  	}
   401  
   402  	timeout = b.getTerminalTimeout(ctx, timeout)
   403  
   404  	b.logger.Infoln(
   405  		fmt.Sprintf(
   406  			"Terminal is connected, will time out in %s...",
   407  			// TODO: switch to timeout.Round(time.Second) after upgrading to Go 1.9+
   408  			roundDuration(timeout, time.Second),
   409  		),
   410  	)
   411  
   412  	select {
   413  	case <-ctx.Done():
   414  		err := b.Session.Kill()
   415  		if err != nil {
   416  			b.Log().WithError(err).Warn("Failed to kill session")
   417  		}
   418  		return errors.New("build cancelled, killing session")
   419  	case <-time.After(timeout):
   420  		err := fmt.Errorf(
   421  			"Terminal session timed out (maximum time allowed - %s)",
   422  			// TODO: switch to timeout.Round(time.Second) after upgrading to Go 1.9+
   423  			roundDuration(timeout, time.Second),
   424  		)
   425  		b.logger.Infoln(err.Error())
   426  		b.Session.TimeoutCh <- err
   427  		return err
   428  	case err := <-b.Session.DisconnectCh:
   429  		b.logger.Infoln("Terminal disconnected")
   430  		return fmt.Errorf("terminal disconnected: %v", err)
   431  	case signal := <-b.SystemInterrupt:
   432  		b.logger.Infoln("Terminal disconnected")
   433  		err := b.Session.Kill()
   434  		if err != nil {
   435  			b.Log().WithError(err).Warn("Failed to kill session")
   436  		}
   437  		return fmt.Errorf("terminal disconnected by system signal: %v", signal)
   438  	}
   439  }
   440  
   441  // getTerminalTimeout checks whether the job timeout comes before the
   442  // configured terminal timeout.
   443  func (b *Build) getTerminalTimeout(ctx context.Context, timeout time.Duration) time.Duration {
   444  	expiryTime, _ := ctx.Deadline()
   445  
   446  	if expiryTime.Before(time.Now().Add(timeout)) {
   447  		timeout = expiryTime.Sub(time.Now())
   448  	}
   449  
   450  	return timeout
   451  }
   452  
   453  func (b *Build) setTraceStatus(trace JobTrace, err error) {
   454  	logger := b.logger.WithFields(logrus.Fields{
   455  		"duration": b.Duration(),
   456  	})
   457  
   458  	if err == nil {
   459  		logger.Infoln("Job succeeded")
   460  		trace.Success()
   461  
   462  		return
   463  	}
   464  
   465  	if buildError, ok := err.(*BuildError); ok {
   466  		logger.SoftErrorln("Job failed:", err)
   467  
   468  		failureReason := buildError.FailureReason
   469  		if failureReason == "" {
   470  			failureReason = ScriptFailure
   471  		}
   472  
   473  		trace.Fail(err, failureReason)
   474  
   475  		return
   476  	}
   477  
   478  	logger.Errorln("Job failed (system failure):", err)
   479  	trace.Fail(err, RunnerSystemFailure)
   480  }
   481  
   482  func (b *Build) CurrentExecutorStage() ExecutorStage {
   483  	if b.executorStageResolver == nil {
   484  		b.executorStageResolver = func() ExecutorStage {
   485  			return ExecutorStage("")
   486  		}
   487  	}
   488  
   489  	return b.executorStageResolver()
   490  }
   491  
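        // Run executes the whole job: it prepares the executor (with retries),
        // runs the build stages, waits for any attached terminal session, and
        // reports the final status on the job trace.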
   492  func (b *Build) Run(globalConfig *Config, trace JobTrace) (err error) {
   493  	var executor Executor
   494  
   495  	b.logger = NewBuildLogger(trace, b.Log())
   496  	b.logger.Println("Running with", AppVersion.Line())
   497  	if b.Runner != nil && b.Runner.ShortDescription() != "" {
   498  		b.logger.Println("  on", b.Runner.Name, b.Runner.ShortDescription())
   499  	}
   500  
   501  	b.CurrentState = BuildRunStatePending
   502  
   503  	defer func() {
   504  		b.setTraceStatus(trace, err)
   505  
   506  		if executor != nil {
   507  			executor.Cleanup()
   508  		}
   509  	}()
   510  
   511  	ctx, cancel := context.WithTimeout(context.Background(), b.GetBuildTimeout())
   512  	defer cancel()
   513  
   514  	trace.SetCancelFunc(cancel)
   515  	trace.SetMasked(b.GetAllVariables().Masked())
   516  
   517  	options := ExecutorPrepareOptions{
   518  		Config:  b.Runner,
   519  		Build:   b,
   520  		Trace:   trace,
   521  		User:    globalConfig.User,
   522  		Context: ctx,
   523  	}
   524  
   525  	provider := GetExecutor(b.Runner.Executor)
   526  	if provider == nil {
   527  		return errors.New("executor not found")
   528  	}
   529  
   530  	provider.GetFeatures(&b.ExecutorFeatures)
   531  
   532  	section := helpers.BuildSection{
   533  		Name:        string(BuildStagePrepareExecutor),
   534  		SkipMetrics: !b.JobResponse.Features.TraceSections,
   535  		Run: func() error {
   536  			executor, err = b.retryCreateExecutor(options, provider, b.logger)
   537  			return err
   538  		},
   539  	}
   540  	err = section.Execute(&b.logger)
   541  
   542  	if err == nil {
   543  		err = b.run(ctx, executor)
   544  		if err := b.waitForTerminal(ctx, globalConfig.SessionServer.GetSessionTimeout()); err != nil {
   545  			b.Log().WithError(err).Debug("Stopped waiting for terminal")
   546  		}
   547  	}
   548  
   549  	if executor != nil {
   550  		executor.Finish(err)
   551  	}
   552  
   553  	return err
   554  }
   555  
   556  func (b *Build) String() string {
   557  	return helpers.ToYAML(b)
   558  }
   559  
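        // GetDefaultVariables returns the built-in variables derived from the
        // build directories and the runner concurrency IDs (CI_BUILDS_DIR,
        // CI_PROJECT_DIR, CI_CONCURRENT_ID, CI_CONCURRENT_PROJECT_ID and
        // CI_SERVER).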
   560  func (b *Build) GetDefaultVariables() JobVariables {
   561  	return JobVariables{
   562  		{Key: "CI_BUILDS_DIR", Value: filepath.FromSlash(b.RootDir), Public: true, Internal: true, File: false},
   563  		{Key: "CI_PROJECT_DIR", Value: filepath.FromSlash(b.FullProjectDir()), Public: true, Internal: true, File: false},
   564  		{Key: "CI_CONCURRENT_ID", Value: strconv.Itoa(b.RunnerID), Public: true, Internal: true, File: false},
   565  		{Key: "CI_CONCURRENT_PROJECT_ID", Value: strconv.Itoa(b.ProjectRunnerID), Public: true, Internal: true, File: false},
   566  		{Key: "CI_SERVER", Value: "yes", Public: true, Internal: true, File: false},
   567  	}
   568  }
   569  
   570  func (b *Build) GetDefaultFeatureFlagsVariables() JobVariables {
   571  	variables := make(JobVariables, 0)
   572  	for _, featureFlag := range featureflags.GetAll() {
   573  		variables = append(variables, JobVariable{
   574  			Key:      featureFlag.Name,
   575  			Value:    featureFlag.DefaultValue,
   576  			Public:   true,
   577  			Internal: true,
   578  			File:     false,
   579  		})
   580  	}
   581  
   582  	return variables
   583  }
   584  
   585  func (b *Build) GetSharedEnvVariable() JobVariable {
   586  	env := JobVariable{Value: "true", Public: true, Internal: true, File: false}
   587  	if b.IsSharedEnv() {
   588  		env.Key = "CI_SHARED_ENVIRONMENT"
   589  	} else {
   590  		env.Key = "CI_DISPOSABLE_ENVIRONMENT"
   591  	}
   592  
   593  	return env
   594  }
   595  
   596  func (b *Build) GetTLSVariables(caFile, certFile, keyFile string) JobVariables {
   597  	variables := JobVariables{}
   598  
   599  	if b.TLSCAChain != "" {
   600  		variables = append(variables, JobVariable{
   601  			Key:      caFile,
   602  			Value:    b.TLSCAChain,
   603  			Public:   true,
   604  			Internal: true,
   605  			File:     true,
   606  		})
   607  	}
   608  
   609  	if b.TLSAuthCert != "" && b.TLSAuthKey != "" {
   610  		variables = append(variables, JobVariable{
   611  			Key:      certFile,
   612  			Value:    b.TLSAuthCert,
   613  			Public:   true,
   614  			Internal: true,
   615  			File:     true,
   616  		})
   617  
   618  		variables = append(variables, JobVariable{
   619  			Key:      keyFile,
   620  			Value:    b.TLSAuthKey,
   621  			Internal: true,
   622  			File:     true,
   623  		})
   624  	}
   625  
   626  	return variables
   627  }
   628  
   629  func (b *Build) GetCITLSVariables() JobVariables {
   630  	return b.GetTLSVariables(tls.VariableCAFile, tls.VariableCertFile, tls.VariableKeyFile)
   631  }
   632  
   633  func (b *Build) GetGitTLSVariables() JobVariables {
   634  	return b.GetTLSVariables("GIT_SSL_CAINFO", "GIT_SSL_CERT", "GIT_SSL_KEY")
   635  }
   636  
   637  func (b *Build) IsSharedEnv() bool {
   638  	return b.ExecutorFeatures.Shared
   639  }
   640  
   641  func (b *Build) refreshAllVariables() {
   642  	b.allVariables = nil
   643  }
   644  
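        // GetAllVariables assembles and caches the expanded variable set:
        // feature-flag defaults, runner variables, build defaults, TLS
        // variables, job variables, the shared/disposable environment marker
        // and the runner version variables.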
   645  func (b *Build) GetAllVariables() JobVariables {
   646  	if b.allVariables != nil {
   647  		return b.allVariables
   648  	}
   649  
   650  	variables := make(JobVariables, 0)
   651  	variables = append(variables, b.GetDefaultFeatureFlagsVariables()...)
   652  	if b.Runner != nil {
   653  		variables = append(variables, b.Runner.GetVariables()...)
   654  	}
   655  	variables = append(variables, b.GetDefaultVariables()...)
   656  	variables = append(variables, b.GetCITLSVariables()...)
   657  	variables = append(variables, b.Variables...)
   658  	variables = append(variables, b.GetSharedEnvVariable())
   659  	variables = append(variables, AppVersion.Variables()...)
   660  
   661  	b.allVariables = variables.Expand()
   662  	return b.allVariables
   663  }
   664  
   665  // GetRemoteURL checks if the default clone URL is overridden by the runner
   666  // configuration option: 'CloneURL'. If it is, we use that to create the clone
   667  // URL.
   668  func (b *Build) GetRemoteURL() string {
   669  	cloneURL := strings.TrimRight(b.Runner.CloneURL, "/")
   670  
   671  	if !strings.HasPrefix(cloneURL, "http") {
   672  		return b.GitInfo.RepoURL
   673  	}
   674  
   675  	variables := b.GetAllVariables()
   676  	ciJobToken := variables.Get("CI_JOB_TOKEN")
   677  	ciProjectPath := variables.Get("CI_PROJECT_PATH")
   678  
   679  	splits := strings.SplitAfterN(cloneURL, "://", 2)
   680  
   681  	return fmt.Sprintf("%sgitlab-ci-token:%s@%s/%s.git", splits[0], ciJobToken, splits[1], ciProjectPath)
   682  }
   683  
   684  // GetGitDepth is deprecated and will be removed in 12.0, use build.GitInfo.Depth instead
   685  // TODO: Remove in 12.0
   686  func (b *Build) GetGitDepth() string {
   687  	return b.GetAllVariables().Get("GIT_DEPTH")
   688  }
   689  
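        // GetGitStrategy maps GIT_STRATEGY to a GitStrategy value, defaulting
        // to fetch or clone depending on AllowGitFetch when the variable is
        // unset or unrecognized.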
   690  func (b *Build) GetGitStrategy() GitStrategy {
   691  	switch b.GetAllVariables().Get("GIT_STRATEGY") {
   692  	case "clone":
   693  		return GitClone
   694  
   695  	case "fetch":
   696  		return GitFetch
   697  
   698  	case "none":
   699  		return GitNone
   700  
   701  	default:
   702  		if b.AllowGitFetch {
   703  			return GitFetch
   704  		}
   705  
   706  		return GitClone
   707  	}
   708  }
   709  
   710  func (b *Build) GetGitCheckout() bool {
   711  	if b.GetGitStrategy() == GitNone {
   712  		return false
   713  	}
   714  
   715  	strCheckout := b.GetAllVariables().Get("GIT_CHECKOUT")
   716  	if len(strCheckout) == 0 {
   717  		return true
   718  	}
   719  
   720  	checkout, err := strconv.ParseBool(strCheckout)
   721  	if err != nil {
   722  		return true
   723  	}
   724  	return checkout
   725  }
   726  
   727  func (b *Build) GetSubmoduleStrategy() SubmoduleStrategy {
   728  	if b.GetGitStrategy() == GitNone {
   729  		return SubmoduleNone
   730  	}
   731  	switch b.GetAllVariables().Get("GIT_SUBMODULE_STRATEGY") {
   732  	case "normal":
   733  		return SubmoduleNormal
   734  
   735  	case "recursive":
   736  		return SubmoduleRecursive
   737  
   738  	case "none", "":
   739  		// Default (legacy) behavior is to not update/init submodules
   740  		return SubmoduleNone
   741  
   742  	default:
   743  		// Will cause an error in AbstractShell's writeSubmoduleUpdateCmds
   744  		return SubmoduleInvalid
   745  	}
   746  }
   747  
   748  func (b *Build) GetGitCleanFlags() []string {
   749  	flags := b.GetAllVariables().Get("GIT_CLEAN_FLAGS")
   750  	if flags == "" {
   751  		flags = gitCleanFlagsDefault
   752  	}
   753  
   754  	if flags == gitCleanFlagsNone {
   755  		return []string{}
   756  	}
   757  
   758  	return strings.Fields(flags)
   759  }
   760  
   761  func (b *Build) IsDebugTraceEnabled() bool {
   762  	trace, err := strconv.ParseBool(b.GetAllVariables().Get("CI_DEBUG_TRACE"))
   763  	if err != nil {
   764  		trace = false
   765  	}
   766  
   767  	if b.Runner.DebugTraceDisabled {
   768  		if trace {
   769  			b.logger.Warningln("CI_DEBUG_TRACE usage is disabled on this Runner")
   770  		}
   771  
   772  		return false
   773  	}
   774  
   775  	return trace
   776  }
   777  
   778  func (b *Build) GetDockerAuthConfig() string {
   779  	return b.GetAllVariables().Get("DOCKER_AUTH_CONFIG")
   780  }
   781  
   782  func (b *Build) GetGetSourcesAttempts() int {
   783  	retries, err := strconv.Atoi(b.GetAllVariables().Get("GET_SOURCES_ATTEMPTS"))
   784  	if err != nil {
   785  		return DefaultGetSourcesAttempts
   786  	}
   787  	return retries
   788  }
   789  
   790  func (b *Build) GetDownloadArtifactsAttempts() int {
   791  	retries, err := strconv.Atoi(b.GetAllVariables().Get("ARTIFACT_DOWNLOAD_ATTEMPTS"))
   792  	if err != nil {
   793  		return DefaultArtifactDownloadAttempts
   794  	}
   795  	return retries
   796  }
   797  
   798  func (b *Build) GetRestoreCacheAttempts() int {
   799  	retries, err := strconv.Atoi(b.GetAllVariables().Get("RESTORE_CACHE_ATTEMPTS"))
   800  	if err != nil {
   801  		return DefaultRestoreCacheAttempts
   802  	}
   803  	return retries
   804  }
   805  
   806  func (b *Build) GetCacheRequestTimeout() int {
   807  	timeout, err := strconv.Atoi(b.GetAllVariables().Get("CACHE_REQUEST_TIMEOUT"))
   808  	if err != nil {
   809  		return DefaultCacheRequestTimeout
   810  	}
   811  	return timeout
   812  }
   813  
   814  func (b *Build) Duration() time.Duration {
   815  	return time.Since(b.createdAt)
   816  }
   817  
   818  func (b *Build) RefspecsAvailable() bool {
   819  	return len(b.GitInfo.Refspecs) > 0
   820  }
   821  
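        // NewBuild constructs a Build for a received job, deep-copying the
        // runner configuration and recording the creation time. A minimal
        // usage sketch (job, runnerConfig, globalConfig and trace are assumed
        // to exist; error handling elided):
        //
        //	build, err := NewBuild(job, runnerConfig, make(chan os.Signal, 1), nil)
        //	if err == nil {
        //		err = build.Run(globalConfig, trace)
        //	}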
   822  func NewBuild(jobData JobResponse, runnerConfig *RunnerConfig, systemInterrupt chan os.Signal, executorData ExecutorData) (*Build, error) {
   823  	// Attempt to perform a deep copy of the RunnerConfig
   824  	runnerConfigCopy, err := runnerConfig.DeepCopy()
   825  	if err != nil {
   826  		return nil, fmt.Errorf("deep copy of runner config failed: %v", err)
   827  	}
   828  
   829  	return &Build{
   830  		JobResponse:     jobData,
   831  		Runner:          runnerConfigCopy,
   832  		SystemInterrupt: systemInterrupt,
   833  		ExecutorData:    executorData,
   834  		createdAt:       time.Now(),
   835  	}, nil
   836  }
   837  
   838  func (b *Build) IsFeatureFlagOn(name string) bool {
   839  	value := b.GetAllVariables().Get(name)
   840  
   841  	on, err := featureflags.IsOn(value)
   842  	if err != nil {
   843  		logrus.WithError(err).
   844  			WithField("name", name).
   845  			WithField("value", value).
   846  			Error("Error while parsing the value of feature flag")
   847  
   848  		return false
   849  	}
   850  
   851  	return on
   852  }
   853  
   854  func (b *Build) IsLFSSmudgeDisabled() bool {
   855  	disabled, err := strconv.ParseBool(b.GetAllVariables().Get("GIT_LFS_SKIP_SMUDGE"))
   856  	if err != nil {
   857  		return false
   858  	}
   859  
   860  	return disabled
   861  }