github.com/hernad/nomad@v1.6.112/nomad/structs/config/artifact.go (about)

     1  // Copyright (c) HashiCorp, Inc.
     2  // SPDX-License-Identifier: MPL-2.0
     3  
     4  package config
     5  
     6  import (
     7  	"fmt"
     8  	"math"
     9  	"time"
    10  
    11  	"github.com/dustin/go-humanize"
    12  	"github.com/hernad/nomad/helper/pointer"
    13  )
    14  
// ArtifactConfig is the configuration specific to the Artifact block
type ArtifactConfig struct {
	// HTTPReadTimeout is the duration in which a download must complete or
	// it will be canceled. Defaults to 30m.
	HTTPReadTimeout *string `hcl:"http_read_timeout"`

	// HTTPMaxSize is the maximum size of an artifact that will be downloaded.
	// Defaults to 100GB.
	HTTPMaxSize *string `hcl:"http_max_size"`

	// GCSTimeout is the duration in which a GCS operation must complete or
	// it will be canceled. Defaults to 30m.
	GCSTimeout *string `hcl:"gcs_timeout"`

	// GitTimeout is the duration in which a git operation must complete or
	// it will be canceled. Defaults to 30m.
	GitTimeout *string `hcl:"git_timeout"`

	// HgTimeout is the duration in which an hg operation must complete or
	// it will be canceled. Defaults to 30m.
	HgTimeout *string `hcl:"hg_timeout"`

	// S3Timeout is the duration in which an S3 operation must complete or
	// it will be canceled. Defaults to 30m.
	S3Timeout *string `hcl:"s3_timeout"`

	// DecompressionFileCountLimit is the maximum number of files that will
	// be decompressed before triggering an error and cancelling the operation.
	//
	// Default is 4096 files.
	DecompressionFileCountLimit *int `hcl:"decompression_file_count_limit"`

	// DecompressionSizeLimit is the maximum amount of data that will be
	// decompressed before triggering an error and cancelling the operation.
	//
	// Default is 100GB.
	DecompressionSizeLimit *string `hcl:"decompression_size_limit"`

	// DisableFilesystemIsolation will turn off the security feature where the
	// artifact downloader can write only to the task sandbox directory, and can
	// read only from specific locations on the host filesystem.
	DisableFilesystemIsolation *bool `hcl:"disable_filesystem_isolation"`

	// SetEnvironmentVariables is a comma-separated list of environment
	// variable names to inherit from the Nomad Client and set in the artifact
	// download sandbox process.
	SetEnvironmentVariables *string `hcl:"set_environment_variables"`
}
    63  
    64  func (a *ArtifactConfig) Copy() *ArtifactConfig {
    65  	if a == nil {
    66  		return nil
    67  	}
    68  	return &ArtifactConfig{
    69  		HTTPReadTimeout:             pointer.Copy(a.HTTPReadTimeout),
    70  		HTTPMaxSize:                 pointer.Copy(a.HTTPMaxSize),
    71  		GCSTimeout:                  pointer.Copy(a.GCSTimeout),
    72  		GitTimeout:                  pointer.Copy(a.GitTimeout),
    73  		HgTimeout:                   pointer.Copy(a.HgTimeout),
    74  		S3Timeout:                   pointer.Copy(a.S3Timeout),
    75  		DecompressionFileCountLimit: pointer.Copy(a.DecompressionFileCountLimit),
    76  		DecompressionSizeLimit:      pointer.Copy(a.DecompressionSizeLimit),
    77  		DisableFilesystemIsolation:  pointer.Copy(a.DisableFilesystemIsolation),
    78  		SetEnvironmentVariables:     pointer.Copy(a.SetEnvironmentVariables),
    79  	}
    80  }
    81  
    82  func (a *ArtifactConfig) Merge(o *ArtifactConfig) *ArtifactConfig {
    83  	switch {
    84  	case a == nil:
    85  		return o.Copy()
    86  	case o == nil:
    87  		return a.Copy()
    88  	default:
    89  		return &ArtifactConfig{
    90  			HTTPReadTimeout:             pointer.Merge(a.HTTPReadTimeout, o.HTTPReadTimeout),
    91  			HTTPMaxSize:                 pointer.Merge(a.HTTPMaxSize, o.HTTPMaxSize),
    92  			GCSTimeout:                  pointer.Merge(a.GCSTimeout, o.GCSTimeout),
    93  			GitTimeout:                  pointer.Merge(a.GitTimeout, o.GitTimeout),
    94  			HgTimeout:                   pointer.Merge(a.HgTimeout, o.HgTimeout),
    95  			S3Timeout:                   pointer.Merge(a.S3Timeout, o.S3Timeout),
    96  			DecompressionFileCountLimit: pointer.Merge(a.DecompressionFileCountLimit, o.DecompressionFileCountLimit),
    97  			DecompressionSizeLimit:      pointer.Merge(a.DecompressionSizeLimit, o.DecompressionSizeLimit),
    98  			DisableFilesystemIsolation:  pointer.Merge(a.DisableFilesystemIsolation, o.DisableFilesystemIsolation),
    99  			SetEnvironmentVariables:     pointer.Merge(a.SetEnvironmentVariables, o.SetEnvironmentVariables),
   100  		}
   101  	}
   102  }
   103  
   104  func (a *ArtifactConfig) Equal(o *ArtifactConfig) bool {
   105  	if a == nil || o == nil {
   106  		return a == o
   107  	}
   108  	switch {
   109  	case !pointer.Eq(a.HTTPReadTimeout, o.HTTPReadTimeout):
   110  		return false
   111  	case !pointer.Eq(a.HTTPMaxSize, o.HTTPMaxSize):
   112  		return false
   113  	case !pointer.Eq(a.GCSTimeout, o.GCSTimeout):
   114  		return false
   115  	case !pointer.Eq(a.GitTimeout, o.GitTimeout):
   116  		return false
   117  	case !pointer.Eq(a.HgTimeout, o.HgTimeout):
   118  		return false
   119  	case !pointer.Eq(a.S3Timeout, o.S3Timeout):
   120  		return false
   121  	case !pointer.Eq(a.DecompressionFileCountLimit, o.DecompressionFileCountLimit):
   122  		return false
   123  	case !pointer.Eq(a.DecompressionSizeLimit, o.DecompressionSizeLimit):
   124  		return false
   125  	case !pointer.Eq(a.DisableFilesystemIsolation, o.DisableFilesystemIsolation):
   126  		return false
   127  	case !pointer.Eq(a.SetEnvironmentVariables, o.SetEnvironmentVariables):
   128  		return false
   129  	}
   130  	return true
   131  }
   132  
   133  func (a *ArtifactConfig) Validate() error {
   134  	if a == nil {
   135  		return fmt.Errorf("artifact must not be nil")
   136  	}
   137  
   138  	if a.HTTPReadTimeout == nil {
   139  		return fmt.Errorf("http_read_timeout must be set")
   140  	}
   141  	if v, err := time.ParseDuration(*a.HTTPReadTimeout); err != nil {
   142  		return fmt.Errorf("http_read_timeout not a valid duration: %w", err)
   143  	} else if v < 0 {
   144  		return fmt.Errorf("http_read_timeout must be > 0")
   145  	}
   146  
   147  	if a.HTTPMaxSize == nil {
   148  		return fmt.Errorf("http_max_size must be set")
   149  	}
   150  	if v, err := humanize.ParseBytes(*a.HTTPMaxSize); err != nil {
   151  		return fmt.Errorf("http_max_size not a valid size: %w", err)
   152  	} else if v > math.MaxInt64 {
   153  		return fmt.Errorf("http_max_size must be < %d but found %d", int64(math.MaxInt64), v)
   154  	}
   155  
   156  	if a.GCSTimeout == nil {
   157  		return fmt.Errorf("gcs_timeout must be set")
   158  	}
   159  	if v, err := time.ParseDuration(*a.GCSTimeout); err != nil {
   160  		return fmt.Errorf("gcs_timeout not a valid duration: %w", err)
   161  	} else if v < 0 {
   162  		return fmt.Errorf("gcs_timeout must be > 0")
   163  	}
   164  
   165  	if a.GitTimeout == nil {
   166  		return fmt.Errorf("git_timeout must be set")
   167  	}
   168  	if v, err := time.ParseDuration(*a.GitTimeout); err != nil {
   169  		return fmt.Errorf("git_timeout not a valid duration: %w", err)
   170  	} else if v < 0 {
   171  		return fmt.Errorf("git_timeout must be > 0")
   172  	}
   173  
   174  	if a.HgTimeout == nil {
   175  		return fmt.Errorf("hg_timeout must be set")
   176  	}
   177  	if v, err := time.ParseDuration(*a.HgTimeout); err != nil {
   178  		return fmt.Errorf("hg_timeout not a valid duration: %w", err)
   179  	} else if v < 0 {
   180  		return fmt.Errorf("hg_timeout must be > 0")
   181  	}
   182  
   183  	if a.S3Timeout == nil {
   184  		return fmt.Errorf("s3_timeout must be set")
   185  	}
   186  	if v, err := time.ParseDuration(*a.S3Timeout); err != nil {
   187  		return fmt.Errorf("s3_timeout not a valid duration: %w", err)
   188  	} else if v < 0 {
   189  		return fmt.Errorf("s3_timeout must be > 0")
   190  	}
   191  
   192  	if a.DecompressionFileCountLimit == nil {
   193  		return fmt.Errorf("decompression_file_count_limit must not be nil")
   194  	}
   195  	if v := *a.DecompressionFileCountLimit; v < 0 {
   196  		return fmt.Errorf("decompression_file_count_limit must be >= 0 but found %d", v)
   197  	}
   198  
   199  	if a.DecompressionSizeLimit == nil {
   200  		return fmt.Errorf("decompression_size_limit must not be nil")
   201  	}
   202  	if v, err := humanize.ParseBytes(*a.DecompressionSizeLimit); err != nil {
   203  		return fmt.Errorf("decompression_size_limit is not a valid size: %w", err)
   204  	} else if v > math.MaxInt64 {
   205  		return fmt.Errorf("decompression_size_limit must be < %d but found %d", int64(math.MaxInt64), v)
   206  	}
   207  
   208  	if a.DisableFilesystemIsolation == nil {
   209  		return fmt.Errorf("disable_filesystem_isolation must be set")
   210  	}
   211  
   212  	if a.SetEnvironmentVariables == nil {
   213  		return fmt.Errorf("set_environment_variables must be set")
   214  	}
   215  
   216  	return nil
   217  }
   218  
   219  func DefaultArtifactConfig() *ArtifactConfig {
   220  	return &ArtifactConfig{
   221  		// Read timeout for HTTP operations. Must be long enough to
   222  		// accommodate large/slow downloads.
   223  		HTTPReadTimeout: pointer.Of("30m"),
   224  
   225  		// Maximum download size. Must be large enough to accommodate
   226  		// large downloads.
   227  		HTTPMaxSize: pointer.Of("100GB"),
   228  
   229  		// Timeout for GCS operations. Must be long enough to
   230  		// accommodate large/slow downloads.
   231  		GCSTimeout: pointer.Of("30m"),
   232  
   233  		// Timeout for Git operations. Must be long enough to
   234  		// accommodate large/slow clones.
   235  		GitTimeout: pointer.Of("30m"),
   236  
   237  		// Timeout for Hg operations. Must be long enough to
   238  		// accommodate large/slow clones.
   239  		HgTimeout: pointer.Of("30m"),
   240  
   241  		// Timeout for S3 operations. Must be long enough to
   242  		// accommodate large/slow downloads.
   243  		S3Timeout: pointer.Of("30m"),
   244  
   245  		// DecompressionFileCountLimit limits the number of files decompressed
   246  		// for a single artifact. Must be large enough for payloads with lots
   247  		// of files.
   248  		DecompressionFileCountLimit: pointer.Of(4096),
   249  
   250  		// DecompressionSizeLimit limits the amount of data decompressed for
   251  		// a single artifact. Must be large enough to accommodate large payloads.
   252  		DecompressionSizeLimit: pointer.Of("100GB"),
   253  
   254  		// Toggle for disabling filesystem isolation, where available.
   255  		DisableFilesystemIsolation: pointer.Of(false),
   256  
   257  		// No environment variables are inherited from Client by default.
   258  		SetEnvironmentVariables: pointer.Of(""),
   259  	}
   260  }