github.com/hernad/nomad@v1.6.112/nomad/structs/structs.go (about)

     1  // Copyright (c) HashiCorp, Inc.
     2  // SPDX-License-Identifier: MPL-2.0
     3  
     4  package structs
     5  
     6  import (
     7  	"bytes"
     8  	"container/heap"
     9  	"crypto/md5"
    10  	"crypto/sha1"
    11  	"crypto/sha256"
    12  	"crypto/sha512"
    13  	"encoding/base32"
    14  	"encoding/base64"
    15  	"encoding/hex"
    16  	"errors"
    17  	"fmt"
    18  	"hash"
    19  	"hash/crc32"
    20  	"math"
    21  	"net"
    22  	"os"
    23  	"reflect"
    24  	"regexp"
    25  	"sort"
    26  	"strconv"
    27  	"strings"
    28  	"time"
    29  
    30  	jwt "github.com/golang-jwt/jwt/v5"
    31  	"github.com/hashicorp/cronexpr"
    32  	"github.com/hashicorp/go-msgpack/codec"
    33  	"github.com/hashicorp/go-multierror"
    34  	"github.com/hashicorp/go-set"
    35  	"github.com/hashicorp/go-version"
    36  	"github.com/hernad/nomad/acl"
    37  	"github.com/hernad/nomad/command/agent/host"
    38  	"github.com/hernad/nomad/command/agent/pprof"
    39  	"github.com/hernad/nomad/helper"
    40  	"github.com/hernad/nomad/helper/args"
    41  	"github.com/hernad/nomad/helper/constraints/semver"
    42  	"github.com/hernad/nomad/helper/escapingfs"
    43  	"github.com/hernad/nomad/helper/pointer"
    44  	"github.com/hernad/nomad/helper/uuid"
    45  	"github.com/hernad/nomad/lib/cpuset"
    46  	"github.com/hernad/nomad/lib/kheap"
    47  	psstructs "github.com/hernad/nomad/plugins/shared/structs"
    48  	"github.com/miekg/dns"
    49  	"github.com/mitchellh/copystructure"
    50  	"github.com/ryanuber/go-glob"
    51  	"golang.org/x/crypto/blake2b"
    52  	"golang.org/x/exp/maps"
    53  	"golang.org/x/exp/slices"
    54  )
    55  
var (
	// ValidPolicyName is used to validate a policy name: 1-128 characters
	// drawn from letters, digits, and hyphens.
	ValidPolicyName = regexp.MustCompile("^[a-zA-Z0-9-]{1,128}$")

	// b32 is a lowercase base32 encoding for use in URL friendly service hashes
	b32 = base32.NewEncoding(strings.ToLower("abcdefghijklmnopqrstuvwxyz234567"))
)
    63  
// MessageType is the tag prepended to a raft log entry that selects which
// FSM handler decodes and applies the entry.
type MessageType uint8

// note: new raft message types need to be added to the end of this
// list of contents; existing values must never be renumbered because they
// are persisted in the raft log.
const (
	NodeRegisterRequestType                      MessageType = 0
	NodeDeregisterRequestType                    MessageType = 1
	NodeUpdateStatusRequestType                  MessageType = 2
	NodeUpdateDrainRequestType                   MessageType = 3
	JobRegisterRequestType                       MessageType = 4
	JobDeregisterRequestType                     MessageType = 5
	EvalUpdateRequestType                        MessageType = 6
	EvalDeleteRequestType                        MessageType = 7
	AllocUpdateRequestType                       MessageType = 8
	AllocClientUpdateRequestType                 MessageType = 9
	ReconcileJobSummariesRequestType             MessageType = 10
	VaultAccessorRegisterRequestType             MessageType = 11
	VaultAccessorDeregisterRequestType           MessageType = 12
	ApplyPlanResultsRequestType                  MessageType = 13
	DeploymentStatusUpdateRequestType            MessageType = 14
	DeploymentPromoteRequestType                 MessageType = 15
	DeploymentAllocHealthRequestType             MessageType = 16
	DeploymentDeleteRequestType                  MessageType = 17
	JobStabilityRequestType                      MessageType = 18
	ACLPolicyUpsertRequestType                   MessageType = 19
	ACLPolicyDeleteRequestType                   MessageType = 20
	ACLTokenUpsertRequestType                    MessageType = 21
	ACLTokenDeleteRequestType                    MessageType = 22
	ACLTokenBootstrapRequestType                 MessageType = 23
	AutopilotRequestType                         MessageType = 24
	UpsertNodeEventsType                         MessageType = 25
	JobBatchDeregisterRequestType                MessageType = 26
	AllocUpdateDesiredTransitionRequestType      MessageType = 27
	NodeUpdateEligibilityRequestType             MessageType = 28
	BatchNodeUpdateDrainRequestType              MessageType = 29
	SchedulerConfigRequestType                   MessageType = 30
	NodeBatchDeregisterRequestType               MessageType = 31
	ClusterMetadataRequestType                   MessageType = 32
	ServiceIdentityAccessorRegisterRequestType   MessageType = 33
	ServiceIdentityAccessorDeregisterRequestType MessageType = 34
	CSIVolumeRegisterRequestType                 MessageType = 35
	CSIVolumeDeregisterRequestType               MessageType = 36
	CSIVolumeClaimRequestType                    MessageType = 37
	ScalingEventRegisterRequestType              MessageType = 38
	CSIVolumeClaimBatchRequestType               MessageType = 39
	CSIPluginDeleteRequestType                   MessageType = 40
	EventSinkUpsertRequestType                   MessageType = 41
	EventSinkDeleteRequestType                   MessageType = 42
	BatchEventSinkUpdateProgressType             MessageType = 43
	OneTimeTokenUpsertRequestType                MessageType = 44
	OneTimeTokenDeleteRequestType                MessageType = 45
	OneTimeTokenExpireRequestType                MessageType = 46
	ServiceRegistrationUpsertRequestType         MessageType = 47
	ServiceRegistrationDeleteByIDRequestType     MessageType = 48
	ServiceRegistrationDeleteByNodeIDRequestType MessageType = 49
	VarApplyStateRequestType                     MessageType = 50
	RootKeyMetaUpsertRequestType                 MessageType = 51
	RootKeyMetaDeleteRequestType                 MessageType = 52
	ACLRolesUpsertRequestType                    MessageType = 53
	ACLRolesDeleteByIDRequestType                MessageType = 54
	ACLAuthMethodsUpsertRequestType              MessageType = 55
	ACLAuthMethodsDeleteRequestType              MessageType = 56
	ACLBindingRulesUpsertRequestType             MessageType = 57
	ACLBindingRulesDeleteRequestType             MessageType = 58
	NodePoolUpsertRequestType                    MessageType = 59
	NodePoolDeleteRequestType                    MessageType = 60

	// Namespace types were moved from enterprise and therefore start at 64
	NamespaceUpsertRequestType MessageType = 64
	NamespaceDeleteRequestType MessageType = 65
)
   135  
const (
	// SystemInitializationType is used for messages that initialize parts of
	// the system, such as the state store. These messages are not included in
	// the event stream.
	SystemInitializationType MessageType = 127

	// IgnoreUnknownTypeFlag is set along with a MessageType
	// to indicate that the message type can be safely ignored
	// if it is not recognized. This is for future proofing, so
	// that new commands can be added in a way that won't cause
	// old servers to crash when the FSM attempts to process them.
	IgnoreUnknownTypeFlag MessageType = 128

	// MsgTypeTestSetup is used during testing when calling state store
	// methods directly that require an FSM MessageType
	MsgTypeTestSetup MessageType = IgnoreUnknownTypeFlag

	// GetterMode* control how artifact getters treat the downloaded object.
	GetterModeAny  = "any"
	GetterModeFile = "file"
	GetterModeDir  = "dir"

	// maxPolicyDescriptionLength limits a policy description length
	maxPolicyDescriptionLength = 256

	// maxTokenNameLength limits an ACL token name length
	maxTokenNameLength = 256

	// ACLClientToken and ACLManagementToken are the only types of tokens
	ACLClientToken     = "client"
	ACLManagementToken = "management"

	// DefaultNamespace is the default namespace.
	DefaultNamespace            = "default"
	DefaultNamespaceDescription = "Default shared namespace"

	// AllNamespacesSentinel is the value used as a namespace RPC value
	// to indicate that endpoints must search in all namespaces
	//
	// Also defined in acl/acl.go to avoid circular dependencies. If modified
	// it should be updated there as well.
	AllNamespacesSentinel = "*"

	// maxNamespaceDescriptionLength limits a namespace description length
	maxNamespaceDescriptionLength = 256

	// JitterFraction is the limit to the amount of jitter we apply
	// to a user specified MaxQueryTime. We divide the specified time by
	// the fraction. So 16 == 6.25% limit of jitter. This jitter is also
	// applied to RPCHoldTimeout.
	JitterFraction = 16

	// MaxRetainedNodeEvents is the maximum number of node events that will be
	// retained for a single node
	MaxRetainedNodeEvents = 10

	// MaxRetainedNodeScores is the number of top scoring nodes for which we
	// retain scoring metadata
	MaxRetainedNodeScores = 5

	// NormScorerName is the name under which the normalized score is reported.
	NormScorerName = "normalized-score"

	// MaxBlockingRPCQueryTime is used to bound the limit of a blocking query
	MaxBlockingRPCQueryTime = 300 * time.Second

	// DefaultBlockingRPCQueryTime is the amount of time we block waiting for a change
	// if no time is specified. Previously we would wait the MaxBlockingRPCQueryTime.
	DefaultBlockingRPCQueryTime = 300 * time.Second

	// RateMetric constants are used as labels in RPC rate metrics
	RateMetricRead  = "read"
	RateMetricList  = "list"
	RateMetricWrite = "write"
)
   210  
var (
	// validNamespaceName is used to validate a namespace name: 1-128
	// characters drawn from letters, digits, and hyphens.
	validNamespaceName = regexp.MustCompile("^[a-zA-Z0-9-]{1,128}$")
)

// NamespacedID is a tuple of an ID and a namespace
type NamespacedID struct {
	ID        string
	Namespace string
}

// NewNamespacedID returns a new namespaced ID given the ID and namespace
func NewNamespacedID(id, ns string) NamespacedID {
	return NamespacedID{
		ID:        id,
		Namespace: ns,
	}
}

// String returns a human-readable rendering of the tuple, quoting both
// parts so empty values remain visible.
func (n NamespacedID) String() string {
	return fmt.Sprintf("<ns: %q, id: %q>", n.Namespace, n.ID)
}
   233  
// RPCInfo is used to describe common information about query
type RPCInfo interface {
	RequestRegion() string
	IsRead() bool
	AllowStaleRead() bool
	IsForwarded() bool
	SetForwarded()
	TimeToBlock() time.Duration
	// SetTimeToBlock sets how long this request can block. The requested time may not be possible,
	// so Callers should readback TimeToBlock. E.g. you cannot set time to block at all on WriteRequests
	// and it cannot exceed MaxBlockingRPCQueryTime
	SetTimeToBlock(t time.Duration)
}

// InternalRpcInfo allows adding internal RPC metadata to an RPC. This struct
// should NOT be replicated in the API package as it is internal only.
type InternalRpcInfo struct {
	// Forwarded marks whether the RPC has been forwarded.
	Forwarded bool
}

// IsForwarded returns whether the RPC is forwarded from another server.
func (i *InternalRpcInfo) IsForwarded() bool {
	return i.Forwarded
}

// SetForwarded marks that the RPC is being forwarded from another server.
func (i *InternalRpcInfo) SetForwarded() {
	i.Forwarded = true
}
   264  
// QueryOptions is used to specify various flags for read queries
type QueryOptions struct {
	// The target region for this query
	Region string

	// Namespace is the target namespace for the query.
	//
	// Since handlers do not have a default value set they should access
	// the Namespace via the RequestNamespace method.
	//
	// Requests accessing specific namespaced objects must check ACLs
	// against the namespace of the object, not the namespace in the
	// request.
	Namespace string

	// If set, wait until query exceeds given index. Must be provided
	// with MaxQueryTime.
	MinQueryIndex uint64

	// Provided with MinQueryIndex to wait for change.
	MaxQueryTime time.Duration

	// If set, any follower can service the request. Results
	// may be arbitrarily stale.
	AllowStale bool

	// If set, used as prefix for resource list searches
	Prefix string

	// AuthToken is secret portion of the ACL token or workload identity used for
	// the request.
	AuthToken string

	// Filter specifies the go-bexpr filter expression to be used for
	// filtering the data prior to returning a response
	Filter string

	// PerPage is the number of entries to be returned in queries that support
	// paginated lists.
	PerPage int32

	// NextToken is the token used to indicate where to start paging
	// for queries that support paginated lists. This token should be
	// the ID of the next object after the last one seen in the
	// previous response.
	NextToken string

	// Reverse is used to reverse the default order of list results.
	Reverse bool

	// identity is the authenticated identity resolved for this request;
	// it is populated via SetIdentity and read via GetIdentity.
	identity *AuthenticatedIdentity

	InternalRpcInfo
}
   319  
   320  // TimeToBlock returns MaxQueryTime adjusted for maximums and defaults
   321  // it will return 0 if this is not a blocking query
   322  func (q QueryOptions) TimeToBlock() time.Duration {
   323  	if q.MinQueryIndex == 0 {
   324  		return 0
   325  	}
   326  	if q.MaxQueryTime > MaxBlockingRPCQueryTime {
   327  		return MaxBlockingRPCQueryTime
   328  	} else if q.MaxQueryTime <= 0 {
   329  		return DefaultBlockingRPCQueryTime
   330  	}
   331  	return q.MaxQueryTime
   332  }
   333  
// SetTimeToBlock sets the maximum blocking duration for this query.
// Callers should read back TimeToBlock, which applies clamping and defaults.
func (q *QueryOptions) SetTimeToBlock(t time.Duration) {
	q.MaxQueryTime = t
}

// RequestRegion returns the target region for this query.
func (q QueryOptions) RequestRegion() string {
	return q.Region
}
   341  
   342  // RequestNamespace returns the request's namespace or the default namespace if
   343  // no explicit namespace was sent.
   344  //
   345  // Requests accessing specific namespaced objects must check ACLs against the
   346  // namespace of the object, not the namespace in the request.
   347  func (q QueryOptions) RequestNamespace() string {
   348  	if q.Namespace == "" {
   349  		return DefaultNamespace
   350  	}
   351  	return q.Namespace
   352  }
   353  
// IsRead only applies to reads, so always true.
func (q QueryOptions) IsRead() bool {
	return true
}

// AllowStaleRead reports whether this query may be serviced by a follower.
func (q QueryOptions) AllowStaleRead() bool {
	return q.AllowStale
}

// GetAuthToken returns the secret auth token attached to the query.
func (q *QueryOptions) GetAuthToken() string {
	return q.AuthToken
}

// SetIdentity stores the authenticated identity resolved for this request.
func (q *QueryOptions) SetIdentity(identity *AuthenticatedIdentity) {
	q.identity = identity
}

// GetIdentity returns the identity previously stored by SetIdentity, or nil.
func (q QueryOptions) GetIdentity() *AuthenticatedIdentity {
	return q.identity
}
   374  
// AgentPprofRequest is used to request a pprof report for a given node.
type AgentPprofRequest struct {
	// ReqType specifies the profile to use
	ReqType pprof.ReqType

	// Profile specifies the runtime/pprof profile to lookup and generate.
	Profile string

	// Seconds is the number of seconds to capture a profile
	Seconds int

	// Debug specifies if pprof profile should include debug output
	Debug int

	// GC specifies if the profile should call runtime.GC() before
	// running its profile. This is only used for "heap" profiles
	GC int

	// NodeID is the node we want to track the logs of
	NodeID string

	// ServerID is the server we want to track the logs of
	ServerID string

	QueryOptions
}

// AgentPprofResponse is used to return a generated pprof profile
type AgentPprofResponse struct {
	// ID of the agent that fulfilled the request
	AgentID string

	// Payload is the generated pprof profile
	Payload []byte

	// HTTPHeaders are a set of key value pairs to be applied as
	// HTTP headers for a specific runtime profile
	HTTPHeaders map[string]string
}
   414  
// WriteRequest carries the common fields for RPCs that mutate state.
type WriteRequest struct {
	// The target region for this write
	Region string

	// Namespace is the target namespace for the write.
	//
	// Since RPC handlers do not have a default value set they should
	// access the Namespace via the RequestNamespace method.
	//
	// Requests accessing specific namespaced objects must check ACLs
	// against the namespace of the object, not the namespace in the
	// request.
	Namespace string

	// AuthToken is secret portion of the ACL token used for the request
	AuthToken string

	// IdempotencyToken can be used to ensure the write is idempotent.
	IdempotencyToken string

	// identity is the authenticated identity resolved for this request;
	// it is populated via SetIdentity and read via GetIdentity.
	identity *AuthenticatedIdentity

	InternalRpcInfo
}
   439  
// TimeToBlock always returns 0: writes never block.
func (w WriteRequest) TimeToBlock() time.Duration {
	return 0
}

// SetTimeToBlock is a no-op because writes cannot block.
func (w WriteRequest) SetTimeToBlock(_ time.Duration) {
}

func (w WriteRequest) RequestRegion() string {
	// The target region for this request
	return w.Region
}
   451  
   452  // RequestNamespace returns the request's namespace or the default namespace if
   453  // no explicit namespace was sent.
   454  //
   455  // Requests accessing specific namespaced objects must check ACLs against the
   456  // namespace of the object, not the namespace in the request.
   457  func (w WriteRequest) RequestNamespace() string {
   458  	if w.Namespace == "" {
   459  		return DefaultNamespace
   460  	}
   461  	return w.Namespace
   462  }
   463  
// IsRead only applies to writes, always false.
func (w WriteRequest) IsRead() bool {
	return false
}

// AllowStaleRead is always false for writes, which must go to the leader.
func (w WriteRequest) AllowStaleRead() bool {
	return false
}

// GetAuthToken returns the secret auth token attached to the write.
func (w *WriteRequest) GetAuthToken() string {
	return w.AuthToken
}

// SetIdentity stores the authenticated identity resolved for this request.
func (w *WriteRequest) SetIdentity(identity *AuthenticatedIdentity) {
	w.identity = identity
}

// GetIdentity returns the identity previously stored by SetIdentity, or nil.
func (w WriteRequest) GetIdentity() *AuthenticatedIdentity {
	return w.identity
}
   484  
// AuthenticatedIdentity is returned by the Authenticate method on server to
// return a wrapper around the various elements that can be resolved as an
// identity. RPC handlers will use the relevant fields for performing
// authorization.
//
// Keeping these fields independent rather than merging them into an ephemeral
// ACLToken makes the origin of the credential clear to RPC handlers, who may
// have different behavior for internal vs external origins.
type AuthenticatedIdentity struct {
	// ACLToken authenticated. Claims will be nil if this is set.
	ACLToken *ACLToken

	// Claims authenticated by workload identity. ACLToken will be nil if this is
	// set.
	Claims *IdentityClaims

	// ClientID identifies a Nomad client as the caller, when set.
	ClientID string
	// TLSName is the certificate name of the caller, when mTLS is used.
	TLSName string
	// RemoteIP is the caller's remote address.
	RemoteIP net.IP
}
   505  
// GetACLToken returns the ACL token this identity was authenticated with,
// or nil. Safe to call on a nil receiver.
func (ai *AuthenticatedIdentity) GetACLToken() *ACLToken {
	if ai == nil {
		return nil
	}
	return ai.ACLToken
}

// GetClaims returns the workload identity claims this identity was
// authenticated with, or nil. Safe to call on a nil receiver.
func (ai *AuthenticatedIdentity) GetClaims() *IdentityClaims {
	if ai == nil {
		return nil
	}
	return ai.Claims
}
   519  
   520  func (ai *AuthenticatedIdentity) String() string {
   521  	if ai == nil {
   522  		return "unauthenticated"
   523  	}
   524  	if ai.ACLToken != nil {
   525  		return "token:" + ai.ACLToken.AccessorID
   526  	}
   527  	if ai.Claims != nil {
   528  		return "alloc:" + ai.Claims.AllocationID
   529  	}
   530  	if ai.ClientID != "" {
   531  		return "client:" + ai.ClientID
   532  	}
   533  	return ai.TLSName + ":" + ai.RemoteIP.String()
   534  }
   535  
   536  func (ai *AuthenticatedIdentity) IsExpired(now time.Time) bool {
   537  	// Only ACLTokens currently support expiry so return unexpired if there isn't
   538  	// one.
   539  	if ai.ACLToken == nil {
   540  		return false
   541  	}
   542  
   543  	return ai.ACLToken.IsExpired(now)
   544  }
   545  
// RequestWithIdentity is implemented by request types that carry an auth
// token and can have an AuthenticatedIdentity attached after resolution.
type RequestWithIdentity interface {
	GetAuthToken() string
	SetIdentity(identity *AuthenticatedIdentity)
	GetIdentity() *AuthenticatedIdentity
}

// QueryMeta allows a query response to include potentially
// useful metadata about a query
type QueryMeta struct {
	// This is the index associated with the read
	Index uint64

	// If AllowStale is used, this is time elapsed since
	// last contact between the follower and leader. This
	// can be used to gauge staleness.
	LastContact time.Duration

	// Used to indicate if there is a known leader node
	KnownLeader bool

	// NextToken is the token returned with queries that support
	// paginated lists. To resume paging from this point, pass
	// this token in the next request's QueryOptions.
	NextToken string
}

// WriteMeta allows a write response to include potentially
// useful metadata about the write
type WriteMeta struct {
	// This is the index associated with the write
	Index uint64
}
   578  
// NodeRegisterRequest is used for Node.Register endpoint
// to register a node as being a schedulable entity.
type NodeRegisterRequest struct {
	// Node is the node being registered.
	Node *Node
	// NodeEvent is an optional event to record alongside the registration.
	NodeEvent *NodeEvent

	// CreateNodePool is used to indicate that the node's node pool should be
	// created along with the node registration if it doesn't exist.
	CreateNodePool bool

	WriteRequest
}

// NodeDeregisterRequest is used for Node.Deregister endpoint
// to deregister a node as being a schedulable entity.
type NodeDeregisterRequest struct {
	NodeID string
	WriteRequest
}

// NodeBatchDeregisterRequest is used for Node.BatchDeregister endpoint
// to deregister a batch of nodes from being schedulable entities.
type NodeBatchDeregisterRequest struct {
	NodeIDs []string
	WriteRequest
}

// NodeServerInfo is used to in NodeUpdateResponse to return Nomad server
// information used in RPC server lists.
type NodeServerInfo struct {
	// RPCAdvertiseAddr is the IP endpoint that a Nomad Server wishes to
	// be contacted at for RPCs.
	RPCAdvertiseAddr string

	// RPCMajorVersion is the major version number the Nomad Server
	// supports
	RPCMajorVersion int32

	// RPCMinorVersion is the minor version number the Nomad Server
	// supports
	RPCMinorVersion int32

	// Datacenter is the datacenter that a Nomad server belongs to
	Datacenter string
}
   624  
// NodeUpdateStatusRequest is used for Node.UpdateStatus endpoint
// to update the status of a node.
type NodeUpdateStatusRequest struct {
	NodeID string
	// Status is the new status value for the node.
	Status string
	// NodeEvent is an optional event to record alongside the update.
	NodeEvent *NodeEvent
	// UpdatedAt represents server time of receiving request.
	UpdatedAt int64
	WriteRequest
}

// NodeUpdateDrainRequest is used for updating the drain strategy
type NodeUpdateDrainRequest struct {
	NodeID        string
	DrainStrategy *DrainStrategy

	// MarkEligible marks the node as eligible if removing the drain strategy.
	MarkEligible bool

	// NodeEvent is the event added to the node
	NodeEvent *NodeEvent

	// UpdatedAt represents server time of receiving request
	UpdatedAt int64

	// Meta is user-provided metadata relating to the drain operation
	Meta map[string]string

	WriteRequest
}

// BatchNodeUpdateDrainRequest is used for updating the drain strategy for a
// batch of nodes
type BatchNodeUpdateDrainRequest struct {
	// Updates is a mapping of nodes to their updated drain strategy
	Updates map[string]*DrainUpdate

	// NodeEvents is a mapping of the node to the event to add to the node
	NodeEvents map[string]*NodeEvent

	// UpdatedAt represents server time of receiving request
	UpdatedAt int64

	WriteRequest
}

// DrainUpdate is used to update the drain of a node
type DrainUpdate struct {
	// DrainStrategy is the new strategy for the node
	DrainStrategy *DrainStrategy

	// MarkEligible marks the node as eligible if removing the drain strategy.
	MarkEligible bool
}

// NodeUpdateEligibilityRequest is used for updating the scheduling eligibility
type NodeUpdateEligibilityRequest struct {
	NodeID      string
	Eligibility string

	// NodeEvent is the event added to the node
	NodeEvent *NodeEvent

	// UpdatedAt represents server time of receiving request
	UpdatedAt int64

	WriteRequest
}

// NodeEvaluateRequest is used to re-evaluate the node
type NodeEvaluateRequest struct {
	NodeID string
	WriteRequest
}

// NodeSpecificRequest is used when we just need to specify a target node
type NodeSpecificRequest struct {
	NodeID   string
	SecretID string
	QueryOptions
}
   705  
// JobRegisterRequest is used for Job.Register endpoint
// to register a job as being a schedulable entity.
type JobRegisterRequest struct {
	// Submission is the original job source submitted by the user.
	Submission *JobSubmission

	// Job is the parsed job, no matter what form the input was in.
	Job *Job

	// If EnforceIndex is set then the job will only be registered if the passed
	// JobModifyIndex matches the current Jobs index. If the index is zero, the
	// register only occurs if the job is new.
	EnforceIndex   bool
	JobModifyIndex uint64

	// PreserveCounts indicates that during job update, existing task group
	// counts should be preserved, over those specified in the new job spec
	// PreserveCounts is ignored for newly created jobs.
	PreserveCounts bool

	// PolicyOverride is set when the user is attempting to override any policies
	PolicyOverride bool

	// EvalPriority is an optional priority to use on any evaluation created as
	// a result on this job registration. This value must be between 1-100
	// inclusively, where a larger value corresponds to a higher priority. This
	// is useful when an operator wishes to push through a job registration in
	// busy clusters with a large evaluation backlog. This avoids needing to
	// change the job priority which also impacts preemption.
	EvalPriority int

	// Eval is the evaluation that is associated with the job registration
	Eval *Evaluation

	// Deployment is the deployment to be created when the job is registered. If
	// there is an active deployment for the job it will be canceled.
	Deployment *Deployment

	WriteRequest
}
   745  
// JobDeregisterRequest is used for Job.Deregister endpoint
// to deregister a job as being a schedulable entity.
type JobDeregisterRequest struct {
	JobID string

	// Purge controls whether the deregister purges the job from the system or
	// whether the job is just marked as stopped and will be removed by the
	// garbage collector
	Purge bool

	// Global controls whether all regions of a multi-region job are
	// deregistered. It is ignored for single-region jobs.
	Global bool

	// EvalPriority is an optional priority to use on any evaluation created as
	// a result on this job deregistration. This value must be between 1-100
	// inclusively, where a larger value corresponds to a higher priority. This
	// is useful when an operator wishes to push through a job deregistration
	// in busy clusters with a large evaluation backlog.
	EvalPriority int

	// NoShutdownDelay, if set to true, will override the group and
	// task shutdown_delay configuration and ignore the delay for any
	// allocations stopped as a result of this Deregister call.
	NoShutdownDelay bool

	// Eval is the evaluation to create that's associated with job deregister
	Eval *Evaluation

	WriteRequest
}

// JobBatchDeregisterRequest is used to batch deregister jobs and upsert
// evaluations.
type JobBatchDeregisterRequest struct {
	// Jobs is the set of jobs to deregister
	Jobs map[NamespacedID]*JobDeregisterOptions

	// Evals is the set of evaluations to create.
	Evals []*Evaluation

	WriteRequest
}

// JobDeregisterOptions configures how a job is deregistered.
type JobDeregisterOptions struct {
	// Purge controls whether the deregister purges the job from the system or
	// whether the job is just marked as stopped and will be removed by the
	// garbage collector
	Purge bool
}

// JobEvaluateRequest is used when we just need to re-evaluate a target job
type JobEvaluateRequest struct {
	JobID       string
	EvalOptions EvalOptions
	WriteRequest
}

// EvalOptions is used to encapsulate options when forcing a job evaluation
type EvalOptions struct {
	// ForceReschedule forces rescheduling of failed allocations.
	ForceReschedule bool
}
   809  
   810  // JobSubmissionRequest is used to query a JobSubmission object associated with a
   811  // job at a specific version.
   812  type JobSubmissionRequest struct {
   813  	JobID   string
   814  	Version uint64
   815  
   816  	QueryOptions
   817  }
   818  
   819  // JobSubmissionResponse contains a JobSubmission object, which may be nil
   820  // if no submission data is available.
   821  type JobSubmissionResponse struct {
   822  	Submission *JobSubmission
   823  
   824  	QueryMeta
   825  }
   826  
   827  // JobSpecificRequest is used when we just need to specify a target job
   828  type JobSpecificRequest struct {
   829  	JobID string
   830  	All   bool
   831  	QueryOptions
   832  }
   833  
   834  // JobListRequest is used to parameterize a list request
   835  type JobListRequest struct {
   836  	QueryOptions
   837  	Fields *JobStubFields
   838  }
   839  
   840  // Stub returns a summarized version of the job
   841  type JobStubFields struct {
   842  	Meta bool
   843  }
   844  
// JobPlanRequest is used for the Job.Plan endpoint to trigger a dry-run
// evaluation of the Job.
type JobPlanRequest struct {
	Job  *Job
	Diff bool // Toggles an annotated diff
	// PolicyOverride is set when the user is attempting to override any policies
	PolicyOverride bool
	WriteRequest
}

// JobScaleRequest is used for the Job.Scale endpoint to scale one of the
// scaling targets in a job
type JobScaleRequest struct {
	JobID   string
	Target  map[string]string
	Count   *int64
	Message string
	Error   bool
	Meta    map[string]interface{}
	// PolicyOverride is set when the user is attempting to override any policies
	PolicyOverride bool
	WriteRequest
}
   868  
   869  // Validate is used to validate the arguments in the request
   870  func (r *JobScaleRequest) Validate() error {
   871  	namespace := r.Target[ScalingTargetNamespace]
   872  	if namespace != "" && namespace != r.RequestNamespace() {
   873  		return NewErrRPCCoded(400, "namespace in payload did not match header")
   874  	}
   875  
   876  	jobID := r.Target[ScalingTargetJob]
   877  	if jobID != "" && jobID != r.JobID {
   878  		return fmt.Errorf("job ID in payload did not match URL")
   879  	}
   880  
   881  	groupName := r.Target[ScalingTargetGroup]
   882  	if groupName == "" {
   883  		return NewErrRPCCoded(400, "missing task group name for scaling action")
   884  	}
   885  
   886  	if r.Count != nil {
   887  		if *r.Count < 0 {
   888  			return NewErrRPCCoded(400, "scaling action count can't be negative")
   889  		}
   890  
   891  		if r.Error {
   892  			return NewErrRPCCoded(400, "scaling action should not contain count if error is true")
   893  		}
   894  
   895  		truncCount := int(*r.Count)
   896  		if int64(truncCount) != *r.Count {
   897  			return NewErrRPCCoded(400,
   898  				fmt.Sprintf("new scaling count is too large for TaskGroup.Count (int): %v", r.Count))
   899  		}
   900  	}
   901  
   902  	return nil
   903  }
   904  
// JobSummaryRequest is used when we just need to get a specific job summary
type JobSummaryRequest struct {
	JobID string
	QueryOptions
}

// JobScaleStatusRequest is used to get the scale status for a job
type JobScaleStatusRequest struct {
	JobID string
	QueryOptions
}

// JobDispatchRequest is used to dispatch a job based on a parameterized job
type JobDispatchRequest struct {
	JobID   string
	Payload []byte
	Meta    map[string]string
	WriteRequest
	// IdPrefixTemplate is an optional prefix added to the dispatched job ID.
	IdPrefixTemplate string
}

// JobValidateRequest is used to validate a job
type JobValidateRequest struct {
	Job *Job
	WriteRequest
}
   931  
// JobRevertRequest is used to revert a job to a prior version.
type JobRevertRequest struct {
	// JobID is the ID of the job being reverted
	JobID string

	// JobVersion the version to revert to.
	JobVersion uint64

	// EnforcePriorVersion if set will enforce that the job is at the given
	// version before reverting.
	EnforcePriorVersion *uint64

	// ConsulToken is the Consul token that proves the submitter of the job revert
	// has access to the Service Identity policies associated with the job's
	// Consul Connect enabled services. This field is only used to transfer the
	// token and is not stored after the Job revert.
	ConsulToken string

	// VaultToken is the Vault token that proves the submitter of the job revert
	// has access to any Vault policies specified in the targeted job version. This
	// field is only used to transfer the token and is not stored after the Job
	// revert.
	VaultToken string

	WriteRequest
}

// JobStabilityRequest is used to mark a job version as stable or unstable.
type JobStabilityRequest struct {
	// Job to set the stability on
	JobID      string
	JobVersion uint64

	// Set the stability
	Stable bool
	WriteRequest
}

// JobStabilityResponse is the response when marking a job as stable.
type JobStabilityResponse struct {
	WriteMeta
}
   974  
// NodeListRequest is used to parameterize a list request
type NodeListRequest struct {
	QueryOptions

	// Fields selects optional fields to include in node list stubs.
	Fields *NodeStubFields
}

// EvalUpdateRequest is used for upserting evaluations.
type EvalUpdateRequest struct {
	Evals     []*Evaluation
	EvalToken string
	WriteRequest
}

// EvalReapRequest is used for reaping evaluations and allocation. This struct
// is used by the Eval.Reap RPC endpoint as a request argument, and also when
// performing eval reap or deletes via Raft. This is because Eval.Reap and
// Eval.Delete use the same Raft message when performing deletes so we do not
// need more Raft message types.
type EvalReapRequest struct {
	Evals  []string // slice of Evaluation IDs
	Allocs []string // slice of Allocation IDs

	// Filter specifies the go-bexpr filter expression to be used for
	// filtering the data prior to returning a response
	Filter    string
	PerPage   int32
	NextToken string

	// UserInitiated tracks whether this reap request is the result of an
	// operator request. If this is true, the FSM needs to ensure the eval
	// broker is paused as the request can include non-terminal allocations.
	UserInitiated bool

	WriteRequest
}
  1011  
// EvalSpecificRequest is used when we just need to specify a target evaluation
type EvalSpecificRequest struct {
	EvalID         string
	// IncludeRelated, if set, also returns evaluations related to the target.
	IncludeRelated bool
	QueryOptions
}

// EvalAckRequest is used to Ack/Nack a specific evaluation
type EvalAckRequest struct {
	EvalID string
	Token  string
	WriteRequest
}

// EvalDequeueRequest is used when we want to dequeue an evaluation
type EvalDequeueRequest struct {
	Schedulers       []string
	Timeout          time.Duration
	SchedulerVersion uint16
	WriteRequest
}

// EvalListRequest is used to list the evaluations
type EvalListRequest struct {
	FilterJobID      string
	FilterEvalStatus string
	QueryOptions
}
  1040  
  1041  // ShouldBeFiltered indicates that the eval should be filtered (that
  1042  // is, removed) from the results
  1043  func (req *EvalListRequest) ShouldBeFiltered(e *Evaluation) bool {
  1044  	if req.FilterJobID != "" && req.FilterJobID != e.JobID {
  1045  		return true
  1046  	}
  1047  	if req.FilterEvalStatus != "" && req.FilterEvalStatus != e.Status {
  1048  		return true
  1049  	}
  1050  	return false
  1051  }
  1052  
// EvalCountRequest is used to count evaluations
type EvalCountRequest struct {
	QueryOptions
}

// PlanRequest is used to submit an allocation plan to the leader
type PlanRequest struct {
	Plan *Plan
	WriteRequest
}
  1063  
// ApplyPlanResultsRequest is used by the planner to apply a Raft transaction
// committing the result of a plan.
type ApplyPlanResultsRequest struct {
	// AllocUpdateRequest holds the allocation updates to be made by the
	// scheduler.
	AllocUpdateRequest

	// Deployment is the deployment created or updated as a result of a
	// scheduling event.
	Deployment *Deployment

	// DeploymentUpdates is a set of status updates to apply to the given
	// deployments. This allows the scheduler to cancel any unneeded deployment
	// because the job is stopped or the update block is removed.
	DeploymentUpdates []*DeploymentStatusUpdate

	// EvalID is the eval ID of the plan being applied. The modify index of the
	// evaluation is updated as part of applying the plan to ensure that subsequent
	// scheduling events for the same job will wait for the index that last produced
	// state changes. This is necessary for blocked evaluations since they can be
	// processed many times, potentially making state updates, without the state of
	// the evaluation itself being updated.
	EvalID string

	// COMPAT 0.11
	// NodePreemptions is a slice of allocations from other lower priority jobs
	// that are preempted. Preempted allocations are marked as evicted.
	// Deprecated: Replaced with AllocsPreempted which contains only the diff
	NodePreemptions []*Allocation

	// AllocsPreempted is a slice of allocation diffs from other lower priority jobs
	// that are preempted. Preempted allocations are marked as evicted.
	AllocsPreempted []*AllocationDiff

	// PreemptionEvals is a slice of follow up evals for jobs whose allocations
	// have been preempted to place allocs in this plan
	PreemptionEvals []*Evaluation

	// IneligibleNodes are nodes the plan applier has repeatedly rejected
	// placements for and should therefore be considered ineligible by workers
	// to avoid retrying them repeatedly.
	IneligibleNodes []string

	// UpdatedAt represents server time of receiving request.
	UpdatedAt int64
}
  1110  
// AllocUpdateRequest is used to submit changes to allocations, either
// to cause evictions or to assign new allocations. Both can be done
// within a single transaction
type AllocUpdateRequest struct {
	// COMPAT 0.11
	// Alloc is the list of new allocations to assign
	// Deprecated: Replaced with two separate slices, one containing stopped allocations
	// and another containing updated allocations
	Alloc []*Allocation

	// Allocations to stop. Contains only the diff, not the entire allocation
	AllocsStopped []*AllocationDiff

	// New or updated allocations
	AllocsUpdated []*Allocation

	// Evals is the list of new evaluations to create
	// Evals are valid only when used in the Raft RPC
	Evals []*Evaluation

	// Job is the shared parent job of the allocations.
	// It is pulled out since it is common to reduce payload size.
	Job *Job

	WriteRequest
}

// AllocUpdateDesiredTransitionRequest is used to submit changes to allocations
// desired transition state.
type AllocUpdateDesiredTransitionRequest struct {
	// Allocs is the mapping of allocation ids to their desired state
	// transition
	Allocs map[string]*DesiredTransition

	// Evals is the set of evaluations to create
	Evals []*Evaluation

	WriteRequest
}
  1150  
// AllocStopRequest is used to stop and reschedule a running Allocation.
type AllocStopRequest struct {
	AllocID         string
	// NoShutdownDelay, if true, ignores any configured shutdown_delay when
	// stopping the allocation.
	NoShutdownDelay bool

	WriteRequest
}

// AllocStopResponse is the response to an `AllocStopRequest`
type AllocStopResponse struct {
	// EvalID is the id of the follow up evaluation for the rescheduled alloc.
	EvalID string

	WriteMeta
}

// AllocListRequest is used to request a list of allocations
type AllocListRequest struct {
	QueryOptions

	// Fields selects optional fields to include in allocation list stubs.
	Fields *AllocStubFields
}

// AllocSpecificRequest is used to query a specific allocation
type AllocSpecificRequest struct {
	AllocID string
	QueryOptions
}

// AllocSignalRequest is used to signal a specific allocation
type AllocSignalRequest struct {
	AllocID string
	Task    string
	Signal  string
	QueryOptions
}

// AllocsGetRequest is used to query a set of allocations
type AllocsGetRequest struct {
	AllocIDs []string
	QueryOptions
}

// AllocRestartRequest is used to restart a specific allocations tasks.
type AllocRestartRequest struct {
	AllocID  string
	TaskName string
	// AllTasks, if true, restarts every task in the allocation rather than
	// only TaskName.
	AllTasks bool

	QueryOptions
}

// PeriodicForceRequest is used to force a specific periodic job.
type PeriodicForceRequest struct {
	JobID string
	WriteRequest
}
  1208  
// ServerMembersResponse has the list of servers in a cluster
type ServerMembersResponse struct {
	ServerName   string
	ServerRegion string
	ServerDC     string
	Members      []*ServerMember
}

// ServerMember holds information about a Nomad server agent in a cluster
type ServerMember struct {
	Name        string
	Addr        net.IP
	Port        uint16
	Tags        map[string]string
	Status      string
	// Protocol and delegate version ranges advertised by the agent.
	ProtocolMin uint8
	ProtocolMax uint8
	ProtocolCur uint8
	DelegateMin uint8
	DelegateMax uint8
	DelegateCur uint8
}

// ClusterMetadata is used to store per-cluster metadata.
type ClusterMetadata struct {
	ClusterID  string
	CreateTime int64
}
  1237  
// DeriveVaultTokenRequest is used to request wrapped Vault tokens for the
// following tasks in the given allocation
type DeriveVaultTokenRequest struct {
	NodeID   string
	SecretID string
	AllocID  string
	Tasks    []string
	QueryOptions
}

// VaultAccessorsRequest is used to operate on a set of Vault accessors
type VaultAccessorsRequest struct {
	Accessors []*VaultAccessor
}

// VaultAccessor is a reference to a created Vault token on behalf of
// an allocation's task.
type VaultAccessor struct {
	AllocID     string
	Task        string
	NodeID      string
	Accessor    string
	CreationTTL int

	// Raft Indexes
	CreateIndex uint64
}

// DeriveVaultTokenResponse returns the wrapped tokens for each requested task
type DeriveVaultTokenResponse struct {
	// Tasks is a mapping between the task name and the wrapped token
	Tasks map[string]string

	// Error stores any error that occurred. Errors are stored here so we can
	// communicate whether it is retryable
	Error *RecoverableError

	QueryMeta
}

// GenericRequest is used for requests where no
// specific information is needed.
type GenericRequest struct {
	QueryOptions
}
  1283  
// DeploymentListRequest is used to list the deployments
type DeploymentListRequest struct {
	QueryOptions
}

// DeploymentDeleteRequest is used for deleting deployments.
type DeploymentDeleteRequest struct {
	Deployments []string
	WriteRequest
}

// DeploymentStatusUpdateRequest is used to update the status of a deployment as
// well as optionally creating an evaluation atomically.
type DeploymentStatusUpdateRequest struct {
	// Eval, if set, is used to create an evaluation at the same time as
	// updating the status of a deployment.
	Eval *Evaluation

	// DeploymentUpdate is a status update to apply to the given
	// deployment.
	DeploymentUpdate *DeploymentStatusUpdate

	// Job is used to optionally upsert a job. This is used when setting the
	// allocation health results in a deployment failure and the deployment
	// auto-reverts to the latest stable job.
	Job *Job
}
  1311  
// DeploymentAllocHealthRequest is used to set the health of a set of
// allocations as part of a deployment.
type DeploymentAllocHealthRequest struct {
	DeploymentID string

	// Marks these allocations as healthy, allow further allocations
	// to be rolled.
	HealthyAllocationIDs []string

	// Any unhealthy allocations fail the deployment
	UnhealthyAllocationIDs []string

	WriteRequest
}

// ApplyDeploymentAllocHealthRequest is used to apply an alloc health request via Raft
type ApplyDeploymentAllocHealthRequest struct {
	DeploymentAllocHealthRequest

	// Timestamp is the timestamp to use when setting the allocations health.
	Timestamp time.Time

	// An optional field to update the status of a deployment
	DeploymentUpdate *DeploymentStatusUpdate

	// Job is used to optionally upsert a job. This is used when setting the
	// allocation health results in a deployment failure and the deployment
	// auto-reverts to the latest stable job.
	Job *Job

	// An optional evaluation to create after promoting the canaries
	Eval *Evaluation
}

// DeploymentPromoteRequest is used to promote task groups in a deployment
type DeploymentPromoteRequest struct {
	DeploymentID string

	// All is to promote all task groups
	All bool

	// Groups is used to set the promotion status per task group
	Groups []string

	WriteRequest
}

// ApplyDeploymentPromoteRequest is used to apply a promotion request via Raft
type ApplyDeploymentPromoteRequest struct {
	DeploymentPromoteRequest

	// An optional evaluation to create after promoting the canaries
	Eval *Evaluation
}
  1366  
// DeploymentPauseRequest is used to pause a deployment
type DeploymentPauseRequest struct {
	DeploymentID string

	// Pause sets the pause status
	Pause bool

	WriteRequest
}

// DeploymentRunRequest is used to remotely start a pending deployment.
// Used only for multiregion deployments.
type DeploymentRunRequest struct {
	DeploymentID string

	WriteRequest
}

// DeploymentUnblockRequest is used to remotely unblock a deployment.
// Used only for multiregion deployments.
type DeploymentUnblockRequest struct {
	DeploymentID string

	WriteRequest
}

// DeploymentCancelRequest is used to remotely cancel a deployment.
// Used only for multiregion deployments.
type DeploymentCancelRequest struct {
	DeploymentID string

	WriteRequest
}

// DeploymentSpecificRequest is used to make a request specific to a particular
// deployment
type DeploymentSpecificRequest struct {
	DeploymentID string
	QueryOptions
}

// DeploymentFailRequest is used to fail a particular deployment
type DeploymentFailRequest struct {
	DeploymentID string
	WriteRequest
}
  1413  
// ScalingPolicySpecificRequest is used when we just need to specify a target scaling policy
type ScalingPolicySpecificRequest struct {
	ID string
	QueryOptions
}

// SingleScalingPolicyResponse is used to return a single scaling policy
type SingleScalingPolicyResponse struct {
	Policy *ScalingPolicy
	QueryMeta
}

// ScalingPolicyListRequest is used to parameterize a scaling policy list request
type ScalingPolicyListRequest struct {
	Job  string
	Type string
	QueryOptions
}

// ScalingPolicyListResponse is used for a list request
type ScalingPolicyListResponse struct {
	Policies []*ScalingPolicyListStub
	QueryMeta
}

// SingleDeploymentResponse is used to respond with a single deployment
type SingleDeploymentResponse struct {
	Deployment *Deployment
	QueryMeta
}
  1444  
// GenericResponse is used to respond to a request where no
// specific response information is needed.
type GenericResponse struct {
	WriteMeta
}

// VersionResponse is used for the Status.Version response
type VersionResponse struct {
	Build    string
	Versions map[string]int
	QueryMeta
}

// JobRegisterResponse is used to respond to a job registration
type JobRegisterResponse struct {
	EvalID          string
	EvalCreateIndex uint64
	JobModifyIndex  uint64

	// Warnings contains any warnings about the given job. These may include
	// deprecation warnings.
	Warnings string

	QueryMeta
}

// JobDeregisterResponse is used to respond to a job deregistration
type JobDeregisterResponse struct {
	EvalID          string
	EvalCreateIndex uint64
	JobModifyIndex  uint64
	VolumeEvalID    string
	VolumeEvalIndex uint64
	QueryMeta
}
  1480  
// JobBatchDeregisterResponse is used to respond to a batch job deregistration
type JobBatchDeregisterResponse struct {
	// JobEvals maps the job to its created evaluation
	JobEvals map[NamespacedID]string
	QueryMeta
}

// JobValidateResponse is the response from validate request
type JobValidateResponse struct {
	// DriverConfigValidated indicates whether the agent validated the driver
	// config
	DriverConfigValidated bool

	// ValidationErrors is a list of validation errors
	ValidationErrors []string

	// Error is a string version of any error that may have occurred
	Error string

	// Warnings contains any warnings about the given job. These may include
	// deprecation warnings.
	Warnings string
}
  1504  
// NodeUpdateResponse is used to respond to a node update
type NodeUpdateResponse struct {
	HeartbeatTTL    time.Duration
	EvalIDs         []string
	EvalCreateIndex uint64
	NodeModifyIndex uint64

	// Features informs clients what enterprise features are allowed
	Features uint64

	// LeaderRPCAddr is the RPC address of the current Raft Leader.  If
	// empty, the current Nomad Server is in the minority of a partition.
	LeaderRPCAddr string

	// NumNodes is the number of Nomad nodes attached to this quorum of
	// Nomad Servers at the time of the response.  This value can
	// fluctuate based on the health of the cluster between heartbeats.
	NumNodes int32

	// Servers is the full list of known Nomad servers in the local
	// region.
	Servers []*NodeServerInfo

	// SchedulingEligibility is used to inform clients what the server-side
	// has for their scheduling status during heartbeats.
	SchedulingEligibility string

	QueryMeta
}

// NodeDrainUpdateResponse is used to respond to a node drain update
type NodeDrainUpdateResponse struct {
	NodeModifyIndex uint64
	EvalIDs         []string
	EvalCreateIndex uint64
	WriteMeta
}

// NodeEligibilityUpdateResponse is used to respond to a node eligibility update
type NodeEligibilityUpdateResponse struct {
	NodeModifyIndex uint64
	EvalIDs         []string
	EvalCreateIndex uint64
	WriteMeta
}
  1550  
// NodeAllocsResponse is used to return allocs for a single node
type NodeAllocsResponse struct {
	Allocs []*Allocation
	QueryMeta
}

// NodeClientAllocsResponse is used to return allocs meta data for a single node
type NodeClientAllocsResponse struct {
	// Allocs maps allocation IDs to their modify index.
	Allocs map[string]uint64

	// MigrateTokens are used when ACLs are enabled to allow cross node,
	// authenticated access to sticky volumes
	MigrateTokens map[string]string

	QueryMeta
}

// SingleNodeResponse is used to return a single node
type SingleNodeResponse struct {
	Node *Node
	QueryMeta
}

// NodeListResponse is used for a list request
type NodeListResponse struct {
	Nodes []*NodeListStub
	QueryMeta
}

// SingleJobResponse is used to return a single job
type SingleJobResponse struct {
	Job *Job
	QueryMeta
}

// JobSummaryResponse is used to return a single job summary
type JobSummaryResponse struct {
	JobSummary *JobSummary
	QueryMeta
}

// JobScaleStatusResponse is used to return the scale status for a job
type JobScaleStatusResponse struct {
	JobScaleStatus *JobScaleStatus
	QueryMeta
}
  1597  
// JobScaleStatus holds the scaling status of a job and its task groups.
type JobScaleStatus struct {
	JobID          string
	Namespace      string
	JobCreateIndex uint64
	JobModifyIndex uint64
	JobStopped     bool
	TaskGroups     map[string]*TaskGroupScaleStatus
}

// TaskGroupScaleStatus is used to return the scale status for a given task group
type TaskGroupScaleStatus struct {
	Desired   int
	Placed    int
	Running   int
	Healthy   int
	Unhealthy int
	Events    []*ScalingEvent
}

// JobDispatchResponse is used to respond to a job dispatch request.
type JobDispatchResponse struct {
	DispatchedJobID string
	EvalID          string
	EvalCreateIndex uint64
	JobCreateIndex  uint64
	WriteMeta
}

// JobListResponse is used for a list request
type JobListResponse struct {
	Jobs []*JobListStub
	QueryMeta
}

// JobVersionsRequest is used to get a jobs versions
type JobVersionsRequest struct {
	JobID string
	Diffs bool
	QueryOptions
}

// JobVersionsResponse is used for a job get versions request
type JobVersionsResponse struct {
	Versions []*Job
	Diffs    []*JobDiff
	QueryMeta
}
  1644  
// JobPlanResponse is used to respond to a job plan request
type JobPlanResponse struct {
	// Annotations stores annotations explaining decisions the scheduler made.
	Annotations *PlanAnnotations

	// FailedTGAllocs is the placement failures per task group.
	FailedTGAllocs map[string]*AllocMetric

	// JobModifyIndex is the modification index of the job. The value can be
	// used when running `nomad run` to ensure that the Job wasn’t modified
	// since the last plan. If the job is being created, the value is zero.
	JobModifyIndex uint64

	// CreatedEvals is the set of evaluations created by the scheduler. The
	// reasons for this can be rolling-updates or blocked evals.
	CreatedEvals []*Evaluation

	// Diff contains the diff of the job and annotations on whether the change
	// causes an in-place update or create/destroy
	Diff *JobDiff

	// NextPeriodicLaunch is the time duration till the job would be launched if
	// submitted.
	NextPeriodicLaunch time.Time

	// Warnings contains any warnings about the given job. These may include
	// deprecation warnings.
	Warnings string

	WriteMeta
}
  1676  
// SingleAllocResponse is used to return a single allocation
type SingleAllocResponse struct {
	Alloc *Allocation
	QueryMeta
}

// AllocsGetResponse is used to return a set of allocations
type AllocsGetResponse struct {
	Allocs []*Allocation
	QueryMeta
}

// JobAllocationsResponse is used to return the allocations for a job
type JobAllocationsResponse struct {
	Allocations []*AllocListStub
	QueryMeta
}

// JobEvaluationsResponse is used to return the evaluations for a job
type JobEvaluationsResponse struct {
	Evaluations []*Evaluation
	QueryMeta
}

// SingleEvalResponse is used to return a single evaluation
type SingleEvalResponse struct {
	Eval *Evaluation
	QueryMeta
}

// EvalDequeueResponse is used to return from a dequeue
type EvalDequeueResponse struct {
	Eval  *Evaluation
	Token string

	// WaitIndex is the Raft index the worker should wait until invoking the
	// scheduler.
	WaitIndex uint64

	QueryMeta
}
  1718  
  1719  // GetWaitIndex is used to retrieve the Raft index in which state should be at
  1720  // or beyond before invoking the scheduler.
  1721  func (e *EvalDequeueResponse) GetWaitIndex() uint64 {
  1722  	// Prefer the wait index sent. This will be populated on all responses from
  1723  	// 0.7.0 and above
  1724  	if e.WaitIndex != 0 {
  1725  		return e.WaitIndex
  1726  	} else if e.Eval != nil {
  1727  		return e.Eval.ModifyIndex
  1728  	}
  1729  
  1730  	// This should never happen
  1731  	return 1
  1732  }
  1733  
  1734  // PlanResponse is used to return from a PlanRequest
  1735  type PlanResponse struct {
  1736  	Result *PlanResult
  1737  	WriteMeta
  1738  }
  1739  
  1740  // AllocListResponse is used for a list request
  1741  type AllocListResponse struct {
  1742  	Allocations []*AllocListStub
  1743  	QueryMeta
  1744  }
  1745  
// DeploymentListResponse is used for a list request
type DeploymentListResponse struct {
	// Deployments holds the full deployment objects matching the request.
	Deployments []*Deployment
	QueryMeta
}
  1751  
// EvalListResponse is used for a list request
type EvalListResponse struct {
	// Evaluations holds the evaluations matching the request.
	Evaluations []*Evaluation
	QueryMeta
}
  1757  
// EvalCountResponse is used for a count request
type EvalCountResponse struct {
	// Count is the number of evaluations matching the request.
	Count int
	QueryMeta
}
  1763  
// EvalAllocationsResponse is used to return the allocations for an evaluation
type EvalAllocationsResponse struct {
	// Allocations are summarized list stubs created by the evaluation.
	Allocations []*AllocListStub
	QueryMeta
}
  1769  
// PeriodicForceResponse is used to respond to a periodic job force launch.
// EvalID and EvalCreateIndex identify the evaluation created by the forced
// launch.
type PeriodicForceResponse struct {
	EvalID          string
	EvalCreateIndex uint64
	WriteMeta
}
  1776  
// DeploymentUpdateResponse is used to respond to a deployment change. The
// response will include the modify index of the deployment as well as details
// of any triggered evaluation (EvalID / EvalCreateIndex).
type DeploymentUpdateResponse struct {
	EvalID                string
	EvalCreateIndex       uint64
	DeploymentModifyIndex uint64

	// RevertedJobVersion is the version the job was reverted to. If unset, the
	// job wasn't reverted
	RevertedJobVersion *uint64

	WriteMeta
}
  1791  
// NodeConnQueryResponse is used to respond to a query of whether a server has
// a connection to a specific Node
type NodeConnQueryResponse struct {
	// Connected indicates whether a connection to the Client exists
	Connected bool

	// Established marks the time at which the connection was established
	Established time.Time

	QueryMeta
}
  1803  
// HostDataRequest is used by /agent/host to retrieve data about the agent's host system. If
// ServerID or NodeID is specified, the request is forwarded to the remote agent
type HostDataRequest struct {
	ServerID string
	NodeID   string
	// QueryOptions carries the usual region/auth query parameters.
	QueryOptions
}
  1811  
// HostDataResponse contains the HostData content along with the ID of the
// agent that produced it.
type HostDataResponse struct {
	AgentID  string
	HostData *host.HostData
}
  1817  
// EmitNodeEventsRequest is a request to update the node events source
// with a new client-side event
type EmitNodeEventsRequest struct {
	// NodeEvents are a map where the key is a node id, and value is a list of
	// events for that node
	NodeEvents map[string][]*NodeEvent

	WriteRequest
}
  1827  
// EmitNodeEventsResponse is a response to the client about the status of
// the node event source update. It carries no payload beyond write metadata.
type EmitNodeEventsResponse struct {
	WriteMeta
}
  1833  
// Node event subsystems identify which part of the client generated a
// NodeEvent (stored in NodeEvent.Subsystem).
const (
	NodeEventSubsystemDrain     = "Drain"
	NodeEventSubsystemDriver    = "Driver"
	NodeEventSubsystemHeartbeat = "Heartbeat"
	NodeEventSubsystemCluster   = "Cluster"
	NodeEventSubsystemScheduler = "Scheduler"
	NodeEventSubsystemStorage   = "Storage"
)
  1842  
  1843  // NodeEvent is a single unit representing a node’s state change
  1844  type NodeEvent struct {
  1845  	Message     string
  1846  	Subsystem   string
  1847  	Details     map[string]string
  1848  	Timestamp   time.Time
  1849  	CreateIndex uint64
  1850  }
  1851  
  1852  func (ne *NodeEvent) String() string {
  1853  	var details []string
  1854  	for k, v := range ne.Details {
  1855  		details = append(details, fmt.Sprintf("%s: %s", k, v))
  1856  	}
  1857  
  1858  	return fmt.Sprintf("Message: %s, Subsystem: %s, Details: %s, Timestamp: %s", ne.Message, ne.Subsystem, strings.Join(details, ","), ne.Timestamp.String())
  1859  }
  1860  
  1861  func (ne *NodeEvent) Copy() *NodeEvent {
  1862  	c := new(NodeEvent)
  1863  	*c = *ne
  1864  	c.Details = maps.Clone(ne.Details)
  1865  	return c
  1866  }
  1867  
  1868  // NewNodeEvent generates a new node event storing the current time as the
  1869  // timestamp
  1870  func NewNodeEvent() *NodeEvent {
  1871  	return &NodeEvent{Timestamp: time.Now()}
  1872  }
  1873  
  1874  // SetMessage is used to set the message on the node event
  1875  func (ne *NodeEvent) SetMessage(msg string) *NodeEvent {
  1876  	ne.Message = msg
  1877  	return ne
  1878  }
  1879  
  1880  // SetSubsystem is used to set the subsystem on the node event
  1881  func (ne *NodeEvent) SetSubsystem(sys string) *NodeEvent {
  1882  	ne.Subsystem = sys
  1883  	return ne
  1884  }
  1885  
  1886  // SetTimestamp is used to set the timestamp on the node event
  1887  func (ne *NodeEvent) SetTimestamp(ts time.Time) *NodeEvent {
  1888  	ne.Timestamp = ts
  1889  	return ne
  1890  }
  1891  
  1892  // AddDetail is used to add a detail to the node event
  1893  func (ne *NodeEvent) AddDetail(k, v string) *NodeEvent {
  1894  	if ne.Details == nil {
  1895  		ne.Details = make(map[string]string, 1)
  1896  	}
  1897  	ne.Details[k] = v
  1898  	return ne
  1899  }
  1900  
  1901  const (
  1902  	NodeStatusInit         = "initializing"
  1903  	NodeStatusReady        = "ready"
  1904  	NodeStatusDown         = "down"
  1905  	NodeStatusDisconnected = "disconnected"
  1906  )
  1907  
  1908  // ShouldDrainNode checks if a given node status should trigger an
  1909  // evaluation. Some states don't require any further action.
  1910  func ShouldDrainNode(status string) bool {
  1911  	switch status {
  1912  	case NodeStatusInit, NodeStatusReady, NodeStatusDisconnected:
  1913  		return false
  1914  	case NodeStatusDown:
  1915  		return true
  1916  	default:
  1917  		panic(fmt.Sprintf("unhandled node status %s", status))
  1918  	}
  1919  }
  1920  
  1921  // ValidNodeStatus is used to check if a node status is valid
  1922  func ValidNodeStatus(status string) bool {
  1923  	switch status {
  1924  	case NodeStatusInit, NodeStatusReady, NodeStatusDown, NodeStatusDisconnected:
  1925  		return true
  1926  	default:
  1927  		return false
  1928  	}
  1929  }
  1930  
const (
	// NodeSchedulingEligible and Ineligible marks the node as eligible or not,
	// respectively, for receiving allocations. This is orthogonal to the node
	// status being ready. Stored in Node.SchedulingEligibility.
	NodeSchedulingEligible   = "eligible"
	NodeSchedulingIneligible = "ineligible"
)
  1938  
  1939  // DrainSpec describes a Node's desired drain behavior.
  1940  type DrainSpec struct {
  1941  	// Deadline is the duration after StartTime when the remaining
  1942  	// allocations on a draining Node should be told to stop.
  1943  	Deadline time.Duration
  1944  
  1945  	// IgnoreSystemJobs allows systems jobs to remain on the node even though it
  1946  	// has been marked for draining.
  1947  	IgnoreSystemJobs bool
  1948  }
  1949  
  1950  // DrainStrategy describes a Node's drain behavior.
  1951  type DrainStrategy struct {
  1952  	// DrainSpec is the user declared drain specification
  1953  	DrainSpec
  1954  
  1955  	// ForceDeadline is the deadline time for the drain after which drains will
  1956  	// be forced
  1957  	ForceDeadline time.Time
  1958  
  1959  	// StartedAt is the time the drain process started
  1960  	StartedAt time.Time
  1961  }
  1962  
  1963  func (d *DrainStrategy) Copy() *DrainStrategy {
  1964  	if d == nil {
  1965  		return nil
  1966  	}
  1967  
  1968  	nd := new(DrainStrategy)
  1969  	*nd = *d
  1970  	return nd
  1971  }
  1972  
  1973  // DeadlineTime returns a boolean whether the drain strategy allows an infinite
  1974  // duration or otherwise the deadline time. The force drain is captured by the
  1975  // deadline time being in the past.
  1976  func (d *DrainStrategy) DeadlineTime() (infinite bool, deadline time.Time) {
  1977  	// Treat the nil case as a force drain so during an upgrade where a node may
  1978  	// not have a drain strategy but has Drain set to true, it is treated as a
  1979  	// force to mimick old behavior.
  1980  	if d == nil {
  1981  		return false, time.Time{}
  1982  	}
  1983  
  1984  	ns := d.Deadline.Nanoseconds()
  1985  	switch {
  1986  	case ns < 0: // Force
  1987  		return false, time.Time{}
  1988  	case ns == 0: // Infinite
  1989  		return true, time.Time{}
  1990  	default:
  1991  		return false, d.ForceDeadline
  1992  	}
  1993  }
  1994  
  1995  func (d *DrainStrategy) Equal(o *DrainStrategy) bool {
  1996  	if d == nil && o == nil {
  1997  		return true
  1998  	} else if o != nil && d == nil {
  1999  		return false
  2000  	} else if d != nil && o == nil {
  2001  		return false
  2002  	}
  2003  
  2004  	// Compare values
  2005  	if d.ForceDeadline != o.ForceDeadline {
  2006  		return false
  2007  	} else if d.Deadline != o.Deadline {
  2008  		return false
  2009  	} else if d.IgnoreSystemJobs != o.IgnoreSystemJobs {
  2010  		return false
  2011  	}
  2012  
  2013  	return true
  2014  }
  2015  
  2016  const (
  2017  	// DrainStatuses are the various states a drain can be in, as reflect in DrainMetadata
  2018  	DrainStatusDraining DrainStatus = "draining"
  2019  	DrainStatusComplete DrainStatus = "complete"
  2020  	DrainStatusCanceled DrainStatus = "canceled"
  2021  )
  2022  
  2023  type DrainStatus string
  2024  
  2025  // DrainMetadata contains information about the most recent drain operation for a given Node.
  2026  type DrainMetadata struct {
  2027  	// StartedAt is the time that the drain operation started. This is equal to Node.DrainStrategy.StartedAt,
  2028  	// if it exists
  2029  	StartedAt time.Time
  2030  
  2031  	// UpdatedAt is the time that that this struct was most recently updated, either via API action
  2032  	// or drain completion
  2033  	UpdatedAt time.Time
  2034  
  2035  	// Status reflects the status of the drain operation.
  2036  	Status DrainStatus
  2037  
  2038  	// AccessorID is the accessor ID of the ACL token used in the most recent API operation against this drain
  2039  	AccessorID string
  2040  
  2041  	// Meta includes the operator-submitted metadata about this drain operation
  2042  	Meta map[string]string
  2043  }
  2044  
  2045  func (m *DrainMetadata) Copy() *DrainMetadata {
  2046  	if m == nil {
  2047  		return nil
  2048  	}
  2049  	c := new(DrainMetadata)
  2050  	*c = *m
  2051  	c.Meta = maps.Clone(m.Meta)
  2052  	return c
  2053  }
  2054  
// Node is a representation of a schedulable client node
type Node struct {
	// ID is a unique identifier for the node. It can be constructed
	// by doing a concatenation of the Name and Datacenter as a simple
	// approach. Alternatively a UUID may be used.
	ID string

	// SecretID is an ID that is only known by the Node and the set of Servers.
	// It is not accessible via the API and is used to authenticate nodes
	// conducting privileged activities.
	SecretID string

	// Datacenter for this node
	Datacenter string

	// Node name
	Name string

	// CgroupParent for this node (linux only)
	CgroupParent string

	// HTTPAddr is the address on which the Nomad client is listening for http
	// requests
	HTTPAddr string

	// TLSEnabled indicates if the Agent has TLS enabled for the HTTP API
	TLSEnabled bool

	// Attributes is an arbitrary set of key/value
	// data that can be used for constraints. Examples
	// include "kernel.name=linux", "arch=386", "driver.docker=1",
	// "docker.runtime=1.8.3"
	Attributes map[string]string

	// NodeResources captures the available resources on the client.
	NodeResources *NodeResources

	// ReservedResources captures the set resources on the client that are
	// reserved from scheduling.
	ReservedResources *NodeReservedResources

	// Resources is the available resources on the client.
	// For example 'cpu=2' 'memory=2048'
	// COMPAT(0.10): Remove after 0.10
	Resources *Resources

	// Reserved is the set of resources that are reserved,
	// and should be subtracted from the total resources for
	// the purposes of scheduling. This may provide certain
	// high-watermark tolerances or because of external schedulers
	// consuming resources.
	// COMPAT(0.10): Remove after 0.10
	Reserved *Resources

	// Links are used to 'link' this client to external
	// systems. For example 'consul=foo.dc1' 'aws=i-83212'
	// 'ami=ami-123'
	Links map[string]string

	// Meta is used to associate arbitrary metadata with this
	// client. This is opaque to Nomad.
	Meta map[string]string

	// NodeClass is an opaque identifier used to group nodes
	// together for the purpose of determining scheduling pressure.
	NodeClass string

	// NodePool is the node pool the node belongs to.
	NodePool string

	// ComputedClass is a unique id that identifies nodes with a common set of
	// attributes and capabilities.
	ComputedClass string

	// DrainStrategy determines the node's draining behavior.
	// Will be non-nil only while draining.
	DrainStrategy *DrainStrategy

	// SchedulingEligibility determines whether this node will receive new
	// placements.
	SchedulingEligibility string

	// Status of this node
	Status string

	// StatusDescription is meant to provide more human useful information
	StatusDescription string

	// StatusUpdatedAt is the time stamp at which the state of the node was
	// updated
	StatusUpdatedAt int64

	// Events is the most recent set of events generated for the node,
	// retaining only MaxRetainedNodeEvents number at a time
	Events []*NodeEvent

	// Drivers is a map of driver names to current driver information
	Drivers map[string]*DriverInfo

	// CSIControllerPlugins is a map of plugin names to current CSI Plugin info
	CSIControllerPlugins map[string]*CSIInfo
	// CSINodePlugins is a map of plugin names to current CSI Plugin info
	CSINodePlugins map[string]*CSIInfo

	// HostVolumes is a map of host volume names to their configuration
	HostVolumes map[string]*ClientHostVolumeConfig

	// HostNetworks is a map of host_network names to their configuration
	HostNetworks map[string]*ClientHostNetworkConfig

	// LastDrain contains metadata about the most recent drain operation
	LastDrain *DrainMetadata

	// LastMissedHeartbeatIndex stores the Raft index when the node last missed
	// a heartbeat. It resets to zero once the node is marked as ready again.
	LastMissedHeartbeatIndex uint64

	// LastAllocUpdateIndex stores the Raft index of the last time the node
	// updated its allocations status.
	LastAllocUpdateIndex uint64

	// Raft Indexes
	CreateIndex uint64
	ModifyIndex uint64
}
  2180  
  2181  // GetID is a helper for getting the ID when the object may be nil and is
  2182  // required for pagination.
  2183  func (n *Node) GetID() string {
  2184  	if n == nil {
  2185  		return ""
  2186  	}
  2187  	return n.ID
  2188  }
  2189  
  2190  // Sanitize returns a copy of the Node omitting confidential fields
  2191  // It only returns a copy if the Node contains the confidential fields
  2192  func (n *Node) Sanitize() *Node {
  2193  	if n == nil {
  2194  		return nil
  2195  	}
  2196  	if n.SecretID == "" {
  2197  		return n
  2198  	}
  2199  	clean := n.Copy()
  2200  	clean.SecretID = ""
  2201  	return clean
  2202  }
  2203  
  2204  // Ready returns true if the node is ready for running allocations
  2205  func (n *Node) Ready() bool {
  2206  	return n.Status == NodeStatusReady && n.DrainStrategy == nil && n.SchedulingEligibility == NodeSchedulingEligible
  2207  }
  2208  
// Canonicalize normalizes a Node in place: defaults the node pool, derives
// SchedulingEligibility from the drain state, and upgrades legacy network
// resources. Safe to call on a nil receiver.
func (n *Node) Canonicalize() {
	if n == nil {
		return
	}

	// Default the node pool when unset.
	if n.NodePool == "" {
		n.NodePool = NodePoolDefault
	}

	// Ensure SchedulingEligibility is correctly set whenever draining so the plan applier and other scheduling logic
	// only need to check SchedulingEligibility when determining whether a placement is feasible on a node.
	if n.DrainStrategy != nil {
		n.SchedulingEligibility = NodeSchedulingIneligible
	} else if n.SchedulingEligibility == "" {
		n.SchedulingEligibility = NodeSchedulingEligible
	}

	// COMPAT remove in 1.0
	// In v0.12.0 we introduced a separate node specific network resource struct
	// so we need to convert any pre 0.12 clients to the correct struct
	if n.NodeResources != nil && n.NodeResources.NodeNetworks == nil {
		if n.NodeResources.Networks != nil {
			for _, nr := range n.NodeResources.Networks {
				nnr := &NodeNetworkResource{
					Mode:   nr.Mode,
					Speed:  nr.MBits,
					Device: nr.Device,
				}
				if nr.IP != "" {
					// Legacy networks carry a single IP; expose it under the
					// "default" alias.
					nnr.Addresses = []NodeNetworkAddress{
						{
							Alias:   "default",
							Address: nr.IP,
						},
					}
				}
				n.NodeResources.NodeNetworks = append(n.NodeResources.NodeNetworks, nnr)
			}
		}
	}
}
  2250  
  2251  func (n *Node) Copy() *Node {
  2252  	if n == nil {
  2253  		return nil
  2254  	}
  2255  	nn := *n
  2256  	nn.Attributes = maps.Clone(nn.Attributes)
  2257  	nn.NodeResources = nn.NodeResources.Copy()
  2258  	nn.ReservedResources = nn.ReservedResources.Copy()
  2259  	nn.Resources = nn.Resources.Copy()
  2260  	nn.Reserved = nn.Reserved.Copy()
  2261  	nn.Links = maps.Clone(nn.Links)
  2262  	nn.Meta = maps.Clone(nn.Meta)
  2263  	nn.DrainStrategy = nn.DrainStrategy.Copy()
  2264  	nn.Events = helper.CopySlice(n.Events)
  2265  	nn.Drivers = helper.DeepCopyMap(n.Drivers)
  2266  	nn.CSIControllerPlugins = helper.DeepCopyMap(nn.CSIControllerPlugins)
  2267  	nn.CSINodePlugins = helper.DeepCopyMap(nn.CSINodePlugins)
  2268  	nn.HostVolumes = helper.DeepCopyMap(n.HostVolumes)
  2269  	nn.HostNetworks = helper.DeepCopyMap(n.HostNetworks)
  2270  	nn.LastDrain = nn.LastDrain.Copy()
  2271  	return &nn
  2272  }
  2273  
  2274  // UnresponsiveStatus returns true if the node is a status where it is not
  2275  // communicating with the server.
  2276  func (n *Node) UnresponsiveStatus() bool {
  2277  	switch n.Status {
  2278  	case NodeStatusDown, NodeStatusDisconnected:
  2279  		return true
  2280  	default:
  2281  		return false
  2282  	}
  2283  }
  2284  
  2285  // TerminalStatus returns if the current status is terminal and
  2286  // will no longer transition.
  2287  func (n *Node) TerminalStatus() bool {
  2288  	switch n.Status {
  2289  	case NodeStatusDown:
  2290  		return true
  2291  	default:
  2292  		return false
  2293  	}
  2294  }
  2295  
// ComparableReservedResources returns the reserved resources on the node
// handling upgrade paths. Reserved networks must be handled separately. After
// 0.11 calls to this should be replaced with:
// node.ReservedResources.Comparable()
//
// Returns nil when the node has no reservations at all.
//
// COMPAT(0.11): Remove in 0.11
func (n *Node) ComparableReservedResources() *ComparableResources {
	// See if we can no-op
	if n.Reserved == nil && n.ReservedResources == nil {
		return nil
	}

	// Node already has 0.9+ behavior
	if n.ReservedResources != nil {
		return n.ReservedResources.Comparable()
	}

	// Upgrade path: translate the legacy Reserved struct.
	return &ComparableResources{
		Flattened: AllocatedTaskResources{
			Cpu: AllocatedCpuResources{
				CpuShares: int64(n.Reserved.CPU),
			},
			Memory: AllocatedMemoryResources{
				MemoryMB: int64(n.Reserved.MemoryMB),
			},
		},
		Shared: AllocatedSharedResources{
			DiskMB: int64(n.Reserved.DiskMB),
		},
	}
}
  2328  
// ComparableResources returns the resources on the node
// handling upgrade paths. Networking must be handled separately. After 0.11
// calls to this should be replaced with: node.NodeResources.Comparable()
//
// COMPAT(0.11): Remove in 0.11
func (n *Node) ComparableResources() *ComparableResources {
	// Node already has 0.9+ behavior
	if n.NodeResources != nil {
		return n.NodeResources.Comparable()
	}

	// Upgrade path: translate the legacy Resources struct.
	// NOTE(review): assumes n.Resources is non-nil whenever NodeResources is
	// nil — confirm callers guarantee this, otherwise this dereference panics.
	return &ComparableResources{
		Flattened: AllocatedTaskResources{
			Cpu: AllocatedCpuResources{
				CpuShares: int64(n.Resources.CPU),
			},
			Memory: AllocatedMemoryResources{
				MemoryMB: int64(n.Resources.MemoryMB),
			},
		},
		Shared: AllocatedSharedResources{
			DiskMB: int64(n.Resources.DiskMB),
		},
	}
}
  2355  
  2356  func (n *Node) IsInAnyDC(datacenters []string) bool {
  2357  	for _, dc := range datacenters {
  2358  		if glob.Glob(dc, n.Datacenter) {
  2359  			return true
  2360  		}
  2361  	}
  2362  	return false
  2363  }
  2364  
  2365  // IsInPool returns true if the node is in the pool argument or if the pool
  2366  // argument is the special "all" pool
  2367  func (n *Node) IsInPool(pool string) bool {
  2368  	return pool == NodePoolAll || n.NodePool == pool
  2369  }
  2370  
  2371  // HasEvent returns true if the node has the given message in its events list.
  2372  func (n *Node) HasEvent(msg string) bool {
  2373  	for _, ev := range n.Events {
  2374  		if ev.Message == msg {
  2375  			return true
  2376  		}
  2377  	}
  2378  	return false
  2379  }
  2380  
// Stub returns a summarized version of the node, with optional extra payloads
// selected by fields (may be nil for the minimal stub).
func (n *Node) Stub(fields *NodeStubFields) *NodeListStub {

	// Strip the port from the advertised HTTP address; an unparsable address
	// leaves addr empty.
	addr, _, _ := net.SplitHostPort(n.HTTPAddr)

	s := &NodeListStub{
		Address:               addr,
		ID:                    n.ID,
		Datacenter:            n.Datacenter,
		Name:                  n.Name,
		NodeClass:             n.NodeClass,
		NodePool:              n.NodePool,
		Version:               n.Attributes["nomad.version"],
		Drain:                 n.DrainStrategy != nil,
		SchedulingEligibility: n.SchedulingEligibility,
		Status:                n.Status,
		StatusDescription:     n.StatusDescription,
		Drivers:               n.Drivers,
		HostVolumes:           n.HostVolumes,
		LastDrain:             n.LastDrain,
		CreateIndex:           n.CreateIndex,
		ModifyIndex:           n.ModifyIndex,
	}

	if fields != nil {
		// Resource payloads are shared, not copied, with the stub.
		if fields.Resources {
			s.NodeResources = n.NodeResources
			s.ReservedResources = n.ReservedResources
		}

		// Fetch key attributes from the main Attributes map.
		if fields.OS {
			m := make(map[string]string)
			m["os.name"] = n.Attributes["os.name"]
			s.Attributes = m
		}
	}

	return s
}
  2421  
// NodeListStub is used to return a subset of node information
// for the node list
type NodeListStub struct {
	Address               string
	ID                    string
	Attributes            map[string]string `json:",omitempty"`
	Datacenter            string
	Name                  string
	NodePool              string
	NodeClass             string
	Version               string
	Drain                 bool
	SchedulingEligibility string
	Status                string
	StatusDescription     string
	Drivers               map[string]*DriverInfo
	HostVolumes           map[string]*ClientHostVolumeConfig
	NodeResources         *NodeResources         `json:",omitempty"`
	ReservedResources     *NodeReservedResources `json:",omitempty"`
	LastDrain             *DrainMetadata
	CreateIndex           uint64
	ModifyIndex           uint64
}
  2445  
// NodeStubFields defines which optional fields are included in the
// NodeListStub (see Node.Stub).
type NodeStubFields struct {
	Resources bool
	OS        bool
}
  2451  
// Resources is used to define the resources available
// on a client. Memory is in MB, CPU in MHz shares.
// COMPAT(0.10): superseded by NodeResources/AllocatedResources.
type Resources struct {
	CPU         int
	Cores       int
	MemoryMB    int
	MemoryMaxMB int
	DiskMB      int
	IOPS        int // COMPAT(0.10): Only being used to issue warnings
	Networks    Networks
	Devices     ResourceDevices
}
  2464  
const (
	// BytesInMegabyte is the number of bytes in a megabyte (MiB).
	BytesInMegabyte = 1024 * 1024
)
  2468  
  2469  // DefaultResources is a small resources object that contains the
  2470  // default resources requests that we will provide to an object.
  2471  // ---  THIS FUNCTION IS REPLICATED IN api/resources.go and should
  2472  // be kept in sync.
  2473  func DefaultResources() *Resources {
  2474  	return &Resources{
  2475  		CPU:      100,
  2476  		Cores:    0,
  2477  		MemoryMB: 300,
  2478  	}
  2479  }
  2480  
  2481  // MinResources is a small resources object that contains the
  2482  // absolute minimum resources that we will provide to an object.
  2483  // This should not be confused with the defaults which are
  2484  // provided in Canonicalize() ---  THIS FUNCTION IS REPLICATED IN
  2485  // api/resources.go and should be kept in sync.
  2486  func MinResources() *Resources {
  2487  	return &Resources{
  2488  		CPU:      1,
  2489  		Cores:    0,
  2490  		MemoryMB: 10,
  2491  	}
  2492  }
  2493  
  2494  // DiskInBytes returns the amount of disk resources in bytes.
  2495  func (r *Resources) DiskInBytes() int64 {
  2496  	return int64(r.DiskMB * BytesInMegabyte)
  2497  }
  2498  
// Validate checks the resource request for invalid combinations and returns
// all problems found as a single multierror, or nil when valid.
func (r *Resources) Validate() error {
	var mErr multierror.Error

	// 'cpu' and 'cores' are mutually exclusive ways to request compute.
	if r.Cores > 0 && r.CPU > 0 {
		mErr.Errors = append(mErr.Errors, errors.New("Task can only ask for 'cpu' or 'cores' resource, not both."))
	}

	if err := r.MeetsMinResources(); err != nil {
		mErr.Errors = append(mErr.Errors, err)
	}

	// Ensure the task isn't asking for disk resources
	if r.DiskMB > 0 {
		mErr.Errors = append(mErr.Errors, errors.New("Task can't ask for disk resources, they have to be specified at the task group level."))
	}

	for i, d := range r.Devices {
		if err := d.Validate(); err != nil {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("device %d failed validation: %v", i+1, err))
		}
	}

	// A MemoryMaxMB of zero is treated as unset here; otherwise it must be at
	// least MemoryMB.
	if r.MemoryMaxMB != 0 && r.MemoryMaxMB < r.MemoryMB {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("MemoryMaxMB value (%d) should be larger than MemoryMB value (%d)", r.MemoryMaxMB, r.MemoryMB))
	}

	return mErr.ErrorOrNil()
}
  2527  
  2528  // Merge merges this resource with another resource.
  2529  // COMPAT(0.10): Remove in 0.10
  2530  func (r *Resources) Merge(other *Resources) {
  2531  	if other.CPU != 0 {
  2532  		r.CPU = other.CPU
  2533  	}
  2534  	if other.Cores != 0 {
  2535  		r.Cores = other.Cores
  2536  	}
  2537  	if other.MemoryMB != 0 {
  2538  		r.MemoryMB = other.MemoryMB
  2539  	}
  2540  	if other.MemoryMaxMB != 0 {
  2541  		r.MemoryMaxMB = other.MemoryMaxMB
  2542  	}
  2543  	if other.DiskMB != 0 {
  2544  		r.DiskMB = other.DiskMB
  2545  	}
  2546  	if len(other.Networks) != 0 {
  2547  		r.Networks = other.Networks
  2548  	}
  2549  	if len(other.Devices) != 0 {
  2550  		r.Devices = other.Devices
  2551  	}
  2552  }
  2553  
  2554  // Equal Resources.
  2555  //
  2556  // COMPAT(0.10): Remove in 0.10
  2557  func (r *Resources) Equal(o *Resources) bool {
  2558  	if r == o {
  2559  		return true
  2560  	}
  2561  	if r == nil || o == nil {
  2562  		return false
  2563  	}
  2564  	return r.CPU == o.CPU &&
  2565  		r.Cores == o.Cores &&
  2566  		r.MemoryMB == o.MemoryMB &&
  2567  		r.MemoryMaxMB == o.MemoryMaxMB &&
  2568  		r.DiskMB == o.DiskMB &&
  2569  		r.IOPS == o.IOPS &&
  2570  		r.Networks.Equal(&o.Networks) &&
  2571  		r.Devices.Equal(&o.Devices)
  2572  }
  2573  
// ResourceDevices are part of Resources; a list of requested devices.
//
// COMPAT(0.10): Remove in 0.10.
type ResourceDevices []*RequestedDevice
  2578  
// Equal ResourceDevices as set keyed by Name.
//
// COMPAT(0.10): Remove in 0.10
func (d *ResourceDevices) Equal(o *ResourceDevices) bool {
	if d == o {
		return true
	}
	if d == nil || o == nil {
		return false
	}
	if len(*d) != len(*o) {
		return false
	}
	// Index the receiver's devices by name for O(1) lookups.
	// NOTE(review): assumes device names are unique within a set; duplicate
	// names would make the length check insufficient — confirm upstream
	// validation enforces uniqueness.
	m := make(map[string]*RequestedDevice, len(*d))
	for _, e := range *d {
		m[e.Name] = e
	}
	for _, oe := range *o {
		de, ok := m[oe.Name]
		if !ok || !de.Equal(oe) {
			return false
		}
	}
	return true
}
  2604  
  2605  // Canonicalize the Resources struct.
  2606  //
  2607  // COMPAT(0.10): Remove in 0.10
  2608  func (r *Resources) Canonicalize() {
  2609  	// Ensure that an empty and nil slices are treated the same to avoid scheduling
  2610  	// problems since we use reflect DeepEquals.
  2611  	if len(r.Networks) == 0 {
  2612  		r.Networks = nil
  2613  	}
  2614  	if len(r.Devices) == 0 {
  2615  		r.Devices = nil
  2616  	}
  2617  
  2618  	for _, n := range r.Networks {
  2619  		n.Canonicalize()
  2620  	}
  2621  }
  2622  
  2623  // MeetsMinResources returns an error if the resources specified are less than
  2624  // the minimum allowed.
  2625  // This is based on the minimums defined in the Resources type
  2626  // COMPAT(0.10): Remove in 0.10
  2627  func (r *Resources) MeetsMinResources() error {
  2628  	var mErr multierror.Error
  2629  	minResources := MinResources()
  2630  	if r.CPU < minResources.CPU && r.Cores == 0 {
  2631  		mErr.Errors = append(mErr.Errors, fmt.Errorf("minimum CPU value is %d; got %d", minResources.CPU, r.CPU))
  2632  	}
  2633  	if r.MemoryMB < minResources.MemoryMB {
  2634  		mErr.Errors = append(mErr.Errors, fmt.Errorf("minimum MemoryMB value is %d; got %d", minResources.MemoryMB, r.MemoryMB))
  2635  	}
  2636  	return mErr.ErrorOrNil()
  2637  }
  2638  
  2639  // Copy returns a deep copy of the resources
  2640  func (r *Resources) Copy() *Resources {
  2641  	if r == nil {
  2642  		return nil
  2643  	}
  2644  	newR := new(Resources)
  2645  	*newR = *r
  2646  
  2647  	// Copy the network objects
  2648  	newR.Networks = r.Networks.Copy()
  2649  
  2650  	// Copy the devices
  2651  	if r.Devices != nil {
  2652  		n := len(r.Devices)
  2653  		newR.Devices = make([]*RequestedDevice, n)
  2654  		for i := 0; i < n; i++ {
  2655  			newR.Devices[i] = r.Devices[i].Copy()
  2656  		}
  2657  	}
  2658  
  2659  	return newR
  2660  }
  2661  
  2662  // NetIndex finds the matching net index using device name
  2663  // COMPAT(0.10): Remove in 0.10
  2664  func (r *Resources) NetIndex(n *NetworkResource) int {
  2665  	return r.Networks.NetIndex(n)
  2666  }
  2667  
  2668  // Add adds the resources of the delta to this, potentially
  2669  // returning an error if not possible.
  2670  // COMPAT(0.10): Remove in 0.10
  2671  func (r *Resources) Add(delta *Resources) {
  2672  	if delta == nil {
  2673  		return
  2674  	}
  2675  
  2676  	r.CPU += delta.CPU
  2677  	r.MemoryMB += delta.MemoryMB
  2678  	if delta.MemoryMaxMB > 0 {
  2679  		r.MemoryMaxMB += delta.MemoryMaxMB
  2680  	} else {
  2681  		r.MemoryMaxMB += delta.MemoryMB
  2682  	}
  2683  	r.DiskMB += delta.DiskMB
  2684  
  2685  	for _, n := range delta.Networks {
  2686  		// Find the matching interface by IP or CIDR
  2687  		idx := r.NetIndex(n)
  2688  		if idx == -1 {
  2689  			r.Networks = append(r.Networks, n.Copy())
  2690  		} else {
  2691  			r.Networks[idx].Add(n)
  2692  		}
  2693  	}
  2694  }
  2695  
  2696  // GoString returns the string representation of the Resources struct.
  2697  //
  2698  // COMPAT(0.10): Remove in 0.10
  2699  func (r *Resources) GoString() string {
  2700  	return fmt.Sprintf("*%#v", *r)
  2701  }
  2702  
// NodeNetworkResource is used to describe a fingerprinted network of a node
type NodeNetworkResource struct {
	Mode string // host for physical networks, cni/<name> for cni networks

	// The following apply only to host networks
	Device     string // interface name
	MacAddress string // hardware address of the interface
	Speed      int    // link speed; unit not recorded here — presumably Mb/s, confirm with fingerprinter

	Addresses []NodeNetworkAddress // not valid for cni, for bridge there will only be 1 ip
}
  2714  
  2715  func (n *NodeNetworkResource) Equal(o *NodeNetworkResource) bool {
  2716  	return reflect.DeepEqual(n, o)
  2717  }
  2718  
  2719  func (n *NodeNetworkResource) Copy() *NodeNetworkResource {
  2720  	if n == nil {
  2721  		return nil
  2722  	}
  2723  
  2724  	c := new(NodeNetworkResource)
  2725  	*c = *n
  2726  
  2727  	if n.Addresses != nil {
  2728  		c.Addresses = make([]NodeNetworkAddress, len(n.Addresses))
  2729  		copy(c.Addresses, n.Addresses)
  2730  	}
  2731  
  2732  	return c
  2733  }
  2734  
  2735  func (n *NodeNetworkResource) HasAlias(alias string) bool {
  2736  	for _, addr := range n.Addresses {
  2737  		if addr.Alias == alias {
  2738  			return true
  2739  		}
  2740  	}
  2741  	return false
  2742  }
  2743  
// NodeNetworkAF is the address family of a node network address.
type NodeNetworkAF string

const (
	// NodeNetworkAF_IPv4 marks an IPv4 address.
	NodeNetworkAF_IPv4 NodeNetworkAF = "ipv4"
	// NodeNetworkAF_IPv6 marks an IPv6 address.
	NodeNetworkAF_IPv6 NodeNetworkAF = "ipv6"
)
  2750  
// NodeNetworkAddress is a single fingerprinted address on a node network.
type NodeNetworkAddress struct {
	Family        NodeNetworkAF // ipv4 or ipv6
	Alias         string        // name used to refer to this address
	Address       string
	ReservedPorts string // presumably a port-range string like NodeReservedNetworkResources.ReservedHostPorts — confirm
	Gateway       string // default route for this address
}
  2758  
// AllocatedPortMapping describes a single port that has been allocated,
// including the host IP it is bound to.
type AllocatedPortMapping struct {
	Label  string // port label from the jobspec
	Value  int    // port number on the host
	To     int    // port inside the task's network namespace
	HostIP string // host IP the port is bound to
}
  2765  
  2766  func (m *AllocatedPortMapping) Copy() *AllocatedPortMapping {
  2767  	return &AllocatedPortMapping{
  2768  		Label:  m.Label,
  2769  		Value:  m.Value,
  2770  		To:     m.To,
  2771  		HostIP: m.HostIP,
  2772  	}
  2773  }
  2774  
  2775  func (m *AllocatedPortMapping) Equal(o *AllocatedPortMapping) bool {
  2776  	if m == nil || o == nil {
  2777  		return m == o
  2778  	}
  2779  	switch {
  2780  	case m.Label != o.Label:
  2781  		return false
  2782  	case m.Value != o.Value:
  2783  		return false
  2784  	case m.To != o.To:
  2785  		return false
  2786  	case m.HostIP != o.HostIP:
  2787  		return false
  2788  	}
  2789  	return true
  2790  }
  2791  
  2792  type AllocatedPorts []AllocatedPortMapping
  2793  
  2794  func (p AllocatedPorts) Equal(o AllocatedPorts) bool {
  2795  	return slices.EqualFunc(p, o, func(a, b AllocatedPortMapping) bool {
  2796  		return a.Equal(&b)
  2797  	})
  2798  }
  2799  
  2800  func (p AllocatedPorts) Get(label string) (AllocatedPortMapping, bool) {
  2801  	for _, port := range p {
  2802  		if port.Label == label {
  2803  			return port, true
  2804  		}
  2805  	}
  2806  
  2807  	return AllocatedPortMapping{}, false
  2808  }
  2809  
// Port is a port requested in the jobspec, either statically assigned or
// dynamically allocated by the scheduler.
type Port struct {
	// Label is the key for HCL port blocks: port "foo" {}
	Label string

	// Value is the static or dynamic port value. For dynamic ports this
	// will be 0 in the jobspec and set by the scheduler.
	Value int

	// To is the port inside a network namespace where this port is
	// forwarded. -1 is an internal sentinel value used by Consul Connect
	// to mean "same as the host port."
	To int

	// HostNetwork is the name of the network this port should be assigned
	// to. Jobs with a HostNetwork set can only be placed on nodes with
	// that host network available.
	HostNetwork string
}
  2828  
// DNSConfig describes the DNS settings applied to a task's network
// namespace: nameservers, search domains, and resolver options.
type DNSConfig struct {
	Servers  []string // nameserver addresses
	Searches []string // search domains
	Options  []string // resolver options (resolv.conf style)
}
  2834  
  2835  func (d *DNSConfig) Equal(o *DNSConfig) bool {
  2836  	if d == nil || o == nil {
  2837  		return d == o
  2838  	}
  2839  
  2840  	switch {
  2841  	case !slices.Equal(d.Servers, o.Servers):
  2842  		return false
  2843  	case !slices.Equal(d.Searches, o.Searches):
  2844  		return false
  2845  	case !slices.Equal(d.Options, o.Options):
  2846  		return false
  2847  	}
  2848  
  2849  	return true
  2850  }
  2851  
  2852  func (d *DNSConfig) Copy() *DNSConfig {
  2853  	if d == nil {
  2854  		return nil
  2855  	}
  2856  	return &DNSConfig{
  2857  		Servers:  slices.Clone(d.Servers),
  2858  		Searches: slices.Clone(d.Searches),
  2859  		Options:  slices.Clone(d.Options),
  2860  	}
  2861  }
  2862  
  2863  func (d *DNSConfig) IsZero() bool {
  2864  	if d == nil {
  2865  		return true
  2866  	}
  2867  	return len(d.Options) == 0 || len(d.Searches) == 0 || len(d.Servers) == 0
  2868  }
  2869  
// NetworkResource is used to represent available network
// resources
type NetworkResource struct {
	Mode          string     // Mode of the network
	Device        string     // Name of the device
	CIDR          string     // CIDR block of addresses
	IP            string     // Host IP address
	Hostname      string     `json:",omitempty"` // Hostname of the network namespace
	MBits         int        // Throughput
	DNS           *DNSConfig // DNS Configuration
	ReservedPorts []Port     // Host Reserved ports
	DynamicPorts  []Port     // Host Dynamically assigned ports
}
  2883  
  2884  func (n *NetworkResource) Hash() uint32 {
  2885  	var data []byte
  2886  	data = append(data, []byte(fmt.Sprintf("%s%s%s%s%s%d", n.Mode, n.Device, n.CIDR, n.IP, n.Hostname, n.MBits))...)
  2887  
  2888  	for i, port := range n.ReservedPorts {
  2889  		data = append(data, []byte(fmt.Sprintf("r%d%s%d%d", i, port.Label, port.Value, port.To))...)
  2890  	}
  2891  
  2892  	for i, port := range n.DynamicPorts {
  2893  		data = append(data, []byte(fmt.Sprintf("d%d%s%d%d", i, port.Label, port.Value, port.To))...)
  2894  	}
  2895  
  2896  	return crc32.ChecksumIEEE(data)
  2897  }
  2898  
  2899  func (n *NetworkResource) Equal(other *NetworkResource) bool {
  2900  	return n.Hash() == other.Hash()
  2901  }
  2902  
  2903  func (n *NetworkResource) Canonicalize() {
  2904  	// Ensure that an empty and nil slices are treated the same to avoid scheduling
  2905  	// problems since we use reflect DeepEquals.
  2906  	if len(n.ReservedPorts) == 0 {
  2907  		n.ReservedPorts = nil
  2908  	}
  2909  	if len(n.DynamicPorts) == 0 {
  2910  		n.DynamicPorts = nil
  2911  	}
  2912  
  2913  	for i, p := range n.DynamicPorts {
  2914  		if p.HostNetwork == "" {
  2915  			n.DynamicPorts[i].HostNetwork = "default"
  2916  		}
  2917  	}
  2918  	for i, p := range n.ReservedPorts {
  2919  		if p.HostNetwork == "" {
  2920  			n.ReservedPorts[i].HostNetwork = "default"
  2921  		}
  2922  	}
  2923  }
  2924  
  2925  // Copy returns a deep copy of the network resource
  2926  func (n *NetworkResource) Copy() *NetworkResource {
  2927  	if n == nil {
  2928  		return nil
  2929  	}
  2930  	newR := new(NetworkResource)
  2931  	*newR = *n
  2932  	newR.DNS = n.DNS.Copy()
  2933  	if n.ReservedPorts != nil {
  2934  		newR.ReservedPorts = make([]Port, len(n.ReservedPorts))
  2935  		copy(newR.ReservedPorts, n.ReservedPorts)
  2936  	}
  2937  	if n.DynamicPorts != nil {
  2938  		newR.DynamicPorts = make([]Port, len(n.DynamicPorts))
  2939  		copy(newR.DynamicPorts, n.DynamicPorts)
  2940  	}
  2941  	return newR
  2942  }
  2943  
  2944  // Add adds the resources of the delta to this, potentially
  2945  // returning an error if not possible.
  2946  func (n *NetworkResource) Add(delta *NetworkResource) {
  2947  	if len(delta.ReservedPorts) > 0 {
  2948  		n.ReservedPorts = append(n.ReservedPorts, delta.ReservedPorts...)
  2949  	}
  2950  	n.MBits += delta.MBits
  2951  	n.DynamicPorts = append(n.DynamicPorts, delta.DynamicPorts...)
  2952  }
  2953  
  2954  func (n *NetworkResource) GoString() string {
  2955  	return fmt.Sprintf("*%#v", *n)
  2956  }
  2957  
  2958  // PortLabels returns a map of port labels to their assigned host ports.
  2959  func (n *NetworkResource) PortLabels() map[string]int {
  2960  	num := len(n.ReservedPorts) + len(n.DynamicPorts)
  2961  	labelValues := make(map[string]int, num)
  2962  	for _, port := range n.ReservedPorts {
  2963  		labelValues[port.Label] = port.Value
  2964  	}
  2965  	for _, port := range n.DynamicPorts {
  2966  		labelValues[port.Label] = port.Value
  2967  	}
  2968  	return labelValues
  2969  }
  2970  
  2971  // Networks defined for a task on the Resources struct.
  2972  type Networks []*NetworkResource
  2973  
  2974  func (ns Networks) Copy() Networks {
  2975  	if len(ns) == 0 {
  2976  		return nil
  2977  	}
  2978  
  2979  	out := make([]*NetworkResource, len(ns))
  2980  	for i := range ns {
  2981  		out[i] = ns[i].Copy()
  2982  	}
  2983  	return out
  2984  }
  2985  
  2986  // Port assignment and IP for the given label or empty values.
  2987  func (ns Networks) Port(label string) AllocatedPortMapping {
  2988  	for _, n := range ns {
  2989  		for _, p := range n.ReservedPorts {
  2990  			if p.Label == label {
  2991  				return AllocatedPortMapping{
  2992  					Label:  label,
  2993  					Value:  p.Value,
  2994  					To:     p.To,
  2995  					HostIP: n.IP,
  2996  				}
  2997  			}
  2998  		}
  2999  		for _, p := range n.DynamicPorts {
  3000  			if p.Label == label {
  3001  				return AllocatedPortMapping{
  3002  					Label:  label,
  3003  					Value:  p.Value,
  3004  					To:     p.To,
  3005  					HostIP: n.IP,
  3006  				}
  3007  			}
  3008  		}
  3009  	}
  3010  	return AllocatedPortMapping{}
  3011  }
  3012  
  3013  func (ns Networks) NetIndex(n *NetworkResource) int {
  3014  	for idx, net := range ns {
  3015  		if net.Device == n.Device {
  3016  			return idx
  3017  		}
  3018  	}
  3019  	return -1
  3020  }
  3021  
// RequestedDevice is used to request a device for a task.
type RequestedDevice struct {
	// Name is the request name. The possible values are as follows:
	// * <type>: A single value only specifies the type of request.
	// * <vendor>/<type>: A single slash delimiter assumes the vendor and type of device is specified.
	// * <vendor>/<type>/<name>: Two slash delimiters assume vendor, type and specific model are specified.
	//
	// Examples are as follows:
	// * "gpu"
	// * "nvidia/gpu"
	// * "nvidia/gpu/GTX2080Ti"
	Name string

	// Count is the number of requested devices
	Count uint64

	// Constraints are a set of constraints to apply when selecting the device
	// to use.
	Constraints Constraints

	// Affinities are a set of affinities to apply when selecting the device
	// to use.
	Affinities Affinities
}
  3046  
  3047  func (r *RequestedDevice) Equal(o *RequestedDevice) bool {
  3048  	if r == o {
  3049  		return true
  3050  	}
  3051  	if r == nil || o == nil {
  3052  		return false
  3053  	}
  3054  	return r.Name == o.Name &&
  3055  		r.Count == o.Count &&
  3056  		r.Constraints.Equal(&o.Constraints) &&
  3057  		r.Affinities.Equal(&o.Affinities)
  3058  }
  3059  
  3060  func (r *RequestedDevice) Copy() *RequestedDevice {
  3061  	if r == nil {
  3062  		return nil
  3063  	}
  3064  
  3065  	nr := *r
  3066  	nr.Constraints = CopySliceConstraints(nr.Constraints)
  3067  	nr.Affinities = CopySliceAffinities(nr.Affinities)
  3068  
  3069  	return &nr
  3070  }
  3071  
  3072  func (r *RequestedDevice) ID() *DeviceIdTuple {
  3073  	if r == nil || r.Name == "" {
  3074  		return nil
  3075  	}
  3076  
  3077  	parts := strings.SplitN(r.Name, "/", 3)
  3078  	switch len(parts) {
  3079  	case 1:
  3080  		return &DeviceIdTuple{
  3081  			Type: parts[0],
  3082  		}
  3083  	case 2:
  3084  		return &DeviceIdTuple{
  3085  			Vendor: parts[0],
  3086  			Type:   parts[1],
  3087  		}
  3088  	default:
  3089  		return &DeviceIdTuple{
  3090  			Vendor: parts[0],
  3091  			Type:   parts[1],
  3092  			Name:   parts[2],
  3093  		}
  3094  	}
  3095  }
  3096  
  3097  func (r *RequestedDevice) Validate() error {
  3098  	if r == nil {
  3099  		return nil
  3100  	}
  3101  
  3102  	var mErr multierror.Error
  3103  	if r.Name == "" {
  3104  		_ = multierror.Append(&mErr, errors.New("device name must be given as one of the following: type, vendor/type, or vendor/type/name"))
  3105  	}
  3106  
  3107  	for idx, constr := range r.Constraints {
  3108  		// Ensure that the constraint doesn't use an operand we do not allow
  3109  		switch constr.Operand {
  3110  		case ConstraintDistinctHosts, ConstraintDistinctProperty:
  3111  			outer := fmt.Errorf("Constraint %d validation failed: using unsupported operand %q", idx+1, constr.Operand)
  3112  			_ = multierror.Append(&mErr, outer)
  3113  		default:
  3114  			if err := constr.Validate(); err != nil {
  3115  				outer := fmt.Errorf("Constraint %d validation failed: %s", idx+1, err)
  3116  				_ = multierror.Append(&mErr, outer)
  3117  			}
  3118  		}
  3119  	}
  3120  	for idx, affinity := range r.Affinities {
  3121  		if err := affinity.Validate(); err != nil {
  3122  			outer := fmt.Errorf("Affinity %d validation failed: %s", idx+1, err)
  3123  			_ = multierror.Append(&mErr, outer)
  3124  		}
  3125  	}
  3126  
  3127  	return mErr.ErrorOrNil()
  3128  }
  3129  
// NodeResources is used to define the resources available on a client node.
type NodeResources struct {
	Cpu     NodeCpuResources
	Memory  NodeMemoryResources
	Disk    NodeDiskResources
	Devices []*NodeDeviceResource

	// NodeNetworks was added in Nomad 0.12 to support multiple interfaces.
	// It is the superset of host_networks, fingerprinted networks, and the
	// node's default interface.
	NodeNetworks []*NodeNetworkResource

	// Networks is the node's bridge network and default interface. It is
	// only used when scheduling jobs with a deprecated
	// task.resources.network block.
	Networks Networks

	// MinDynamicPort and MaxDynamicPort represent the inclusive port range
	// to select dynamic ports from across all networks.
	MinDynamicPort int
	MaxDynamicPort int
}
  3152  
  3153  func (n *NodeResources) Copy() *NodeResources {
  3154  	if n == nil {
  3155  		return nil
  3156  	}
  3157  
  3158  	newN := new(NodeResources)
  3159  	*newN = *n
  3160  	newN.Cpu = n.Cpu.Copy()
  3161  	newN.Networks = n.Networks.Copy()
  3162  
  3163  	if n.NodeNetworks != nil {
  3164  		newN.NodeNetworks = make([]*NodeNetworkResource, len(n.NodeNetworks))
  3165  		for i, nn := range n.NodeNetworks {
  3166  			newN.NodeNetworks[i] = nn.Copy()
  3167  		}
  3168  	}
  3169  
  3170  	// Copy the devices
  3171  	if n.Devices != nil {
  3172  		devices := len(n.Devices)
  3173  		newN.Devices = make([]*NodeDeviceResource, devices)
  3174  		for i := 0; i < devices; i++ {
  3175  			newN.Devices[i] = n.Devices[i].Copy()
  3176  		}
  3177  	}
  3178  
  3179  	return newN
  3180  }
  3181  
  3182  // Comparable returns a comparable version of the nodes resources. This
  3183  // conversion can be lossy so care must be taken when using it.
  3184  func (n *NodeResources) Comparable() *ComparableResources {
  3185  	if n == nil {
  3186  		return nil
  3187  	}
  3188  
  3189  	c := &ComparableResources{
  3190  		Flattened: AllocatedTaskResources{
  3191  			Cpu: AllocatedCpuResources{
  3192  				CpuShares:     n.Cpu.CpuShares,
  3193  				ReservedCores: n.Cpu.ReservableCpuCores,
  3194  			},
  3195  			Memory: AllocatedMemoryResources{
  3196  				MemoryMB: n.Memory.MemoryMB,
  3197  			},
  3198  			Networks: n.Networks,
  3199  		},
  3200  		Shared: AllocatedSharedResources{
  3201  			DiskMB: n.Disk.DiskMB,
  3202  		},
  3203  	}
  3204  	return c
  3205  }
  3206  
  3207  func (n *NodeResources) Merge(o *NodeResources) {
  3208  	if o == nil {
  3209  		return
  3210  	}
  3211  
  3212  	n.Cpu.Merge(&o.Cpu)
  3213  	n.Memory.Merge(&o.Memory)
  3214  	n.Disk.Merge(&o.Disk)
  3215  
  3216  	if len(o.Networks) != 0 {
  3217  		n.Networks = append(n.Networks, o.Networks...)
  3218  	}
  3219  
  3220  	if len(o.Devices) != 0 {
  3221  		n.Devices = o.Devices
  3222  	}
  3223  
  3224  	if len(o.NodeNetworks) != 0 {
  3225  		for _, nw := range o.NodeNetworks {
  3226  			if i, nnw := lookupNetworkByDevice(n.NodeNetworks, nw.Device); nnw != nil {
  3227  				n.NodeNetworks[i] = nw
  3228  			} else {
  3229  				n.NodeNetworks = append(n.NodeNetworks, nw)
  3230  			}
  3231  		}
  3232  	}
  3233  }
  3234  
  3235  func lookupNetworkByDevice(nets []*NodeNetworkResource, name string) (int, *NodeNetworkResource) {
  3236  	for i, nw := range nets {
  3237  		if nw.Device == name {
  3238  			return i, nw
  3239  		}
  3240  	}
  3241  	return 0, nil
  3242  }
  3243  
  3244  func (n *NodeResources) Equal(o *NodeResources) bool {
  3245  	if o == nil && n == nil {
  3246  		return true
  3247  	} else if o == nil {
  3248  		return false
  3249  	} else if n == nil {
  3250  		return false
  3251  	}
  3252  
  3253  	if !n.Cpu.Equal(&o.Cpu) {
  3254  		return false
  3255  	}
  3256  	if !n.Memory.Equal(&o.Memory) {
  3257  		return false
  3258  	}
  3259  	if !n.Disk.Equal(&o.Disk) {
  3260  		return false
  3261  	}
  3262  	if !n.Networks.Equal(&o.Networks) {
  3263  		return false
  3264  	}
  3265  
  3266  	// Check the devices
  3267  	if !DevicesEquals(n.Devices, o.Devices) {
  3268  		return false
  3269  	}
  3270  
  3271  	if !NodeNetworksEquals(n.NodeNetworks, o.NodeNetworks) {
  3272  		return false
  3273  	}
  3274  
  3275  	return true
  3276  }
  3277  
  3278  // Equal equates Networks as a set
  3279  func (ns *Networks) Equal(o *Networks) bool {
  3280  	if ns == o {
  3281  		return true
  3282  	}
  3283  	if ns == nil || o == nil {
  3284  		return false
  3285  	}
  3286  	if len(*ns) != len(*o) {
  3287  		return false
  3288  	}
  3289  SETEQUALS:
  3290  	for _, ne := range *ns {
  3291  		for _, oe := range *o {
  3292  			if ne.Equal(oe) {
  3293  				continue SETEQUALS
  3294  			}
  3295  		}
  3296  		return false
  3297  	}
  3298  	return true
  3299  }
  3300  
  3301  // DevicesEquals returns true if the two device arrays are set equal
  3302  func DevicesEquals(d1, d2 []*NodeDeviceResource) bool {
  3303  	if len(d1) != len(d2) {
  3304  		return false
  3305  	}
  3306  	idMap := make(map[DeviceIdTuple]*NodeDeviceResource, len(d1))
  3307  	for _, d := range d1 {
  3308  		idMap[*d.ID()] = d
  3309  	}
  3310  	for _, otherD := range d2 {
  3311  		if d, ok := idMap[*otherD.ID()]; !ok || !d.Equal(otherD) {
  3312  			return false
  3313  		}
  3314  	}
  3315  
  3316  	return true
  3317  }
  3318  
  3319  func NodeNetworksEquals(n1, n2 []*NodeNetworkResource) bool {
  3320  	if len(n1) != len(n2) {
  3321  		return false
  3322  	}
  3323  
  3324  	netMap := make(map[string]*NodeNetworkResource, len(n1))
  3325  	for _, n := range n1 {
  3326  		netMap[n.Device] = n
  3327  	}
  3328  	for _, otherN := range n2 {
  3329  		if n, ok := netMap[otherN.Device]; !ok || !n.Equal(otherN) {
  3330  			return false
  3331  		}
  3332  	}
  3333  
  3334  	return true
  3335  
  3336  }
  3337  
// NodeCpuResources captures the CPU resources of the node.
type NodeCpuResources struct {
	// CpuShares is the CPU shares available. This is calculated by number of
	// cores multiplied by the core frequency.
	CpuShares int64

	// TotalCpuCores is the total number of cores on the machine. This includes cores not in
	// the agent's cpuset if on a linux platform
	TotalCpuCores uint16

	// ReservableCpuCores is the set of cpus which are available to be reserved on the Node.
	// This value is currently only reported on Linux platforms which support cgroups and is
	// discovered by inspecting the cpuset of the agent's cgroup.
	ReservableCpuCores []uint16
}
  3353  
  3354  func (n NodeCpuResources) Copy() NodeCpuResources {
  3355  	newN := n
  3356  	if n.ReservableCpuCores != nil {
  3357  		newN.ReservableCpuCores = make([]uint16, len(n.ReservableCpuCores))
  3358  		copy(newN.ReservableCpuCores, n.ReservableCpuCores)
  3359  	}
  3360  
  3361  	return newN
  3362  }
  3363  
  3364  func (n *NodeCpuResources) Merge(o *NodeCpuResources) {
  3365  	if o == nil {
  3366  		return
  3367  	}
  3368  
  3369  	if o.CpuShares != 0 {
  3370  		n.CpuShares = o.CpuShares
  3371  	}
  3372  
  3373  	if o.TotalCpuCores != 0 {
  3374  		n.TotalCpuCores = o.TotalCpuCores
  3375  	}
  3376  
  3377  	if len(o.ReservableCpuCores) != 0 {
  3378  		n.ReservableCpuCores = o.ReservableCpuCores
  3379  	}
  3380  }
  3381  
  3382  func (n *NodeCpuResources) Equal(o *NodeCpuResources) bool {
  3383  	if o == nil && n == nil {
  3384  		return true
  3385  	} else if o == nil {
  3386  		return false
  3387  	} else if n == nil {
  3388  		return false
  3389  	}
  3390  
  3391  	if n.CpuShares != o.CpuShares {
  3392  		return false
  3393  	}
  3394  
  3395  	if n.TotalCpuCores != o.TotalCpuCores {
  3396  		return false
  3397  	}
  3398  
  3399  	if len(n.ReservableCpuCores) != len(o.ReservableCpuCores) {
  3400  		return false
  3401  	}
  3402  	for i := range n.ReservableCpuCores {
  3403  		if n.ReservableCpuCores[i] != o.ReservableCpuCores[i] {
  3404  			return false
  3405  		}
  3406  	}
  3407  	return true
  3408  }
  3409  
  3410  func (n *NodeCpuResources) SharesPerCore() int64 {
  3411  	return n.CpuShares / int64(n.TotalCpuCores)
  3412  }
  3413  
// NodeMemoryResources captures the memory resources of the node
type NodeMemoryResources struct {
	// MemoryMB is the total available memory on the node
	MemoryMB int64
}
  3419  
  3420  func (n *NodeMemoryResources) Merge(o *NodeMemoryResources) {
  3421  	if o == nil {
  3422  		return
  3423  	}
  3424  
  3425  	if o.MemoryMB != 0 {
  3426  		n.MemoryMB = o.MemoryMB
  3427  	}
  3428  }
  3429  
  3430  func (n *NodeMemoryResources) Equal(o *NodeMemoryResources) bool {
  3431  	if o == nil && n == nil {
  3432  		return true
  3433  	} else if o == nil {
  3434  		return false
  3435  	} else if n == nil {
  3436  		return false
  3437  	}
  3438  
  3439  	if n.MemoryMB != o.MemoryMB {
  3440  		return false
  3441  	}
  3442  
  3443  	return true
  3444  }
  3445  
// NodeDiskResources captures the disk resources of the node
type NodeDiskResources struct {
	// DiskMB is the total available disk space on the node
	DiskMB int64
}
  3451  
  3452  func (n *NodeDiskResources) Merge(o *NodeDiskResources) {
  3453  	if o == nil {
  3454  		return
  3455  	}
  3456  	if o.DiskMB != 0 {
  3457  		n.DiskMB = o.DiskMB
  3458  	}
  3459  }
  3460  
  3461  func (n *NodeDiskResources) Equal(o *NodeDiskResources) bool {
  3462  	if o == nil && n == nil {
  3463  		return true
  3464  	} else if o == nil {
  3465  		return false
  3466  	} else if n == nil {
  3467  		return false
  3468  	}
  3469  
  3470  	if n.DiskMB != o.DiskMB {
  3471  		return false
  3472  	}
  3473  
  3474  	return true
  3475  }
  3476  
// DeviceIdTuple is the tuple that identifies a device
type DeviceIdTuple struct {
	Vendor string // device vendor, e.g. "nvidia"
	Type   string // device type, e.g. "gpu"
	Name   string // specific model, e.g. "GTX2080Ti"
}
  3483  
  3484  func (id *DeviceIdTuple) String() string {
  3485  	if id == nil {
  3486  		return ""
  3487  	}
  3488  
  3489  	return fmt.Sprintf("%s/%s/%s", id.Vendor, id.Type, id.Name)
  3490  }
  3491  
  3492  // Matches returns if this Device ID is a superset of the passed ID.
  3493  func (id *DeviceIdTuple) Matches(other *DeviceIdTuple) bool {
  3494  	if other == nil {
  3495  		return false
  3496  	}
  3497  
  3498  	if other.Name != "" && other.Name != id.Name {
  3499  		return false
  3500  	}
  3501  
  3502  	if other.Vendor != "" && other.Vendor != id.Vendor {
  3503  		return false
  3504  	}
  3505  
  3506  	if other.Type != "" && other.Type != id.Type {
  3507  		return false
  3508  	}
  3509  
  3510  	return true
  3511  }
  3512  
  3513  // Equal returns if this Device ID is the same as the passed ID.
  3514  func (id *DeviceIdTuple) Equal(o *DeviceIdTuple) bool {
  3515  	if id == nil && o == nil {
  3516  		return true
  3517  	} else if id == nil || o == nil {
  3518  		return false
  3519  	}
  3520  
  3521  	return o.Vendor == id.Vendor && o.Type == id.Type && o.Name == id.Name
  3522  }
  3523  
// NodeDeviceResource captures a set of devices sharing a common
// vendor/type/device_name tuple.
type NodeDeviceResource struct {
	Vendor     string                          // device vendor
	Type       string                          // device type
	Name       string                          // device model name
	Instances  []*NodeDevice                   // individual device instances in this group
	Attributes map[string]*psstructs.Attribute // plugin-reported attributes shared by the group
}
  3533  
  3534  func (n *NodeDeviceResource) ID() *DeviceIdTuple {
  3535  	if n == nil {
  3536  		return nil
  3537  	}
  3538  
  3539  	return &DeviceIdTuple{
  3540  		Vendor: n.Vendor,
  3541  		Type:   n.Type,
  3542  		Name:   n.Name,
  3543  	}
  3544  }
  3545  
  3546  func (n *NodeDeviceResource) Copy() *NodeDeviceResource {
  3547  	if n == nil {
  3548  		return nil
  3549  	}
  3550  
  3551  	// Copy the primitives
  3552  	nn := *n
  3553  
  3554  	// Copy the device instances
  3555  	if l := len(nn.Instances); l != 0 {
  3556  		nn.Instances = make([]*NodeDevice, 0, l)
  3557  		for _, d := range n.Instances {
  3558  			nn.Instances = append(nn.Instances, d.Copy())
  3559  		}
  3560  	}
  3561  
  3562  	// Copy the Attributes
  3563  	nn.Attributes = psstructs.CopyMapStringAttribute(nn.Attributes)
  3564  
  3565  	return &nn
  3566  }
  3567  
// Equal reports whether two device groups are identical: same
// vendor/type/name tuple, same attributes, and the same set of
// instances (matched by instance ID).
func (n *NodeDeviceResource) Equal(o *NodeDeviceResource) bool {
	// Two nils compare equal; exactly one nil does not.
	if o == nil && n == nil {
		return true
	} else if o == nil {
		return false
	} else if n == nil {
		return false
	}

	// The identity tuple must match exactly.
	if n.Vendor != o.Vendor {
		return false
	} else if n.Type != o.Type {
		return false
	} else if n.Name != o.Name {
		return false
	}

	// Check the attributes.
	// NOTE(review): v and otherV are *psstructs.Attribute, so "v != otherV"
	// compares pointer identity, not attribute contents — maps with
	// semantically equal attributes from different allocations will compare
	// unequal. Confirm this is intentional before relying on Equal here.
	if len(n.Attributes) != len(o.Attributes) {
		return false
	}
	for k, v := range n.Attributes {
		if otherV, ok := o.Attributes[k]; !ok || v != otherV {
			return false
		}
	}

	// Check the instances as a set keyed by instance ID.
	if len(n.Instances) != len(o.Instances) {
		return false
	}
	idMap := make(map[string]*NodeDevice, len(n.Instances))
	for _, d := range n.Instances {
		idMap[d.ID] = d
	}
	for _, otherD := range o.Instances {
		if d, ok := idMap[otherD.ID]; !ok || !d.Equal(otherD) {
			return false
		}
	}

	return true
}
  3611  
// NodeDevice is an instance of a particular device.
type NodeDevice struct {
	// ID is the ID of the device.
	ID string

	// Healthy captures whether the device is healthy.
	Healthy bool

	// HealthDescription is used to provide a human readable description of why
	// the device may be unhealthy.
	HealthDescription string

	// Locality stores HW locality information for the node to optionally be
	// used when making placement decisions.
	Locality *NodeDeviceLocality
}
  3628  
  3629  func (n *NodeDevice) Equal(o *NodeDevice) bool {
  3630  	if o == nil && n == nil {
  3631  		return true
  3632  	} else if o == nil {
  3633  		return false
  3634  	} else if n == nil {
  3635  		return false
  3636  	}
  3637  
  3638  	if n.ID != o.ID {
  3639  		return false
  3640  	} else if n.Healthy != o.Healthy {
  3641  		return false
  3642  	} else if n.HealthDescription != o.HealthDescription {
  3643  		return false
  3644  	} else if !n.Locality.Equal(o.Locality) {
  3645  		return false
  3646  	}
  3647  
  3648  	return false
  3649  }
  3650  
  3651  func (n *NodeDevice) Copy() *NodeDevice {
  3652  	if n == nil {
  3653  		return nil
  3654  	}
  3655  
  3656  	// Copy the primitives
  3657  	nn := *n
  3658  
  3659  	// Copy the locality
  3660  	nn.Locality = nn.Locality.Copy()
  3661  
  3662  	return &nn
  3663  }
  3664  
// NodeDeviceLocality stores information about the devices hardware locality on
// the node.
type NodeDeviceLocality struct {
	// PciBusID is the PCI Bus ID for the device.
	PciBusID string
}
  3671  
  3672  func (n *NodeDeviceLocality) Equal(o *NodeDeviceLocality) bool {
  3673  	if o == nil && n == nil {
  3674  		return true
  3675  	} else if o == nil {
  3676  		return false
  3677  	} else if n == nil {
  3678  		return false
  3679  	}
  3680  
  3681  	if n.PciBusID != o.PciBusID {
  3682  		return false
  3683  	}
  3684  
  3685  	return true
  3686  }
  3687  
  3688  func (n *NodeDeviceLocality) Copy() *NodeDeviceLocality {
  3689  	if n == nil {
  3690  		return nil
  3691  	}
  3692  
  3693  	// Copy the primitives
  3694  	nn := *n
  3695  	return &nn
  3696  }
  3697  
// NodeReservedResources is used to capture the resources on a client node that
// should be reserved and not made available to jobs.
type NodeReservedResources struct {
	Cpu      NodeReservedCpuResources
	Memory   NodeReservedMemoryResources
	Disk     NodeReservedDiskResources
	Networks NodeReservedNetworkResources
}
  3706  
  3707  func (n *NodeReservedResources) Copy() *NodeReservedResources {
  3708  	if n == nil {
  3709  		return nil
  3710  	}
  3711  	newN := new(NodeReservedResources)
  3712  	*newN = *n
  3713  	return newN
  3714  }
  3715  
  3716  // Comparable returns a comparable version of the node's reserved resources. The
  3717  // returned resources doesn't contain any network information. This conversion
  3718  // can be lossy so care must be taken when using it.
  3719  func (n *NodeReservedResources) Comparable() *ComparableResources {
  3720  	if n == nil {
  3721  		return nil
  3722  	}
  3723  
  3724  	c := &ComparableResources{
  3725  		Flattened: AllocatedTaskResources{
  3726  			Cpu: AllocatedCpuResources{
  3727  				CpuShares:     n.Cpu.CpuShares,
  3728  				ReservedCores: n.Cpu.ReservedCpuCores,
  3729  			},
  3730  			Memory: AllocatedMemoryResources{
  3731  				MemoryMB: n.Memory.MemoryMB,
  3732  			},
  3733  		},
  3734  		Shared: AllocatedSharedResources{
  3735  			DiskMB: n.Disk.DiskMB,
  3736  		},
  3737  	}
  3738  	return c
  3739  }
  3740  
// NodeReservedCpuResources captures the reserved CPU resources of the node.
type NodeReservedCpuResources struct {
	// CpuShares is the amount of CPU bandwidth reserved, in MHz shares.
	CpuShares int64
	// ReservedCpuCores lists individual core IDs withheld from scheduling.
	ReservedCpuCores []uint16
}
  3746  
// NodeReservedMemoryResources captures the reserved memory resources of the
// node, in megabytes.
type NodeReservedMemoryResources struct {
	MemoryMB int64
}
  3751  
// NodeReservedDiskResources captures the reserved disk resources of the node,
// in megabytes.
type NodeReservedDiskResources struct {
	DiskMB int64
}
  3756  
// NodeReservedNetworkResources captures the reserved network resources of the node.
type NodeReservedNetworkResources struct {
	// ReservedHostPorts is the set of ports reserved on all host network
	// interfaces. Its format is a comma separated list of integers or integer
	// ranges, e.g. "80,443,1000-2000,2005".
	ReservedHostPorts string
}
  3764  
// ParseReservedHostPorts returns the reserved host ports as a flat list of
// port numbers, expanding any ranges found in the comma-separated
// ReservedHostPorts specification. An error is returned for malformed input.
func (n *NodeReservedNetworkResources) ParseReservedHostPorts() ([]uint64, error) {
	return ParsePortRanges(n.ReservedHostPorts)
}
  3769  
// AllocatedResources is the set of resources to be used by an allocation.
type AllocatedResources struct {
	// Tasks is a mapping of task name to the resources for the task.
	Tasks map[string]*AllocatedTaskResources

	// TaskLifecycles maps task name to its lifecycle configuration, keyed
	// the same way as Tasks.
	TaskLifecycles map[string]*TaskLifecycleConfig

	// Shared is the set of resource that are shared by all tasks in the group.
	Shared AllocatedSharedResources
}
  3779  
  3780  func (a *AllocatedResources) Copy() *AllocatedResources {
  3781  	if a == nil {
  3782  		return nil
  3783  	}
  3784  
  3785  	out := AllocatedResources{
  3786  		Shared: a.Shared.Copy(),
  3787  	}
  3788  
  3789  	if a.Tasks != nil {
  3790  		out.Tasks = make(map[string]*AllocatedTaskResources, len(out.Tasks))
  3791  		for task, resource := range a.Tasks {
  3792  			out.Tasks[task] = resource.Copy()
  3793  		}
  3794  	}
  3795  	if a.TaskLifecycles != nil {
  3796  		out.TaskLifecycles = make(map[string]*TaskLifecycleConfig, len(out.TaskLifecycles))
  3797  		for task, lifecycle := range a.TaskLifecycles {
  3798  			out.TaskLifecycles[task] = lifecycle.Copy()
  3799  		}
  3800  
  3801  	}
  3802  
  3803  	return &out
  3804  }
  3805  
// Comparable returns a comparable version of the allocations allocated
// resources. This conversion can be lossy so care must be taken when using it.
func (a *AllocatedResources) Comparable() *ComparableResources {
	if a == nil {
		return nil
	}

	c := &ComparableResources{
		Shared: a.Shared,
	}

	// Bucket each task's resources by lifecycle hook so the flattened view
	// can combine them below.
	prestartSidecarTasks := &AllocatedTaskResources{}
	prestartEphemeralTasks := &AllocatedTaskResources{}
	main := &AllocatedTaskResources{}
	poststopTasks := &AllocatedTaskResources{}

	for taskName, r := range a.Tasks {
		lc := a.TaskLifecycles[taskName]
		if lc == nil {
			// Tasks with no lifecycle config are main tasks.
			main.Add(r)
		} else if lc.Hook == TaskLifecycleHookPrestart {
			if lc.Sidecar {
				prestartSidecarTasks.Add(r)
			} else {
				prestartEphemeralTasks.Add(r)
			}
		} else if lc.Hook == TaskLifecycleHookPoststop {
			poststopTasks.Add(r)
		}
		// NOTE(review): other hooks (e.g. poststart) fall through and are
		// not counted — confirm that is intentional.
	}

	// Ephemeral prestart tasks are folded in via Max against main and
	// poststop buckets (presumably because they do not run concurrently
	// with them — TODO confirm), while sidecars are summed on top.
	// update this loop to account for lifecycle hook
	prestartEphemeralTasks.Max(main)
	prestartEphemeralTasks.Max(poststopTasks)
	prestartSidecarTasks.Add(prestartEphemeralTasks)
	c.Flattened.Add(prestartSidecarTasks)

	// Add network resources that are at the task group level
	for _, network := range a.Shared.Networks {
		c.Flattened.Add(&AllocatedTaskResources{
			Networks: []*NetworkResource{network},
		})
	}

	return c
}
  3852  
  3853  // OldTaskResources returns the pre-0.9.0 map of task resources
  3854  func (a *AllocatedResources) OldTaskResources() map[string]*Resources {
  3855  	m := make(map[string]*Resources, len(a.Tasks))
  3856  	for name, res := range a.Tasks {
  3857  		m[name] = &Resources{
  3858  			CPU:         int(res.Cpu.CpuShares),
  3859  			MemoryMB:    int(res.Memory.MemoryMB),
  3860  			MemoryMaxMB: int(res.Memory.MemoryMaxMB),
  3861  			Networks:    res.Networks,
  3862  		}
  3863  	}
  3864  
  3865  	return m
  3866  }
  3867  
  3868  func (a *AllocatedResources) Canonicalize() {
  3869  	a.Shared.Canonicalize()
  3870  
  3871  	for _, r := range a.Tasks {
  3872  		for _, nw := range r.Networks {
  3873  			for _, port := range append(nw.DynamicPorts, nw.ReservedPorts...) {
  3874  				a.Shared.Ports = append(a.Shared.Ports, AllocatedPortMapping{
  3875  					Label:  port.Label,
  3876  					Value:  port.Value,
  3877  					To:     port.To,
  3878  					HostIP: nw.IP,
  3879  				})
  3880  			}
  3881  		}
  3882  	}
  3883  }
  3884  
// AllocatedTaskResources are the set of resources allocated to a task.
type AllocatedTaskResources struct {
	Cpu      AllocatedCpuResources
	Memory   AllocatedMemoryResources
	Networks Networks
	Devices  []*AllocatedDeviceResource
}
  3892  
  3893  func (a *AllocatedTaskResources) Copy() *AllocatedTaskResources {
  3894  	if a == nil {
  3895  		return nil
  3896  	}
  3897  	newA := new(AllocatedTaskResources)
  3898  	*newA = *a
  3899  
  3900  	// Copy the networks
  3901  	newA.Networks = a.Networks.Copy()
  3902  
  3903  	// Copy the devices
  3904  	if newA.Devices != nil {
  3905  		n := len(a.Devices)
  3906  		newA.Devices = make([]*AllocatedDeviceResource, n)
  3907  		for i := 0; i < n; i++ {
  3908  			newA.Devices[i] = a.Devices[i].Copy()
  3909  		}
  3910  	}
  3911  
  3912  	return newA
  3913  }
  3914  
// NetIndex finds the index of the network in a.Networks matching n by device
// name (delegates to Networks.NetIndex); returns -1 when no match exists.
func (a *AllocatedTaskResources) NetIndex(n *NetworkResource) int {
	return a.Networks.NetIndex(n)
}
  3919  
  3920  func (a *AllocatedTaskResources) Add(delta *AllocatedTaskResources) {
  3921  	if delta == nil {
  3922  		return
  3923  	}
  3924  
  3925  	a.Cpu.Add(&delta.Cpu)
  3926  	a.Memory.Add(&delta.Memory)
  3927  
  3928  	for _, n := range delta.Networks {
  3929  		// Find the matching interface by IP or CIDR
  3930  		idx := a.NetIndex(n)
  3931  		if idx == -1 {
  3932  			a.Networks = append(a.Networks, n.Copy())
  3933  		} else {
  3934  			a.Networks[idx].Add(n)
  3935  		}
  3936  	}
  3937  
  3938  	for _, d := range delta.Devices {
  3939  		// Find the matching device
  3940  		idx := AllocatedDevices(a.Devices).Index(d)
  3941  		if idx == -1 {
  3942  			a.Devices = append(a.Devices, d.Copy())
  3943  		} else {
  3944  			a.Devices[idx].Add(d)
  3945  		}
  3946  	}
  3947  }
  3948  
  3949  func (a *AllocatedTaskResources) Max(other *AllocatedTaskResources) {
  3950  	if other == nil {
  3951  		return
  3952  	}
  3953  
  3954  	a.Cpu.Max(&other.Cpu)
  3955  	a.Memory.Max(&other.Memory)
  3956  
  3957  	for _, n := range other.Networks {
  3958  		// Find the matching interface by IP or CIDR
  3959  		idx := a.NetIndex(n)
  3960  		if idx == -1 {
  3961  			a.Networks = append(a.Networks, n.Copy())
  3962  		} else {
  3963  			a.Networks[idx].Add(n)
  3964  		}
  3965  	}
  3966  
  3967  	for _, d := range other.Devices {
  3968  		// Find the matching device
  3969  		idx := AllocatedDevices(a.Devices).Index(d)
  3970  		if idx == -1 {
  3971  			a.Devices = append(a.Devices, d.Copy())
  3972  		} else {
  3973  			a.Devices[idx].Add(d)
  3974  		}
  3975  	}
  3976  }
  3977  
  3978  // Comparable turns AllocatedTaskResources into ComparableResources
  3979  // as a helper step in preemption
  3980  func (a *AllocatedTaskResources) Comparable() *ComparableResources {
  3981  	ret := &ComparableResources{
  3982  		Flattened: AllocatedTaskResources{
  3983  			Cpu: AllocatedCpuResources{
  3984  				CpuShares:     a.Cpu.CpuShares,
  3985  				ReservedCores: a.Cpu.ReservedCores,
  3986  			},
  3987  			Memory: AllocatedMemoryResources{
  3988  				MemoryMB:    a.Memory.MemoryMB,
  3989  				MemoryMaxMB: a.Memory.MemoryMaxMB,
  3990  			},
  3991  		},
  3992  	}
  3993  	ret.Flattened.Networks = append(ret.Flattened.Networks, a.Networks...)
  3994  	return ret
  3995  }
  3996  
  3997  // Subtract only subtracts CPU and Memory resources. Network utilization
  3998  // is managed separately in NetworkIndex
  3999  func (a *AllocatedTaskResources) Subtract(delta *AllocatedTaskResources) {
  4000  	if delta == nil {
  4001  		return
  4002  	}
  4003  
  4004  	a.Cpu.Subtract(&delta.Cpu)
  4005  	a.Memory.Subtract(&delta.Memory)
  4006  }
  4007  
// AllocatedSharedResources are the set of resources allocated to a task group.
type AllocatedSharedResources struct {
	Networks Networks
	// DiskMB is the shared ephemeral disk allocation, in megabytes.
	DiskMB int64
	Ports  AllocatedPorts
}
  4014  
// Copy returns a copy of the shared resources. Networks are deep-copied and
// DiskMB is a primitive.
// NOTE(review): Ports is copied as a slice header only, so the copy shares
// its backing array with the original — confirm callers never mutate port
// mappings in place.
func (a AllocatedSharedResources) Copy() AllocatedSharedResources {
	return AllocatedSharedResources{
		Networks: a.Networks.Copy(),
		DiskMB:   a.DiskMB,
		Ports:    a.Ports,
	}
}
  4022  
  4023  func (a *AllocatedSharedResources) Add(delta *AllocatedSharedResources) {
  4024  	if delta == nil {
  4025  		return
  4026  	}
  4027  	a.Networks = append(a.Networks, delta.Networks...)
  4028  	a.DiskMB += delta.DiskMB
  4029  
  4030  }
  4031  
  4032  func (a *AllocatedSharedResources) Subtract(delta *AllocatedSharedResources) {
  4033  	if delta == nil {
  4034  		return
  4035  	}
  4036  
  4037  	diff := map[*NetworkResource]bool{}
  4038  	for _, n := range delta.Networks {
  4039  		diff[n] = true
  4040  	}
  4041  	var nets Networks
  4042  	for _, n := range a.Networks {
  4043  		if _, ok := diff[n]; !ok {
  4044  			nets = append(nets, n)
  4045  		}
  4046  	}
  4047  	a.Networks = nets
  4048  	a.DiskMB -= delta.DiskMB
  4049  }
  4050  
  4051  func (a *AllocatedSharedResources) Canonicalize() {
  4052  	if len(a.Networks) > 0 {
  4053  		if len(a.Networks[0].DynamicPorts)+len(a.Networks[0].ReservedPorts) > 0 && len(a.Ports) == 0 {
  4054  			for _, ports := range [][]Port{a.Networks[0].DynamicPorts, a.Networks[0].ReservedPorts} {
  4055  				for _, p := range ports {
  4056  					a.Ports = append(a.Ports, AllocatedPortMapping{
  4057  						Label:  p.Label,
  4058  						Value:  p.Value,
  4059  						To:     p.To,
  4060  						HostIP: a.Networks[0].IP,
  4061  					})
  4062  				}
  4063  			}
  4064  		}
  4065  	}
  4066  }
  4067  
// AllocatedCpuResources captures the allocated CPU resources.
type AllocatedCpuResources struct {
	// CpuShares is the amount of allocated CPU bandwidth, in MHz shares.
	CpuShares int64
	// ReservedCores lists the specific core IDs pinned to this allocation.
	ReservedCores []uint16
}
  4073  
  4074  func (a *AllocatedCpuResources) Add(delta *AllocatedCpuResources) {
  4075  	if delta == nil {
  4076  		return
  4077  	}
  4078  
  4079  	a.CpuShares += delta.CpuShares
  4080  
  4081  	a.ReservedCores = cpuset.New(a.ReservedCores...).Union(cpuset.New(delta.ReservedCores...)).ToSlice()
  4082  }
  4083  
  4084  func (a *AllocatedCpuResources) Subtract(delta *AllocatedCpuResources) {
  4085  	if delta == nil {
  4086  		return
  4087  	}
  4088  
  4089  	a.CpuShares -= delta.CpuShares
  4090  	a.ReservedCores = cpuset.New(a.ReservedCores...).Difference(cpuset.New(delta.ReservedCores...)).ToSlice()
  4091  }
  4092  
  4093  func (a *AllocatedCpuResources) Max(other *AllocatedCpuResources) {
  4094  	if other == nil {
  4095  		return
  4096  	}
  4097  
  4098  	if other.CpuShares > a.CpuShares {
  4099  		a.CpuShares = other.CpuShares
  4100  	}
  4101  
  4102  	if len(other.ReservedCores) > len(a.ReservedCores) {
  4103  		a.ReservedCores = other.ReservedCores
  4104  	}
  4105  }
  4106  
// AllocatedMemoryResources captures the allocated memory resources, in
// megabytes.
type AllocatedMemoryResources struct {
	MemoryMB    int64
	MemoryMaxMB int64
}
  4112  
  4113  func (a *AllocatedMemoryResources) Add(delta *AllocatedMemoryResources) {
  4114  	if delta == nil {
  4115  		return
  4116  	}
  4117  
  4118  	a.MemoryMB += delta.MemoryMB
  4119  	if delta.MemoryMaxMB != 0 {
  4120  		a.MemoryMaxMB += delta.MemoryMaxMB
  4121  	} else {
  4122  		a.MemoryMaxMB += delta.MemoryMB
  4123  	}
  4124  }
  4125  
  4126  func (a *AllocatedMemoryResources) Subtract(delta *AllocatedMemoryResources) {
  4127  	if delta == nil {
  4128  		return
  4129  	}
  4130  
  4131  	a.MemoryMB -= delta.MemoryMB
  4132  	if delta.MemoryMaxMB != 0 {
  4133  		a.MemoryMaxMB -= delta.MemoryMaxMB
  4134  	} else {
  4135  		a.MemoryMaxMB -= delta.MemoryMB
  4136  	}
  4137  }
  4138  
  4139  func (a *AllocatedMemoryResources) Max(other *AllocatedMemoryResources) {
  4140  	if other == nil {
  4141  		return
  4142  	}
  4143  
  4144  	if other.MemoryMB > a.MemoryMB {
  4145  		a.MemoryMB = other.MemoryMB
  4146  	}
  4147  	if other.MemoryMaxMB > a.MemoryMaxMB {
  4148  		a.MemoryMaxMB = other.MemoryMaxMB
  4149  	}
  4150  }
  4151  
  4152  type AllocatedDevices []*AllocatedDeviceResource
  4153  
  4154  // Index finds the matching index using the passed device. If not found, -1 is
  4155  // returned.
  4156  func (a AllocatedDevices) Index(d *AllocatedDeviceResource) int {
  4157  	if d == nil {
  4158  		return -1
  4159  	}
  4160  
  4161  	for i, o := range a {
  4162  		if o.ID().Equal(d.ID()) {
  4163  			return i
  4164  		}
  4165  	}
  4166  
  4167  	return -1
  4168  }
  4169  
// AllocatedDeviceResource captures a set of allocated devices.
type AllocatedDeviceResource struct {
	// Vendor, Type, and Name are used to select the plugin to request the
	// device IDs from.
	Vendor string
	Type   string
	Name   string

	// DeviceIDs is the set of allocated devices
	DeviceIDs []string
}
  4181  
  4182  func (a *AllocatedDeviceResource) ID() *DeviceIdTuple {
  4183  	if a == nil {
  4184  		return nil
  4185  	}
  4186  
  4187  	return &DeviceIdTuple{
  4188  		Vendor: a.Vendor,
  4189  		Type:   a.Type,
  4190  		Name:   a.Name,
  4191  	}
  4192  }
  4193  
  4194  func (a *AllocatedDeviceResource) Add(delta *AllocatedDeviceResource) {
  4195  	if delta == nil {
  4196  		return
  4197  	}
  4198  
  4199  	a.DeviceIDs = append(a.DeviceIDs, delta.DeviceIDs...)
  4200  }
  4201  
  4202  func (a *AllocatedDeviceResource) Copy() *AllocatedDeviceResource {
  4203  	if a == nil {
  4204  		return a
  4205  	}
  4206  
  4207  	na := *a
  4208  
  4209  	// Copy the devices
  4210  	na.DeviceIDs = make([]string, len(a.DeviceIDs))
  4211  	copy(na.DeviceIDs, a.DeviceIDs)
  4212  	return &na
  4213  }
  4214  
// ComparableResources is the set of resources allocated to a task group but
// not keyed by Task, making it easier to compare.
type ComparableResources struct {
	Flattened AllocatedTaskResources
	Shared    AllocatedSharedResources
}
  4221  
  4222  func (c *ComparableResources) Add(delta *ComparableResources) {
  4223  	if delta == nil {
  4224  		return
  4225  	}
  4226  
  4227  	c.Flattened.Add(&delta.Flattened)
  4228  	c.Shared.Add(&delta.Shared)
  4229  }
  4230  
  4231  func (c *ComparableResources) Subtract(delta *ComparableResources) {
  4232  	if delta == nil {
  4233  		return
  4234  	}
  4235  
  4236  	c.Flattened.Subtract(&delta.Flattened)
  4237  	c.Shared.Subtract(&delta.Shared)
  4238  }
  4239  
  4240  func (c *ComparableResources) Copy() *ComparableResources {
  4241  	if c == nil {
  4242  		return nil
  4243  	}
  4244  	newR := new(ComparableResources)
  4245  	*newR = *c
  4246  	return newR
  4247  }
  4248  
  4249  // Superset checks if one set of resources is a superset of another. This
  4250  // ignores network resources, and the NetworkIndex should be used for that.
  4251  func (c *ComparableResources) Superset(other *ComparableResources) (bool, string) {
  4252  	if c.Flattened.Cpu.CpuShares < other.Flattened.Cpu.CpuShares {
  4253  		return false, "cpu"
  4254  	}
  4255  
  4256  	if len(c.Flattened.Cpu.ReservedCores) > 0 && !cpuset.New(c.Flattened.Cpu.ReservedCores...).IsSupersetOf(cpuset.New(other.Flattened.Cpu.ReservedCores...)) {
  4257  		return false, "cores"
  4258  	}
  4259  	if c.Flattened.Memory.MemoryMB < other.Flattened.Memory.MemoryMB {
  4260  		return false, "memory"
  4261  	}
  4262  	if c.Shared.DiskMB < other.Shared.DiskMB {
  4263  		return false, "disk"
  4264  	}
  4265  	return true, ""
  4266  }
  4267  
// NetIndex finds the index of the flattened network matching n by device
// name (delegates to Networks.NetIndex); returns -1 when no match exists.
func (c *ComparableResources) NetIndex(n *NetworkResource) int {
	return c.Flattened.Networks.NetIndex(n)
}
  4272  
const (
	// JobTypeCore is reserved for internal system tasks and is
	// always handled by the CoreScheduler.
	JobTypeCore     = "_core"
	JobTypeService  = "service"
	JobTypeBatch    = "batch"
	JobTypeSystem   = "system"
	JobTypeSysBatch = "sysbatch"
)
  4282  
const (
	JobStatusPending = "pending" // Pending means the job is waiting on scheduling
	JobStatusRunning = "running" // Running means the job has non-terminal allocations
	JobStatusDead    = "dead"    // Dead means all evaluations and allocations are terminal
)
  4288  
const (
	// JobMinPriority is the minimum allowed priority
	JobMinPriority = 1

	// JobDefaultPriority is the default priority if not specified.
	JobDefaultPriority = 50

	// JobDefaultMaxPriority is the default maximum allowed priority
	JobDefaultMaxPriority = 100

	// JobMaxPriority is the maximum allowed configuration value for maximum
	// job priority. It is one below CoreJobPriority so user jobs can never
	// reach the core scheduler's priority.
	JobMaxPriority = math.MaxInt16 - 1

	// CoreJobPriority should be higher than any user
	// specified job so that it gets priority. This is important
	// for the system to remain healthy.
	CoreJobPriority = math.MaxInt16

	// JobTrackedVersions is the number of historic job versions that are
	// kept.
	JobTrackedVersions = 6

	// JobTrackedScalingEvents is the number of scaling events that are
	// kept for a single task group.
	JobTrackedScalingEvents = 20
)
  4315  
// A JobSubmission contains the original job specification, along with the Variables
// submitted with the job.
type JobSubmission struct {
	// Source contains the original job definition (may be hcl1, hcl2, or json)
	Source string

	// Format indicates whether the original job was hcl1, hcl2, or json.
	Format string

	// VariableFlags contain the CLI "-var" flag arguments as submitted with the
	// job (hcl2 only).
	VariableFlags map[string]string

	// Variables contains the opaque variable blob that was input from the
	// web UI (hcl2 only).
	Variables string

	// Namespace is managed internally, do not set.
	//
	// The namespace the associated job belongs to.
	Namespace string

	// JobID is managed internally, do not set.
	//
	// The job.ID field.
	JobID string

	// Version is managed internally, do not set.
	//
	// The version of the Job this submission is associated with.
	Version uint64

	// JobModifyIndex is managed internally, do not set.
	//
	// The raft index of the Job this submission is associated with.
	JobModifyIndex uint64
}
  4353  
  4354  // Hash returns a value representative of the intended uniquness of a
  4355  // JobSubmission in the job_submission state store table (namespace, jobID, version).
  4356  func (js *JobSubmission) Hash() string {
  4357  	return fmt.Sprintf("%s \x00 %s \x00 %d", js.Namespace, js.JobID, js.Version)
  4358  }
  4359  
  4360  // Copy creates a deep copy of js.
  4361  func (js *JobSubmission) Copy() *JobSubmission {
  4362  	if js == nil {
  4363  		return nil
  4364  	}
  4365  	return &JobSubmission{
  4366  		Source:         js.Source,
  4367  		Format:         js.Format,
  4368  		VariableFlags:  maps.Clone(js.VariableFlags),
  4369  		Variables:      js.Variables,
  4370  		Namespace:      js.Namespace,
  4371  		JobID:          js.JobID,
  4372  		Version:        js.Version,
  4373  		JobModifyIndex: js.JobModifyIndex,
  4374  	}
  4375  }
  4376  
// Job is the scope of a scheduling request to Nomad. It is the largest
// scoped object, and is a named collection of task groups. Each task group
// is further composed of tasks. A task group (TG) is the unit of scheduling
// however.
type Job struct {
	// Stop marks whether the user has stopped the job. A stopped job will
	// have all created allocations stopped and acts as a way to stop a job
	// without purging it from the system. This allows existing allocs to be
	// queried and the job to be inspected as it is being killed.
	Stop bool

	// Region is the Nomad region that handles scheduling this job
	Region string

	// Namespace is the namespace the job is submitted into.
	Namespace string

	// ID is a unique identifier for the job per region. It can be
	// specified hierarchically like LineOfBiz/OrgName/Team/Project
	ID string

	// ParentID is the unique identifier of the job that spawned this job.
	ParentID string

	// Name is the logical name of the job used to refer to it. This is unique
	// per region, but not unique globally.
	Name string

	// Type is used to control various behaviors about the job. Most jobs
	// are service jobs, meaning they are expected to be long lived.
	// Some jobs are batch oriented meaning they run and then terminate.
	// This can be extended in the future to support custom schedulers.
	// See the JobType* constants for the recognized values.
	Type string

	// Priority is used to control scheduling importance and if this job
	// can preempt other jobs. See the JobMinPriority/JobMaxPriority
	// constants for the allowed range.
	Priority int

	// AllAtOnce is used to control if incremental scheduling of task groups
	// is allowed or if we must do a gang scheduling of the entire job. This
	// can slow down larger jobs if resources are not available.
	AllAtOnce bool

	// Datacenters contains all the datacenters this job is allowed to span.
	// Canonicalize defaults this to the wildcard ["*"] when empty.
	Datacenters []string

	// NodePool specifies the node pool this job is allowed to run on.
	//
	// An empty value is allowed during job registration, in which case the
	// namespace default node pool is used in Enterprise and the 'default' node
	// pool in OSS. But a node pool must be set before the job is stored, so
	// that will happen in the admission mutators.
	NodePool string

	// Constraints can be specified at a job level and apply to
	// all the task groups and tasks.
	Constraints []*Constraint

	// Affinities can be specified at the job level to express
	// scheduling preferences that apply to all groups and tasks
	Affinities []*Affinity

	// Spread can be specified at the job level to express spreading
	// allocations across a desired attribute, such as datacenter
	Spreads []*Spread

	// TaskGroups are the collections of task groups that this job needs
	// to run. Each task group is an atomic unit of scheduling and placement.
	TaskGroups []*TaskGroup

	// See agent.ApiJobToStructJob
	// Update provides defaults for the TaskGroup Update blocks
	Update UpdateStrategy

	Multiregion *Multiregion

	// Periodic is used to define the interval the job is run at.
	Periodic *PeriodicConfig

	// ParameterizedJob is used to specify the job as a parameterized job
	// for dispatching.
	ParameterizedJob *ParameterizedJobConfig

	// Dispatched is used to identify if the Job has been dispatched from a
	// parameterized job.
	Dispatched bool

	// DispatchIdempotencyToken is optionally used to ensure that a dispatched job does not have any
	// non-terminal siblings which have the same token value.
	DispatchIdempotencyToken string

	// Payload is the payload supplied when the job was dispatched.
	Payload []byte

	// Meta is used to associate arbitrary metadata with this
	// job. This is opaque to Nomad.
	Meta map[string]string

	// ConsulToken is the Consul token that proves the submitter of the job has
	// access to the Service Identity policies associated with the job's
	// Consul Connect enabled services. This field is only used to transfer the
	// token and is not stored after Job submission.
	ConsulToken string

	// ConsulNamespace is the Consul namespace
	ConsulNamespace string

	// VaultToken is the Vault token that proves the submitter of the job has
	// access to the specified Vault policies. This field is only used to
	// transfer the token and is not stored after Job submission.
	VaultToken string

	// VaultNamespace is the Vault namespace
	VaultNamespace string

	// NomadTokenID is the Accessor ID of the ACL token (if any)
	// used to register this version of the job. Used by deploymentwatcher.
	NomadTokenID string

	// Status is the current scheduling status of the job; see the
	// JobStatus* constants (pending, running, dead).
	Status string

	// StatusDescription is meant to provide more human useful information
	StatusDescription string

	// Stable marks a job as stable. Stability is only defined on "service" and
	// "system" jobs. The stability of a job will be set automatically as part
	// of a deployment and can be manually set via APIs. This field is updated
	// when the status of a corresponding deployment transitions to Failed
	// or Successful. This field is not meaningful for jobs that don't have an
	// update block.
	Stable bool

	// Version is a monotonically increasing version number that is incremented
	// on each job register.
	Version uint64

	// SubmitTime is the time at which the job was submitted as a UnixNano in
	// UTC
	SubmitTime int64

	// Raft Indexes
	CreateIndex    uint64
	ModifyIndex    uint64
	JobModifyIndex uint64
}
  4523  
  4524  // NamespacedID returns the namespaced id useful for logging
  4525  func (j *Job) NamespacedID() NamespacedID {
  4526  	return NamespacedID{
  4527  		ID:        j.ID,
  4528  		Namespace: j.Namespace,
  4529  	}
  4530  }
  4531  
  4532  // GetID implements the IDGetter interface, required for pagination.
  4533  func (j *Job) GetID() string {
  4534  	if j == nil {
  4535  		return ""
  4536  	}
  4537  	return j.ID
  4538  }
  4539  
  4540  // GetNamespace implements the NamespaceGetter interface, required for
  4541  // pagination and filtering namespaces in endpoints that support glob namespace
  4542  // requests using tokens with limited access.
  4543  func (j *Job) GetNamespace() string {
  4544  	if j == nil {
  4545  		return ""
  4546  	}
  4547  	return j.Namespace
  4548  }
  4549  
  4550  // GetCreateIndex implements the CreateIndexGetter interface, required for
  4551  // pagination.
  4552  func (j *Job) GetCreateIndex() uint64 {
  4553  	if j == nil {
  4554  		return 0
  4555  	}
  4556  	return j.CreateIndex
  4557  }
  4558  
  4559  // Canonicalize is used to canonicalize fields in the Job. This should be
  4560  // called when registering a Job.
  4561  func (j *Job) Canonicalize() {
  4562  	if j == nil {
  4563  		return
  4564  	}
  4565  
  4566  	// Ensure that an empty and nil map are treated the same to avoid scheduling
  4567  	// problems since we use reflect DeepEquals.
  4568  	if len(j.Meta) == 0 {
  4569  		j.Meta = nil
  4570  	}
  4571  
  4572  	// Ensure the job is in a namespace.
  4573  	if j.Namespace == "" {
  4574  		j.Namespace = DefaultNamespace
  4575  	}
  4576  
  4577  	if len(j.Datacenters) == 0 {
  4578  		j.Datacenters = []string{"*"}
  4579  	}
  4580  
  4581  	for _, tg := range j.TaskGroups {
  4582  		tg.Canonicalize(j)
  4583  	}
  4584  
  4585  	if j.ParameterizedJob != nil {
  4586  		j.ParameterizedJob.Canonicalize()
  4587  	}
  4588  
  4589  	if j.Multiregion != nil {
  4590  		j.Multiregion.Canonicalize()
  4591  	}
  4592  
  4593  	if j.Periodic != nil {
  4594  		j.Periodic.Canonicalize()
  4595  	}
  4596  }
  4597  
  4598  // Copy returns a deep copy of the Job. It is expected that callers use recover.
  4599  // This job can panic if the deep copy failed as it uses reflection.
  4600  func (j *Job) Copy() *Job {
  4601  	if j == nil {
  4602  		return nil
  4603  	}
  4604  	nj := new(Job)
  4605  	*nj = *j
  4606  	nj.Datacenters = slices.Clone(nj.Datacenters)
  4607  	nj.Constraints = CopySliceConstraints(nj.Constraints)
  4608  	nj.Affinities = CopySliceAffinities(nj.Affinities)
  4609  	nj.Multiregion = nj.Multiregion.Copy()
  4610  
  4611  	if j.TaskGroups != nil {
  4612  		tgs := make([]*TaskGroup, len(nj.TaskGroups))
  4613  		for i, tg := range nj.TaskGroups {
  4614  			tgs[i] = tg.Copy()
  4615  		}
  4616  		nj.TaskGroups = tgs
  4617  	}
  4618  
  4619  	nj.Periodic = nj.Periodic.Copy()
  4620  	nj.Meta = maps.Clone(nj.Meta)
  4621  	nj.ParameterizedJob = nj.ParameterizedJob.Copy()
  4622  	return nj
  4623  }
  4624  
  4625  // Validate is used to check a job for reasonable configuration
  4626  func (j *Job) Validate() error {
  4627  	var mErr multierror.Error
  4628  
  4629  	if j.Region == "" && j.Multiregion == nil {
  4630  		mErr.Errors = append(mErr.Errors, errors.New("Missing job region"))
  4631  	}
  4632  	if j.ID == "" {
  4633  		mErr.Errors = append(mErr.Errors, errors.New("Missing job ID"))
  4634  	} else if strings.Contains(j.ID, " ") {
  4635  		mErr.Errors = append(mErr.Errors, errors.New("Job ID contains a space"))
  4636  	} else if strings.Contains(j.ID, "\000") {
  4637  		mErr.Errors = append(mErr.Errors, errors.New("Job ID contains a null character"))
  4638  	}
  4639  	if j.Name == "" {
  4640  		mErr.Errors = append(mErr.Errors, errors.New("Missing job name"))
  4641  	} else if strings.Contains(j.Name, "\000") {
  4642  		mErr.Errors = append(mErr.Errors, errors.New("Job Name contains a null character"))
  4643  	}
  4644  	if j.Namespace == "" {
  4645  		mErr.Errors = append(mErr.Errors, errors.New("Job must be in a namespace"))
  4646  	}
  4647  	switch j.Type {
  4648  	case JobTypeCore, JobTypeService, JobTypeBatch, JobTypeSystem, JobTypeSysBatch:
  4649  	case "":
  4650  		mErr.Errors = append(mErr.Errors, errors.New("Missing job type"))
  4651  	default:
  4652  		mErr.Errors = append(mErr.Errors, fmt.Errorf("Invalid job type: %q", j.Type))
  4653  	}
  4654  	if len(j.Datacenters) == 0 && !j.IsMultiregion() {
  4655  		mErr.Errors = append(mErr.Errors, errors.New("Missing job datacenters"))
  4656  	} else {
  4657  		for _, v := range j.Datacenters {
  4658  			if v == "" {
  4659  				mErr.Errors = append(mErr.Errors, errors.New("Job datacenter must be non-empty string"))
  4660  			}
  4661  		}
  4662  	}
  4663  
  4664  	if len(j.TaskGroups) == 0 {
  4665  		mErr.Errors = append(mErr.Errors, errors.New("Missing job task groups"))
  4666  	}
  4667  	for idx, constr := range j.Constraints {
  4668  		if err := constr.Validate(); err != nil {
  4669  			outer := fmt.Errorf("Constraint %d validation failed: %s", idx+1, err)
  4670  			mErr.Errors = append(mErr.Errors, outer)
  4671  		}
  4672  	}
  4673  	if j.Type == JobTypeSystem {
  4674  		if j.Affinities != nil {
  4675  			mErr.Errors = append(mErr.Errors, fmt.Errorf("System jobs may not have an affinity block"))
  4676  		}
  4677  	} else {
  4678  		for idx, affinity := range j.Affinities {
  4679  			if err := affinity.Validate(); err != nil {
  4680  				outer := fmt.Errorf("Affinity %d validation failed: %s", idx+1, err)
  4681  				mErr.Errors = append(mErr.Errors, outer)
  4682  			}
  4683  		}
  4684  	}
  4685  
  4686  	if j.Type == JobTypeSystem {
  4687  		if j.Spreads != nil {
  4688  			mErr.Errors = append(mErr.Errors, fmt.Errorf("System jobs may not have a spread block"))
  4689  		}
  4690  	} else {
  4691  		for idx, spread := range j.Spreads {
  4692  			if err := spread.Validate(); err != nil {
  4693  				outer := fmt.Errorf("Spread %d validation failed: %s", idx+1, err)
  4694  				mErr.Errors = append(mErr.Errors, outer)
  4695  			}
  4696  		}
  4697  	}
  4698  
  4699  	// Check for duplicate task groups
  4700  	taskGroups := make(map[string]int)
  4701  	for idx, tg := range j.TaskGroups {
  4702  		if tg.Name == "" {
  4703  			mErr.Errors = append(mErr.Errors, fmt.Errorf("Job task group %d missing name", idx+1))
  4704  		} else if existing, ok := taskGroups[tg.Name]; ok {
  4705  			mErr.Errors = append(mErr.Errors, fmt.Errorf("Job task group %d redefines '%s' from group %d", idx+1, tg.Name, existing+1))
  4706  		} else {
  4707  			taskGroups[tg.Name] = idx
  4708  		}
  4709  
  4710  		if tg.ShutdownDelay != nil && *tg.ShutdownDelay < 0 {
  4711  			mErr.Errors = append(mErr.Errors, errors.New("ShutdownDelay must be a positive value"))
  4712  		}
  4713  
  4714  		if tg.StopAfterClientDisconnect != nil && *tg.StopAfterClientDisconnect != 0 {
  4715  			if *tg.StopAfterClientDisconnect > 0 &&
  4716  				!(j.Type == JobTypeBatch || j.Type == JobTypeService) {
  4717  				mErr.Errors = append(mErr.Errors, errors.New("stop_after_client_disconnect can only be set in batch and service jobs"))
  4718  			} else if *tg.StopAfterClientDisconnect < 0 {
  4719  				mErr.Errors = append(mErr.Errors, errors.New("stop_after_client_disconnect must be a positive value"))
  4720  			}
  4721  		}
  4722  
  4723  		if j.Type == "system" && tg.Count > 1 {
  4724  			mErr.Errors = append(mErr.Errors,
  4725  				fmt.Errorf("Job task group %s has count %d. Count cannot exceed 1 with system scheduler",
  4726  					tg.Name, tg.Count))
  4727  		}
  4728  	}
  4729  
  4730  	// Validate the task group
  4731  	for _, tg := range j.TaskGroups {
  4732  		if err := tg.Validate(j); err != nil {
  4733  			outer := fmt.Errorf("Task group %s validation failed: %v", tg.Name, err)
  4734  			mErr.Errors = append(mErr.Errors, outer)
  4735  		}
  4736  	}
  4737  
  4738  	// Validate periodic is only used with batch or sysbatch jobs.
  4739  	if j.IsPeriodic() && j.Periodic.Enabled {
  4740  		if j.Type != JobTypeBatch && j.Type != JobTypeSysBatch {
  4741  			mErr.Errors = append(mErr.Errors, fmt.Errorf(
  4742  				"Periodic can only be used with %q or %q scheduler", JobTypeBatch, JobTypeSysBatch,
  4743  			))
  4744  		}
  4745  
  4746  		if err := j.Periodic.Validate(); err != nil {
  4747  			mErr.Errors = append(mErr.Errors, err)
  4748  		}
  4749  	}
  4750  
  4751  	if j.IsParameterized() {
  4752  		if j.Type != JobTypeBatch && j.Type != JobTypeSysBatch {
  4753  			mErr.Errors = append(mErr.Errors, fmt.Errorf(
  4754  				"Parameterized job can only be used with %q or %q scheduler", JobTypeBatch, JobTypeSysBatch,
  4755  			))
  4756  		}
  4757  
  4758  		if err := j.ParameterizedJob.Validate(); err != nil {
  4759  			mErr.Errors = append(mErr.Errors, err)
  4760  		}
  4761  	}
  4762  
  4763  	if j.IsMultiregion() {
  4764  		if err := j.Multiregion.Validate(j.Type, j.Datacenters); err != nil {
  4765  			mErr.Errors = append(mErr.Errors, err)
  4766  		}
  4767  	}
  4768  
  4769  	return mErr.ErrorOrNil()
  4770  }
  4771  
  4772  // Warnings returns a list of warnings that may be from dubious settings or
  4773  // deprecation warnings.
  4774  func (j *Job) Warnings() error {
  4775  	var mErr multierror.Error
  4776  
  4777  	// Check the groups
  4778  	hasAutoPromote, allAutoPromote := false, true
  4779  
  4780  	for _, tg := range j.TaskGroups {
  4781  		if err := tg.Warnings(j); err != nil {
  4782  			outer := fmt.Errorf("Group %q has warnings: %v", tg.Name, err)
  4783  			mErr.Errors = append(mErr.Errors, outer)
  4784  		}
  4785  
  4786  		if u := tg.Update; u != nil {
  4787  			hasAutoPromote = hasAutoPromote || u.AutoPromote
  4788  
  4789  			// Having no canaries implies auto-promotion since there are no canaries to promote.
  4790  			allAutoPromote = allAutoPromote && (u.Canary == 0 || u.AutoPromote)
  4791  		}
  4792  	}
  4793  
  4794  	// Check AutoPromote, should be all or none
  4795  	if hasAutoPromote && !allAutoPromote {
  4796  		err := fmt.Errorf("auto_promote must be true for all groups to enable automatic promotion")
  4797  		mErr.Errors = append(mErr.Errors, err)
  4798  	}
  4799  
  4800  	return mErr.ErrorOrNil()
  4801  }
  4802  
  4803  // LookupTaskGroup finds a task group by name
  4804  func (j *Job) LookupTaskGroup(name string) *TaskGroup {
  4805  	for _, tg := range j.TaskGroups {
  4806  		if tg.Name == name {
  4807  			return tg
  4808  		}
  4809  	}
  4810  	return nil
  4811  }
  4812  
  4813  // CombinedTaskMeta takes a TaskGroup and Task name and returns the combined
  4814  // meta data for the task. When joining Job, Group and Task Meta, the precedence
  4815  // is by deepest scope (Task > Group > Job).
  4816  func (j *Job) CombinedTaskMeta(groupName, taskName string) map[string]string {
  4817  	group := j.LookupTaskGroup(groupName)
  4818  	if group == nil {
  4819  		return j.Meta
  4820  	}
  4821  
  4822  	var meta map[string]string
  4823  
  4824  	task := group.LookupTask(taskName)
  4825  	if task != nil {
  4826  		meta = maps.Clone(task.Meta)
  4827  	}
  4828  
  4829  	if meta == nil {
  4830  		meta = make(map[string]string, len(group.Meta)+len(j.Meta))
  4831  	}
  4832  
  4833  	// Add the group specific meta
  4834  	for k, v := range group.Meta {
  4835  		if _, ok := meta[k]; !ok {
  4836  			meta[k] = v
  4837  		}
  4838  	}
  4839  
  4840  	// Add the job specific meta
  4841  	for k, v := range j.Meta {
  4842  		if _, ok := meta[k]; !ok {
  4843  			meta[k] = v
  4844  		}
  4845  	}
  4846  
  4847  	return meta
  4848  }
  4849  
  4850  // Stopped returns if a job is stopped.
  4851  func (j *Job) Stopped() bool {
  4852  	return j == nil || j.Stop
  4853  }
  4854  
  4855  // HasUpdateStrategy returns if any task group in the job has an update strategy
  4856  func (j *Job) HasUpdateStrategy() bool {
  4857  	for _, tg := range j.TaskGroups {
  4858  		if !tg.Update.IsEmpty() {
  4859  			return true
  4860  		}
  4861  	}
  4862  
  4863  	return false
  4864  }
  4865  
  4866  // Stub is used to return a summary of the job
  4867  func (j *Job) Stub(summary *JobSummary, fields *JobStubFields) *JobListStub {
  4868  	jobStub := &JobListStub{
  4869  		ID:                j.ID,
  4870  		Namespace:         j.Namespace,
  4871  		ParentID:          j.ParentID,
  4872  		Name:              j.Name,
  4873  		Datacenters:       j.Datacenters,
  4874  		NodePool:          j.NodePool,
  4875  		Multiregion:       j.Multiregion,
  4876  		Type:              j.Type,
  4877  		Priority:          j.Priority,
  4878  		Periodic:          j.IsPeriodic(),
  4879  		ParameterizedJob:  j.IsParameterized(),
  4880  		Stop:              j.Stop,
  4881  		Status:            j.Status,
  4882  		StatusDescription: j.StatusDescription,
  4883  		CreateIndex:       j.CreateIndex,
  4884  		ModifyIndex:       j.ModifyIndex,
  4885  		JobModifyIndex:    j.JobModifyIndex,
  4886  		SubmitTime:        j.SubmitTime,
  4887  		JobSummary:        summary,
  4888  	}
  4889  
  4890  	if fields != nil {
  4891  		if fields.Meta {
  4892  			jobStub.Meta = j.Meta
  4893  		}
  4894  	}
  4895  
  4896  	return jobStub
  4897  }
  4898  
  4899  // IsPeriodic returns whether a job is periodic.
  4900  func (j *Job) IsPeriodic() bool {
  4901  	return j.Periodic != nil
  4902  }
  4903  
  4904  // IsPeriodicActive returns whether the job is an active periodic job that will
  4905  // create child jobs
  4906  func (j *Job) IsPeriodicActive() bool {
  4907  	return j.IsPeriodic() && j.Periodic.Enabled && !j.Stopped() && !j.IsParameterized()
  4908  }
  4909  
  4910  // IsParameterized returns whether a job is parameterized job.
  4911  func (j *Job) IsParameterized() bool {
  4912  	return j.ParameterizedJob != nil && !j.Dispatched
  4913  }
  4914  
  4915  // IsMultiregion returns whether a job is multiregion
  4916  func (j *Job) IsMultiregion() bool {
  4917  	return j.Multiregion != nil && j.Multiregion.Regions != nil && len(j.Multiregion.Regions) > 0
  4918  }
  4919  
  4920  // IsPlugin returns whether a job is implements a plugin (currently just CSI)
  4921  func (j *Job) IsPlugin() bool {
  4922  	for _, tg := range j.TaskGroups {
  4923  		for _, task := range tg.Tasks {
  4924  			if task.CSIPluginConfig != nil {
  4925  				return true
  4926  			}
  4927  		}
  4928  	}
  4929  	return false
  4930  }
  4931  
  4932  // Vault returns the set of Vault blocks per task group, per task
  4933  func (j *Job) Vault() map[string]map[string]*Vault {
  4934  	blocks := make(map[string]map[string]*Vault, len(j.TaskGroups))
  4935  
  4936  	for _, tg := range j.TaskGroups {
  4937  		tgBlocks := make(map[string]*Vault, len(tg.Tasks))
  4938  
  4939  		for _, task := range tg.Tasks {
  4940  			if task.Vault == nil {
  4941  				continue
  4942  			}
  4943  
  4944  			tgBlocks[task.Name] = task.Vault
  4945  		}
  4946  
  4947  		if len(tgBlocks) != 0 {
  4948  			blocks[tg.Name] = tgBlocks
  4949  		}
  4950  	}
  4951  
  4952  	return blocks
  4953  }
  4954  
  4955  // ConnectTasks returns the set of Consul Connect enabled tasks defined on the
  4956  // job that will require a Service Identity token in the case that Consul ACLs
  4957  // are enabled. The TaskKind.Value is the name of the Consul service.
  4958  //
  4959  // This method is meaningful only after the Job has passed through the job
  4960  // submission Mutator functions.
  4961  func (j *Job) ConnectTasks() []TaskKind {
  4962  	var kinds []TaskKind
  4963  	for _, tg := range j.TaskGroups {
  4964  		for _, task := range tg.Tasks {
  4965  			if task.Kind.IsConnectProxy() ||
  4966  				task.Kind.IsConnectNative() ||
  4967  				task.Kind.IsAnyConnectGateway() {
  4968  				kinds = append(kinds, task.Kind)
  4969  			}
  4970  		}
  4971  	}
  4972  	return kinds
  4973  }
  4974  
  4975  // RequiredSignals returns a mapping of task groups to tasks to their required
  4976  // set of signals
  4977  func (j *Job) RequiredSignals() map[string]map[string][]string {
  4978  	signals := make(map[string]map[string][]string)
  4979  
  4980  	for _, tg := range j.TaskGroups {
  4981  		for _, task := range tg.Tasks {
  4982  			// Use this local one as a set
  4983  			taskSignals := make(map[string]struct{})
  4984  
  4985  			// Check if the Vault change mode uses signals
  4986  			if task.Vault != nil && task.Vault.ChangeMode == VaultChangeModeSignal {
  4987  				taskSignals[task.Vault.ChangeSignal] = struct{}{}
  4988  			}
  4989  
  4990  			// If a user has specified a KillSignal, add it to required signals
  4991  			if task.KillSignal != "" {
  4992  				taskSignals[task.KillSignal] = struct{}{}
  4993  			}
  4994  
  4995  			// Check if any template change mode uses signals
  4996  			for _, t := range task.Templates {
  4997  				if t.ChangeMode != TemplateChangeModeSignal {
  4998  					continue
  4999  				}
  5000  
  5001  				taskSignals[t.ChangeSignal] = struct{}{}
  5002  			}
  5003  
  5004  			// Flatten and sort the signals
  5005  			l := len(taskSignals)
  5006  			if l == 0 {
  5007  				continue
  5008  			}
  5009  
  5010  			flat := make([]string, 0, l)
  5011  			for sig := range taskSignals {
  5012  				flat = append(flat, sig)
  5013  			}
  5014  
  5015  			sort.Strings(flat)
  5016  			tgSignals, ok := signals[tg.Name]
  5017  			if !ok {
  5018  				tgSignals = make(map[string][]string)
  5019  				signals[tg.Name] = tgSignals
  5020  			}
  5021  			tgSignals[task.Name] = flat
  5022  		}
  5023  
  5024  	}
  5025  
  5026  	return signals
  5027  }
  5028  
  5029  // SpecChanged determines if the functional specification has changed between
  5030  // two job versions.
  5031  func (j *Job) SpecChanged(new *Job) bool {
  5032  	if j == nil {
  5033  		return new != nil
  5034  	}
  5035  
  5036  	// Create a copy of the new job
  5037  	c := new.Copy()
  5038  
  5039  	// Update the new job so we can do a reflect
  5040  	c.Status = j.Status
  5041  	c.StatusDescription = j.StatusDescription
  5042  	c.Stable = j.Stable
  5043  	c.Version = j.Version
  5044  	c.CreateIndex = j.CreateIndex
  5045  	c.ModifyIndex = j.ModifyIndex
  5046  	c.JobModifyIndex = j.JobModifyIndex
  5047  	c.SubmitTime = j.SubmitTime
  5048  
  5049  	// cgbaker: FINISH: probably need some consideration of scaling policy ID here
  5050  
  5051  	// Deep equals the jobs
  5052  	return !reflect.DeepEqual(j, c)
  5053  }
  5054  
  5055  func (j *Job) SetSubmitTime() {
  5056  	j.SubmitTime = time.Now().UTC().UnixNano()
  5057  }
  5058  
// JobListStub is used to return a subset of job information
// for the job list
type JobListStub struct {
	ID                string
	ParentID          string
	Name              string
	Namespace         string `json:",omitempty"`
	Datacenters       []string
	NodePool          string
	Multiregion       *Multiregion
	Type              string
	Priority          int
	// Periodic and ParameterizedJob are booleans derived from the presence
	// of the corresponding blocks on the full Job (see Job.Stub).
	Periodic          bool
	ParameterizedJob  bool
	Stop              bool
	Status            string
	StatusDescription string
	JobSummary        *JobSummary
	// Raft indexes and submission time of the underlying job.
	CreateIndex    uint64
	ModifyIndex    uint64
	JobModifyIndex uint64
	SubmitTime     int64
	// Meta is only populated when explicitly requested via JobStubFields.
	Meta map[string]string `json:",omitempty"`
}
  5083  
// JobSummary summarizes the state of the allocations of a job
type JobSummary struct {
	// JobID is the ID of the job the summary is for
	JobID string

	// Namespace is the namespace of the job and its summary
	Namespace string

	// Summary contains the summary per task group for the Job, keyed by
	// task group name.
	Summary map[string]TaskGroupSummary

	// Children contains a summary for the children of this job.
	Children *JobChildrenSummary

	// Raft Indexes
	CreateIndex uint64
	ModifyIndex uint64
}
  5102  
  5103  // Copy returns a new copy of JobSummary
  5104  func (js *JobSummary) Copy() *JobSummary {
  5105  	newJobSummary := new(JobSummary)
  5106  	*newJobSummary = *js
  5107  	newTGSummary := make(map[string]TaskGroupSummary, len(js.Summary))
  5108  	for k, v := range js.Summary {
  5109  		newTGSummary[k] = v
  5110  	}
  5111  	newJobSummary.Summary = newTGSummary
  5112  	newJobSummary.Children = newJobSummary.Children.Copy()
  5113  	return newJobSummary
  5114  }
  5115  
// JobChildrenSummary contains the summary of children job statuses
type JobChildrenSummary struct {
	// Counts of child jobs by lifecycle state.
	Pending int64
	Running int64
	Dead    int64
}
  5122  
  5123  // Copy returns a new copy of a JobChildrenSummary
  5124  func (jc *JobChildrenSummary) Copy() *JobChildrenSummary {
  5125  	if jc == nil {
  5126  		return nil
  5127  	}
  5128  
  5129  	njc := new(JobChildrenSummary)
  5130  	*njc = *jc
  5131  	return njc
  5132  }
  5133  
// TaskGroupSummary summarizes the state of all the allocations of a particular
// TaskGroup
type TaskGroupSummary struct {
	// Counts of the group's allocations by client status. Queued counts
	// placements the scheduler has not yet been able to make.
	Queued   int
	Complete int
	Failed   int
	Running  int
	Starting int
	Lost     int
	Unknown  int
}
  5145  
const (
	// Checks uses any registered health check state in combination with task
	// states to determine if an allocation is healthy.
	UpdateStrategyHealthCheck_Checks = "checks"

	// TaskStates uses the task states of an allocation to determine if the
	// allocation is healthy.
	UpdateStrategyHealthCheck_TaskStates = "task_states"

	// Manual allows the operator to manually signal to Nomad when an
	// allocation is healthy. This allows more advanced health checking that is
	// outside of the scope of Nomad.
	UpdateStrategyHealthCheck_Manual = "manual"
)
  5160  
var (
	// DefaultUpdateStrategy provides a baseline that can be used to upgrade
	// jobs with the old policy or for populating field defaults. Note that
	// AutoRevert, AutoPromote and Canary default to off.
	DefaultUpdateStrategy = &UpdateStrategy{
		Stagger:          30 * time.Second,
		MaxParallel:      1,
		HealthCheck:      UpdateStrategyHealthCheck_Checks,
		MinHealthyTime:   10 * time.Second,
		HealthyDeadline:  5 * time.Minute,
		ProgressDeadline: 10 * time.Minute,
		AutoRevert:       false,
		AutoPromote:      false,
		Canary:           0,
	}
)
  5176  
// UpdateStrategy is used to modify how updates are done. See Validate for the
// constraints between these fields.
type UpdateStrategy struct {
	// Stagger is used to determine the rate at which allocations are migrated
	// due to down or draining nodes.
	Stagger time.Duration

	// MaxParallel is how many updates can be done in parallel
	MaxParallel int

	// HealthCheck specifies the mechanism in which allocations are marked
	// healthy or unhealthy as part of a deployment. One of the
	// UpdateStrategyHealthCheck_* constants.
	HealthCheck string

	// MinHealthyTime is the minimum time an allocation must be in the healthy
	// state before it is marked as healthy, unblocking more allocations to be
	// rolled.
	MinHealthyTime time.Duration

	// HealthyDeadline is the time in which an allocation must be marked as
	// healthy before it is automatically transitioned to unhealthy. This time
	// period doesn't count against the MinHealthyTime.
	HealthyDeadline time.Duration

	// ProgressDeadline is the time in which an allocation as part of the
	// deployment must transition to healthy. If no allocation becomes healthy
	// after the deadline, the deployment is marked as failed. If the deadline
	// is zero, the first failure causes the deployment to fail.
	ProgressDeadline time.Duration

	// AutoRevert declares that if a deployment fails because of unhealthy
	// allocations, there should be an attempt to auto-revert the job to a
	// stable version.
	AutoRevert bool

	// AutoPromote declares that the deployment should be promoted when all canaries are
	// healthy
	AutoPromote bool

	// Canary is the number of canaries to deploy when a change to the task
	// group is detected.
	Canary int
}
  5219  
  5220  func (u *UpdateStrategy) Copy() *UpdateStrategy {
  5221  	if u == nil {
  5222  		return nil
  5223  	}
  5224  
  5225  	c := new(UpdateStrategy)
  5226  	*c = *u
  5227  	return c
  5228  }
  5229  
  5230  func (u *UpdateStrategy) Validate() error {
  5231  	if u == nil {
  5232  		return nil
  5233  	}
  5234  
  5235  	var mErr multierror.Error
  5236  	switch u.HealthCheck {
  5237  	case UpdateStrategyHealthCheck_Checks, UpdateStrategyHealthCheck_TaskStates, UpdateStrategyHealthCheck_Manual:
  5238  	default:
  5239  		_ = multierror.Append(&mErr, fmt.Errorf("Invalid health check given: %q", u.HealthCheck))
  5240  	}
  5241  
  5242  	if u.MaxParallel < 0 {
  5243  		_ = multierror.Append(&mErr, fmt.Errorf("Max parallel can not be less than zero: %d < 0", u.MaxParallel))
  5244  	}
  5245  	if u.Canary < 0 {
  5246  		_ = multierror.Append(&mErr, fmt.Errorf("Canary count can not be less than zero: %d < 0", u.Canary))
  5247  	}
  5248  	if u.Canary == 0 && u.AutoPromote {
  5249  		_ = multierror.Append(&mErr, fmt.Errorf("Auto Promote requires a Canary count greater than zero"))
  5250  	}
  5251  	if u.MinHealthyTime < 0 {
  5252  		_ = multierror.Append(&mErr, fmt.Errorf("Minimum healthy time may not be less than zero: %v", u.MinHealthyTime))
  5253  	}
  5254  	if u.HealthyDeadline <= 0 {
  5255  		_ = multierror.Append(&mErr, fmt.Errorf("Healthy deadline must be greater than zero: %v", u.HealthyDeadline))
  5256  	}
  5257  	if u.ProgressDeadline < 0 {
  5258  		_ = multierror.Append(&mErr, fmt.Errorf("Progress deadline must be zero or greater: %v", u.ProgressDeadline))
  5259  	}
  5260  	if u.MinHealthyTime >= u.HealthyDeadline {
  5261  		_ = multierror.Append(&mErr, fmt.Errorf("Minimum healthy time must be less than healthy deadline: %v > %v", u.MinHealthyTime, u.HealthyDeadline))
  5262  	}
  5263  	if u.ProgressDeadline != 0 && u.HealthyDeadline >= u.ProgressDeadline {
  5264  		_ = multierror.Append(&mErr, fmt.Errorf("Healthy deadline must be less than progress deadline: %v > %v", u.HealthyDeadline, u.ProgressDeadline))
  5265  	}
  5266  	if u.Stagger <= 0 {
  5267  		_ = multierror.Append(&mErr, fmt.Errorf("Stagger must be greater than zero: %v", u.Stagger))
  5268  	}
  5269  
  5270  	return mErr.ErrorOrNil()
  5271  }
  5272  
  5273  func (u *UpdateStrategy) IsEmpty() bool {
  5274  	if u == nil {
  5275  		return true
  5276  	}
  5277  
  5278  	// When the Job is transformed from api to struct, the Update Strategy block is
  5279  	// copied into the existing task groups, the only things that are passed along
  5280  	// are MaxParallel and Stagger, because they are enforced at job level.
  5281  	// That is why checking if MaxParallel is zero is enough to know if the
  5282  	// update block is empty.
  5283  
  5284  	return u.MaxParallel == 0
  5285  }
  5286  
  5287  // Rolling returns if a rolling strategy should be used.
  5288  // TODO(alexdadgar): Remove once no longer used by the scheduler.
  5289  func (u *UpdateStrategy) Rolling() bool {
  5290  	return u.Stagger > 0 && u.MaxParallel > 0
  5291  }
  5292  
// Multiregion is the multiregion specification of a job: a rollout strategy
// plus the set of regions the job runs in.
type Multiregion struct {
	Strategy *MultiregionStrategy
	Regions  []*MultiregionRegion
}
  5297  
  5298  func (m *Multiregion) Canonicalize() {
  5299  	if m.Strategy == nil {
  5300  		m.Strategy = &MultiregionStrategy{}
  5301  	}
  5302  	if m.Regions == nil {
  5303  		m.Regions = []*MultiregionRegion{}
  5304  	}
  5305  }
  5306  
// Diff indicates whether the multiregion config has changed. It is a deep
// comparison, so nil and empty values are considered different.
func (m *Multiregion) Diff(m2 *Multiregion) bool {
	return !reflect.DeepEqual(m, m2)
}
  5311  
  5312  func (m *Multiregion) Copy() *Multiregion {
  5313  	if m == nil {
  5314  		return nil
  5315  	}
  5316  	copy := new(Multiregion)
  5317  	if m.Strategy != nil {
  5318  		copy.Strategy = &MultiregionStrategy{
  5319  			MaxParallel: m.Strategy.MaxParallel,
  5320  			OnFailure:   m.Strategy.OnFailure,
  5321  		}
  5322  	}
  5323  	for _, region := range m.Regions {
  5324  		copyRegion := &MultiregionRegion{
  5325  			Name:        region.Name,
  5326  			Count:       region.Count,
  5327  			Datacenters: []string{},
  5328  			NodePool:    region.NodePool,
  5329  			Meta:        map[string]string{},
  5330  		}
  5331  		copyRegion.Datacenters = append(copyRegion.Datacenters, region.Datacenters...)
  5332  		for k, v := range region.Meta {
  5333  			copyRegion.Meta[k] = v
  5334  		}
  5335  		copy.Regions = append(copy.Regions, copyRegion)
  5336  	}
  5337  	return copy
  5338  }
  5339  
// MultiregionStrategy controls how a multiregion deployment rolls out across
// regions and what happens when a region fails.
type MultiregionStrategy struct {
	MaxParallel int
	OnFailure   string
}
  5344  
// MultiregionRegion describes one region of a multiregion job, including the
// per-region count, datacenters, node pool, and meta overrides.
type MultiregionRegion struct {
	Name        string
	Count       int
	Datacenters []string
	NodePool    string
	Meta        map[string]string
}
  5352  
// Namespace allows logically grouping jobs and their associated objects.
type Namespace struct {
	// Name is the name of the namespace
	Name string

	// Description is a human readable description of the namespace
	Description string

	// Quota is the quota specification that the namespace should account
	// against.
	Quota string

	// Capabilities is the set of capabilities allowed for this namespace
	Capabilities *NamespaceCapabilities

	// NodePoolConfiguration is the namespace configuration for handling node
	// pools.
	NodePoolConfiguration *NamespaceNodePoolConfiguration

	// Meta is the set of metadata key/value pairs attached to the namespace
	Meta map[string]string

	// Hash is the hash of the namespace which is used to efficiently replicate
	// cross-regions. It is computed over the user-set fields; see SetHash.
	Hash []byte

	// Raft Indexes
	CreateIndex uint64
	ModifyIndex uint64
}
  5383  
// NamespaceCapabilities represents a set of capabilities allowed for this
// namespace, to be checked at job submission time.
type NamespaceCapabilities struct {
	// Allow- and deny-lists of task driver names for jobs in this namespace.
	EnabledTaskDrivers  []string
	DisabledTaskDrivers []string
}
  5390  
// NamespaceNodePoolConfiguration stores configuration about node pools for a
// namespace.
type NamespaceNodePoolConfiguration struct {
	// Default is the node pool used by jobs in this namespace that don't
	// specify a node pool of their own.
	Default string

	// Allowed specifies the node pools that are allowed to be used by jobs in
	// this namespace. By default, all node pools are allowed. If an empty list
	// is provided only the namespace's default node pool is allowed. This field
	// supports wildcard globbing through the use of `*` for multi-character
	// matching. This field cannot be used with Denied.
	Allowed []string

	// Denied specifies the node pools that are not allowed to be used by jobs
	// in this namespace. This field supports wildcard globbing through the use
	// of `*` for multi-character matching. If specified, any node pool is
	// allowed to be used, except for those that match any of these patterns.
	// This field cannot be used with Allowed.
	Denied []string
}
  5412  
  5413  func (n *Namespace) Validate() error {
  5414  	var mErr multierror.Error
  5415  
  5416  	// Validate the name and description
  5417  	if !validNamespaceName.MatchString(n.Name) {
  5418  		err := fmt.Errorf("invalid name %q. Must match regex %s", n.Name, validNamespaceName)
  5419  		mErr.Errors = append(mErr.Errors, err)
  5420  	}
  5421  	if len(n.Description) > maxNamespaceDescriptionLength {
  5422  		err := fmt.Errorf("description longer than %d", maxNamespaceDescriptionLength)
  5423  		mErr.Errors = append(mErr.Errors, err)
  5424  	}
  5425  
  5426  	err := n.NodePoolConfiguration.Validate()
  5427  	switch e := err.(type) {
  5428  	case *multierror.Error:
  5429  		for _, npErr := range e.Errors {
  5430  			mErr.Errors = append(mErr.Errors, fmt.Errorf("invalid node pool configuration: %v", npErr))
  5431  		}
  5432  	case error:
  5433  		mErr.Errors = append(mErr.Errors, fmt.Errorf("invalid node pool configuration: %v", e))
  5434  	}
  5435  
  5436  	return mErr.ErrorOrNil()
  5437  }
  5438  
// SetHash is used to compute and set the hash of the namespace. The hash is
// stored on n.Hash and also returned. The write order below is part of the
// hash's definition — changing it would break cross-region replication
// comparisons, so any new field must be appended, not inserted.
func (n *Namespace) SetHash() []byte {
	// Initialize a 256bit Blake2 hash (32 bytes)
	hash, err := blake2b.New256(nil)
	if err != nil {
		panic(err)
	}

	// Write all the user set fields
	_, _ = hash.Write([]byte(n.Name))
	_, _ = hash.Write([]byte(n.Description))
	_, _ = hash.Write([]byte(n.Quota))
	if n.Capabilities != nil {
		for _, driver := range n.Capabilities.EnabledTaskDrivers {
			_, _ = hash.Write([]byte(driver))
		}
		for _, driver := range n.Capabilities.DisabledTaskDrivers {
			_, _ = hash.Write([]byte(driver))
		}
	}
	if n.NodePoolConfiguration != nil {
		_, _ = hash.Write([]byte(n.NodePoolConfiguration.Default))
		for _, pool := range n.NodePoolConfiguration.Allowed {
			_, _ = hash.Write([]byte(pool))
		}
		for _, pool := range n.NodePoolConfiguration.Denied {
			_, _ = hash.Write([]byte(pool))
		}
	}

	// sort keys to ensure hash stability when meta is stored later
	// (map iteration order is random in Go)
	var keys []string
	for k := range n.Meta {
		keys = append(keys, k)
	}
	sort.Strings(keys)

	for _, k := range keys {
		_, _ = hash.Write([]byte(k))
		_, _ = hash.Write([]byte(n.Meta[k]))
	}

	// Finalize the hash
	hashVal := hash.Sum(nil)

	// Set and return the hash
	n.Hash = hashVal
	return hashVal
}
  5488  
  5489  func (n *Namespace) Copy() *Namespace {
  5490  	nc := new(Namespace)
  5491  	*nc = *n
  5492  	nc.Hash = make([]byte, len(n.Hash))
  5493  	if n.Capabilities != nil {
  5494  		c := new(NamespaceCapabilities)
  5495  		*c = *n.Capabilities
  5496  		c.EnabledTaskDrivers = slices.Clone(n.Capabilities.EnabledTaskDrivers)
  5497  		c.DisabledTaskDrivers = slices.Clone(n.Capabilities.DisabledTaskDrivers)
  5498  		nc.Capabilities = c
  5499  	}
  5500  	if n.NodePoolConfiguration != nil {
  5501  		np := new(NamespaceNodePoolConfiguration)
  5502  		*np = *n.NodePoolConfiguration
  5503  		np.Allowed = slices.Clone(n.NodePoolConfiguration.Allowed)
  5504  		np.Denied = slices.Clone(n.NodePoolConfiguration.Denied)
  5505  	}
  5506  	if n.Meta != nil {
  5507  		nc.Meta = make(map[string]string, len(n.Meta))
  5508  		for k, v := range n.Meta {
  5509  			nc.Meta[k] = v
  5510  		}
  5511  	}
  5512  	copy(nc.Hash, n.Hash)
  5513  	return nc
  5514  }
  5515  
// NamespaceListRequest is used to request a list of namespaces
type NamespaceListRequest struct {
	QueryOptions
}

// NamespaceListResponse is used for a list request
type NamespaceListResponse struct {
	Namespaces []*Namespace
	QueryMeta
}

// NamespaceSpecificRequest is used to query a specific namespace
type NamespaceSpecificRequest struct {
	// Name is the name of the namespace to look up.
	Name string
	QueryOptions
}

// SingleNamespaceResponse is used to return a single namespace
type SingleNamespaceResponse struct {
	Namespace *Namespace
	QueryMeta
}

// NamespaceSetRequest is used to query a set of namespaces
type NamespaceSetRequest struct {
	// Namespaces is the list of namespace names to fetch.
	Namespaces []string
	QueryOptions
}

// NamespaceSetResponse is used to return a set of namespaces
type NamespaceSetResponse struct {
	Namespaces map[string]*Namespace // Keyed by namespace Name
	QueryMeta
}

// NamespaceDeleteRequest is used to delete a set of namespaces
type NamespaceDeleteRequest struct {
	// Namespaces is the list of namespace names to delete.
	Namespaces []string
	WriteRequest
}

// NamespaceUpsertRequest is used to upsert a set of namespaces
type NamespaceUpsertRequest struct {
	// Namespaces is the set of namespaces to create or update.
	Namespaces []*Namespace
	WriteRequest
}
  5562  
const (
	// PeriodicSpecCron is used for a cron spec. The Spec field is then a
	// cron expression parsed by cronexpr.
	PeriodicSpecCron = "cron"

	// PeriodicSpecTest is only used by unit tests. It is a sorted, comma
	// separated list of unix timestamps at which to launch.
	PeriodicSpecTest = "_internal_test"
)
  5571  
  5572  // Periodic defines the interval a job should be run at.
  5573  type PeriodicConfig struct {
  5574  	// Enabled determines if the job should be run periodically.
  5575  	Enabled bool
  5576  
  5577  	// Spec specifies the interval the job should be run as. It is parsed based
  5578  	// on the SpecType.
  5579  	Spec string
  5580  
  5581  	// SpecType defines the format of the spec.
  5582  	SpecType string
  5583  
  5584  	// ProhibitOverlap enforces that spawned jobs do not run in parallel.
  5585  	ProhibitOverlap bool
  5586  
  5587  	// TimeZone is the user specified string that determines the time zone to
  5588  	// launch against. The time zones must be specified from IANA Time Zone
  5589  	// database, such as "America/New_York".
  5590  	// Reference: https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
  5591  	// Reference: https://www.iana.org/time-zones
  5592  	TimeZone string
  5593  
  5594  	// location is the time zone to evaluate the launch time against
  5595  	location *time.Location
  5596  }
  5597  
  5598  func (p *PeriodicConfig) Copy() *PeriodicConfig {
  5599  	if p == nil {
  5600  		return nil
  5601  	}
  5602  	np := new(PeriodicConfig)
  5603  	*np = *p
  5604  	return np
  5605  }
  5606  
  5607  func (p *PeriodicConfig) Validate() error {
  5608  	if !p.Enabled {
  5609  		return nil
  5610  	}
  5611  
  5612  	var mErr multierror.Error
  5613  	if p.Spec == "" {
  5614  		_ = multierror.Append(&mErr, fmt.Errorf("Must specify a spec"))
  5615  	}
  5616  
  5617  	// Check if we got a valid time zone
  5618  	if p.TimeZone != "" {
  5619  		if _, err := time.LoadLocation(p.TimeZone); err != nil {
  5620  			_ = multierror.Append(&mErr, fmt.Errorf("Invalid time zone %q: %v", p.TimeZone, err))
  5621  		}
  5622  	}
  5623  
  5624  	switch p.SpecType {
  5625  	case PeriodicSpecCron:
  5626  		// Validate the cron spec
  5627  		if _, err := cronexpr.Parse(p.Spec); err != nil {
  5628  			_ = multierror.Append(&mErr, fmt.Errorf("Invalid cron spec %q: %v", p.Spec, err))
  5629  		}
  5630  	case PeriodicSpecTest:
  5631  		// No-op
  5632  	default:
  5633  		_ = multierror.Append(&mErr, fmt.Errorf("Unknown periodic specification type %q", p.SpecType))
  5634  	}
  5635  
  5636  	return mErr.ErrorOrNil()
  5637  }
  5638  
  5639  func (p *PeriodicConfig) Canonicalize() {
  5640  	// Load the location
  5641  	l, err := time.LoadLocation(p.TimeZone)
  5642  	if err != nil {
  5643  		p.location = time.UTC
  5644  	}
  5645  
  5646  	p.location = l
  5647  }
  5648  
  5649  // CronParseNext is a helper that parses the next time for the given expression
  5650  // but captures any panic that may occur in the underlying library.
  5651  func CronParseNext(e *cronexpr.Expression, fromTime time.Time, spec string) (t time.Time, err error) {
  5652  	defer func() {
  5653  		if recover() != nil {
  5654  			t = time.Time{}
  5655  			err = fmt.Errorf("failed parsing cron expression: %q", spec)
  5656  		}
  5657  	}()
  5658  
  5659  	return e.Next(fromTime), nil
  5660  }
  5661  
  5662  // Next returns the closest time instant matching the spec that is after the
  5663  // passed time. If no matching instance exists, the zero value of time.Time is
  5664  // returned. The `time.Location` of the returned value matches that of the
  5665  // passed time.
  5666  func (p *PeriodicConfig) Next(fromTime time.Time) (time.Time, error) {
  5667  	switch p.SpecType {
  5668  	case PeriodicSpecCron:
  5669  		e, err := cronexpr.Parse(p.Spec)
  5670  		if err != nil {
  5671  			return time.Time{}, fmt.Errorf("failed parsing cron expression: %q: %v", p.Spec, err)
  5672  		}
  5673  		return CronParseNext(e, fromTime, p.Spec)
  5674  	case PeriodicSpecTest:
  5675  		split := strings.Split(p.Spec, ",")
  5676  		if len(split) == 1 && split[0] == "" {
  5677  			return time.Time{}, nil
  5678  		}
  5679  
  5680  		// Parse the times
  5681  		times := make([]time.Time, len(split))
  5682  		for i, s := range split {
  5683  			unix, err := strconv.Atoi(s)
  5684  			if err != nil {
  5685  				return time.Time{}, nil
  5686  			}
  5687  
  5688  			times[i] = time.Unix(int64(unix), 0)
  5689  		}
  5690  
  5691  		// Find the next match
  5692  		for _, next := range times {
  5693  			if fromTime.Before(next) {
  5694  				return next, nil
  5695  			}
  5696  		}
  5697  	}
  5698  
  5699  	return time.Time{}, nil
  5700  }
  5701  
  5702  // GetLocation returns the location to use for determining the time zone to run
  5703  // the periodic job against.
  5704  func (p *PeriodicConfig) GetLocation() *time.Location {
  5705  	// Jobs pre 0.5.5 will not have this
  5706  	if p.location != nil {
  5707  		return p.location
  5708  	}
  5709  
  5710  	return time.UTC
  5711  }
  5712  
const (
	// PeriodicLaunchSuffix is the string appended to the periodic jobs ID
	// when launching derived instances of it.
	PeriodicLaunchSuffix = "/periodic-"
)
  5718  
// PeriodicLaunch tracks the last launch time of a periodic job.
type PeriodicLaunch struct {
	ID        string    // ID of the periodic job.
	Namespace string    // Namespace of the periodic job
	Launch    time.Time // The last launch time.

	// Raft Indexes: CreateIndex is the index the record was created at,
	// ModifyIndex the index of its last update.
	CreateIndex uint64
	ModifyIndex uint64
}
  5729  
const (
	// Valid values for ParameterizedJobConfig.Payload.
	DispatchPayloadForbidden = "forbidden"
	DispatchPayloadOptional  = "optional"
	DispatchPayloadRequired  = "required"

	// DispatchLaunchSuffix is the string appended to the parameterized job's ID
	// when dispatching instances of it.
	DispatchLaunchSuffix = "/dispatch-"
)

// ParameterizedJobConfig is used to configure the parameterized job
type ParameterizedJobConfig struct {
	// Payload configure the payload requirements; one of the
	// DispatchPayload* constants above.
	Payload string

	// MetaRequired is metadata keys that must be specified by the dispatcher
	MetaRequired []string

	// MetaOptional is metadata keys that may be specified by the dispatcher
	MetaOptional []string
}
  5751  
  5752  func (d *ParameterizedJobConfig) Validate() error {
  5753  	var mErr multierror.Error
  5754  	switch d.Payload {
  5755  	case DispatchPayloadOptional, DispatchPayloadRequired, DispatchPayloadForbidden:
  5756  	default:
  5757  		_ = multierror.Append(&mErr, fmt.Errorf("Unknown payload requirement: %q", d.Payload))
  5758  	}
  5759  
  5760  	// Check that the meta configurations are disjoint sets
  5761  	disjoint, offending := helper.IsDisjoint(d.MetaRequired, d.MetaOptional)
  5762  	if !disjoint {
  5763  		_ = multierror.Append(&mErr, fmt.Errorf("Required and optional meta keys should be disjoint. Following keys exist in both: %v", offending))
  5764  	}
  5765  
  5766  	return mErr.ErrorOrNil()
  5767  }
  5768  
  5769  func (d *ParameterizedJobConfig) Canonicalize() {
  5770  	if d.Payload == "" {
  5771  		d.Payload = DispatchPayloadOptional
  5772  	}
  5773  }
  5774  
  5775  func (d *ParameterizedJobConfig) Copy() *ParameterizedJobConfig {
  5776  	if d == nil {
  5777  		return nil
  5778  	}
  5779  	nd := new(ParameterizedJobConfig)
  5780  	*nd = *d
  5781  	nd.MetaOptional = slices.Clone(nd.MetaOptional)
  5782  	nd.MetaRequired = slices.Clone(nd.MetaRequired)
  5783  	return nd
  5784  }
  5785  
  5786  // DispatchedID returns an ID appropriate for a job dispatched against a
  5787  // particular parameterized job
  5788  func DispatchedID(templateID, idPrefixTemplate string, t time.Time) string {
  5789  	u := uuid.Generate()[:8]
  5790  
  5791  	if idPrefixTemplate != "" {
  5792  		return fmt.Sprintf("%s%s%s-%d-%s", templateID, DispatchLaunchSuffix, idPrefixTemplate, t.Unix(), u)
  5793  	}
  5794  
  5795  	return fmt.Sprintf("%s%s%d-%s", templateID, DispatchLaunchSuffix, t.Unix(), u)
  5796  }
  5797  
  5798  // DispatchPayloadConfig configures how a task gets its input from a job dispatch
  5799  type DispatchPayloadConfig struct {
  5800  	// File specifies a relative path to where the input data should be written
  5801  	File string
  5802  }
  5803  
  5804  func (d *DispatchPayloadConfig) Copy() *DispatchPayloadConfig {
  5805  	if d == nil {
  5806  		return nil
  5807  	}
  5808  	nd := new(DispatchPayloadConfig)
  5809  	*nd = *d
  5810  	return nd
  5811  }
  5812  
  5813  func (d *DispatchPayloadConfig) Validate() error {
  5814  	// Verify the destination doesn't escape
  5815  	escaped, err := escapingfs.PathEscapesAllocViaRelative("task/local/", d.File)
  5816  	if err != nil {
  5817  		return fmt.Errorf("invalid destination path: %v", err)
  5818  	} else if escaped {
  5819  		return fmt.Errorf("destination escapes allocation directory")
  5820  	}
  5821  
  5822  	return nil
  5823  }
  5824  
  5825  const (
  5826  	TaskLifecycleHookPrestart  = "prestart"
  5827  	TaskLifecycleHookPoststart = "poststart"
  5828  	TaskLifecycleHookPoststop  = "poststop"
  5829  )
  5830  
  5831  type TaskLifecycleConfig struct {
  5832  	Hook    string
  5833  	Sidecar bool
  5834  }
  5835  
  5836  func (d *TaskLifecycleConfig) Copy() *TaskLifecycleConfig {
  5837  	if d == nil {
  5838  		return nil
  5839  	}
  5840  	nd := new(TaskLifecycleConfig)
  5841  	*nd = *d
  5842  	return nd
  5843  }
  5844  
  5845  func (d *TaskLifecycleConfig) Validate() error {
  5846  	if d == nil {
  5847  		return nil
  5848  	}
  5849  
  5850  	switch d.Hook {
  5851  	case TaskLifecycleHookPrestart:
  5852  	case TaskLifecycleHookPoststart:
  5853  	case TaskLifecycleHookPoststop:
  5854  	case "":
  5855  		return fmt.Errorf("no lifecycle hook provided")
  5856  	default:
  5857  		return fmt.Errorf("invalid hook: %v", d.Hook)
  5858  	}
  5859  
  5860  	return nil
  5861  }
  5862  
var (
	// These default restart policies needs to be in sync with
	// Canonicalize in api/tasks.go

	// DefaultServiceJobRestartPolicy: up to 2 restarts per 30 minutes with a
	// 15 second delay, then fail.
	DefaultServiceJobRestartPolicy = RestartPolicy{
		Delay:    15 * time.Second,
		Attempts: 2,
		Interval: 30 * time.Minute,
		Mode:     RestartPolicyModeFail,
	}
	// DefaultBatchJobRestartPolicy: up to 3 restarts per 24 hours with a
	// 15 second delay, then fail.
	DefaultBatchJobRestartPolicy = RestartPolicy{
		Delay:    15 * time.Second,
		Attempts: 3,
		Interval: 24 * time.Hour,
		Mode:     RestartPolicyModeFail,
	}
)
  5880  
var (
	// These default reschedule policies needs to be in sync with
	// NewDefaultReschedulePolicy in api/tasks.go

	// DefaultServiceJobReschedulePolicy: unlimited attempts with exponential
	// backoff from 30s capped at 1h.
	DefaultServiceJobReschedulePolicy = ReschedulePolicy{
		Delay:         30 * time.Second,
		DelayFunction: "exponential",
		MaxDelay:      1 * time.Hour,
		Unlimited:     true,
	}
	// DefaultBatchJobReschedulePolicy: a single attempt per 24 hours with a
	// constant 5s delay.
	DefaultBatchJobReschedulePolicy = ReschedulePolicy{
		Attempts:      1,
		Interval:      24 * time.Hour,
		Delay:         5 * time.Second,
		DelayFunction: "constant",
	}
)
  5898  
const (
	// RestartPolicyModeDelay causes an artificial delay till the next interval is
	// reached when the specified attempts have been reached in the interval.
	RestartPolicyModeDelay = "delay"

	// RestartPolicyModeFail causes a job to fail if the specified number of
	// attempts are reached within an interval.
	RestartPolicyModeFail = "fail"

	// RestartPolicyMinInterval is the minimum interval that is accepted for a
	// restart policy.
	RestartPolicyMinInterval = 5 * time.Second

	// ReasonWithinPolicy describes restart events that are within policy
	ReasonWithinPolicy = "Restart within policy"
)
  5915  
// JobScalingEvents contains the scaling events for a given job
type JobScalingEvents struct {
	Namespace string
	JobID     string

	// This map is indexed by target; currently, this is just task group
	// the indexed array is sorted from newest to oldest event
	// the array should have less than JobTrackedScalingEvents entries
	ScalingEvents map[string][]*ScalingEvent

	// Raft index of the last modification.
	ModifyIndex uint64
}
  5929  
  5930  // NewScalingEvent method for ScalingEvent objects.
  5931  func NewScalingEvent(message string) *ScalingEvent {
  5932  	return &ScalingEvent{
  5933  		Time:    time.Now().Unix(),
  5934  		Message: message,
  5935  	}
  5936  }
  5937  
  5938  // ScalingEvent describes a scaling event against a Job
  5939  type ScalingEvent struct {
  5940  	// Unix Nanosecond timestamp for the scaling event
  5941  	Time int64
  5942  
  5943  	// Count is the new scaling count, if provided
  5944  	Count *int64
  5945  
  5946  	// PreviousCount is the count at the time of the scaling event
  5947  	PreviousCount int64
  5948  
  5949  	// Message is the message describing a scaling event
  5950  	Message string
  5951  
  5952  	// Error indicates an error state for this scaling event
  5953  	Error bool
  5954  
  5955  	// Meta is a map of metadata returned during a scaling event
  5956  	Meta map[string]interface{}
  5957  
  5958  	// EvalID is the ID for an evaluation if one was created as part of a scaling event
  5959  	EvalID *string
  5960  
  5961  	// Raft index
  5962  	CreateIndex uint64
  5963  }
  5964  
  5965  func (e *ScalingEvent) SetError(error bool) *ScalingEvent {
  5966  	e.Error = error
  5967  	return e
  5968  }
  5969  
  5970  func (e *ScalingEvent) SetMeta(meta map[string]interface{}) *ScalingEvent {
  5971  	e.Meta = meta
  5972  	return e
  5973  }
  5974  
  5975  func (e *ScalingEvent) SetEvalID(evalID string) *ScalingEvent {
  5976  	e.EvalID = &evalID
  5977  	return e
  5978  }
  5979  
// ScalingEventRequest is used by the Job.Scale endpoint
// to register scaling events
type ScalingEventRequest struct {
	Namespace string
	JobID     string
	TaskGroup string

	ScalingEvent *ScalingEvent
}
  5989  
  5990  // ScalingPolicy specifies the scaling policy for a scaling target
  5991  type ScalingPolicy struct {
  5992  	// ID is a generated UUID used for looking up the scaling policy
  5993  	ID string
  5994  
  5995  	// Type is the type of scaling performed by the policy
  5996  	Type string
  5997  
  5998  	// Target contains information about the target of the scaling policy, like job and group
  5999  	Target map[string]string
  6000  
  6001  	// Policy is an opaque description of the scaling policy, passed to the autoscaler
  6002  	Policy map[string]interface{}
  6003  
  6004  	// Min is the minimum allowable scaling count for this target
  6005  	Min int64
  6006  
  6007  	// Max is the maximum allowable scaling count for this target
  6008  	Max int64
  6009  
  6010  	// Enabled indicates whether this policy has been enabled/disabled
  6011  	Enabled bool
  6012  
  6013  	CreateIndex uint64
  6014  	ModifyIndex uint64
  6015  }
  6016  
  6017  // JobKey returns a key that is unique to a job-scoped target, useful as a map
  6018  // key. This uses the policy type, plus target (group and task).
  6019  func (p *ScalingPolicy) JobKey() string {
  6020  	return p.Type + "\000" +
  6021  		p.Target[ScalingTargetGroup] + "\000" +
  6022  		p.Target[ScalingTargetTask]
  6023  }
  6024  
  6025  const (
  6026  	ScalingTargetNamespace = "Namespace"
  6027  	ScalingTargetJob       = "Job"
  6028  	ScalingTargetGroup     = "Group"
  6029  	ScalingTargetTask      = "Task"
  6030  
  6031  	ScalingPolicyTypeHorizontal = "horizontal"
  6032  )
  6033  
  6034  func (p *ScalingPolicy) Canonicalize() {
  6035  	if p.Type == "" {
  6036  		p.Type = ScalingPolicyTypeHorizontal
  6037  	}
  6038  }
  6039  
  6040  func (p *ScalingPolicy) Copy() *ScalingPolicy {
  6041  	if p == nil {
  6042  		return nil
  6043  	}
  6044  
  6045  	opaquePolicyConfig, err := copystructure.Copy(p.Policy)
  6046  	if err != nil {
  6047  		panic(err.Error())
  6048  	}
  6049  
  6050  	c := ScalingPolicy{
  6051  		ID:          p.ID,
  6052  		Policy:      opaquePolicyConfig.(map[string]interface{}),
  6053  		Enabled:     p.Enabled,
  6054  		Type:        p.Type,
  6055  		Min:         p.Min,
  6056  		Max:         p.Max,
  6057  		CreateIndex: p.CreateIndex,
  6058  		ModifyIndex: p.ModifyIndex,
  6059  	}
  6060  	c.Target = make(map[string]string, len(p.Target))
  6061  	for k, v := range p.Target {
  6062  		c.Target[k] = v
  6063  	}
  6064  	return &c
  6065  }
  6066  
  6067  func (p *ScalingPolicy) Validate() error {
  6068  	if p == nil {
  6069  		return nil
  6070  	}
  6071  
  6072  	var mErr multierror.Error
  6073  
  6074  	// Check policy type and target
  6075  	if p.Type == "" {
  6076  		mErr.Errors = append(mErr.Errors, fmt.Errorf("missing scaling policy type"))
  6077  	} else {
  6078  		mErr.Errors = append(mErr.Errors, p.validateType().Errors...)
  6079  	}
  6080  
  6081  	// Check Min and Max
  6082  	if p.Max < 0 {
  6083  		mErr.Errors = append(mErr.Errors,
  6084  			fmt.Errorf("maximum count must be specified and non-negative"))
  6085  	} else if p.Max < p.Min {
  6086  		mErr.Errors = append(mErr.Errors,
  6087  			fmt.Errorf("maximum count must not be less than minimum count"))
  6088  	}
  6089  
  6090  	if p.Min < 0 {
  6091  		mErr.Errors = append(mErr.Errors,
  6092  			fmt.Errorf("minimum count must be specified and non-negative"))
  6093  	}
  6094  
  6095  	return mErr.ErrorOrNil()
  6096  }
  6097  
  6098  func (p *ScalingPolicy) validateTargetHorizontal() (mErr multierror.Error) {
  6099  	if len(p.Target) == 0 {
  6100  		// This is probably not a Nomad horizontal policy
  6101  		return
  6102  	}
  6103  
  6104  	// Nomad horizontal policies should have Namespace, Job and TaskGroup
  6105  	if p.Target[ScalingTargetNamespace] == "" {
  6106  		mErr.Errors = append(mErr.Errors, fmt.Errorf("missing target namespace"))
  6107  	}
  6108  	if p.Target[ScalingTargetJob] == "" {
  6109  		mErr.Errors = append(mErr.Errors, fmt.Errorf("missing target job"))
  6110  	}
  6111  	if p.Target[ScalingTargetGroup] == "" {
  6112  		mErr.Errors = append(mErr.Errors, fmt.Errorf("missing target group"))
  6113  	}
  6114  	return
  6115  }
  6116  
  6117  // Diff indicates whether the specification for a given scaling policy has changed
  6118  func (p *ScalingPolicy) Diff(p2 *ScalingPolicy) bool {
  6119  	copy := *p2
  6120  	copy.ID = p.ID
  6121  	copy.CreateIndex = p.CreateIndex
  6122  	copy.ModifyIndex = p.ModifyIndex
  6123  	return !reflect.DeepEqual(*p, copy)
  6124  }
  6125  
  6126  // TargetTaskGroup updates a ScalingPolicy target to specify a given task group
  6127  func (p *ScalingPolicy) TargetTaskGroup(job *Job, tg *TaskGroup) *ScalingPolicy {
  6128  	p.Target = map[string]string{
  6129  		ScalingTargetNamespace: job.Namespace,
  6130  		ScalingTargetJob:       job.ID,
  6131  		ScalingTargetGroup:     tg.Name,
  6132  	}
  6133  	return p
  6134  }
  6135  
  6136  // TargetTask updates a ScalingPolicy target to specify a given task
  6137  func (p *ScalingPolicy) TargetTask(job *Job, tg *TaskGroup, task *Task) *ScalingPolicy {
  6138  	p.TargetTaskGroup(job, tg)
  6139  	p.Target[ScalingTargetTask] = task.Name
  6140  	return p
  6141  }
  6142  
  6143  func (p *ScalingPolicy) Stub() *ScalingPolicyListStub {
  6144  	stub := &ScalingPolicyListStub{
  6145  		ID:          p.ID,
  6146  		Type:        p.Type,
  6147  		Target:      make(map[string]string),
  6148  		Enabled:     p.Enabled,
  6149  		CreateIndex: p.CreateIndex,
  6150  		ModifyIndex: p.ModifyIndex,
  6151  	}
  6152  	for k, v := range p.Target {
  6153  		stub.Target[k] = v
  6154  	}
  6155  	return stub
  6156  }
  6157  
// GetScalingPolicies returns a slice of all scaling policies for this job
func (j *Job) GetScalingPolicies() []*ScalingPolicy {
	ret := make([]*ScalingPolicy, 0)

	// Collect the group-level policies first.
	for _, tg := range j.TaskGroups {
		if tg.Scaling != nil {
			ret = append(ret, tg.Scaling)
		}
	}

	// Include any policies from GetEntScalingPolicies (presumably
	// enterprise-only; its definition is not visible here — confirm).
	ret = append(ret, j.GetEntScalingPolicies()...)

	return ret
}
  6172  
  6173  // UsesDeployments returns a boolean indicating whether the job configuration
  6174  // results in a deployment during scheduling.
  6175  func (j *Job) UsesDeployments() bool {
  6176  	switch j.Type {
  6177  	case JobTypeService:
  6178  		return true
  6179  	default:
  6180  		return false
  6181  	}
  6182  }
  6183  
// ScalingPolicyListStub is used to return a subset of scaling policy information
// for the scaling policy list
type ScalingPolicyListStub struct {
	ID          string
	Enabled     bool
	Type        string
	Target      map[string]string
	CreateIndex uint64
	ModifyIndex uint64
}
  6194  
  6195  // RestartPolicy configures how Tasks are restarted when they crash or fail.
  6196  type RestartPolicy struct {
  6197  	// Attempts is the number of restart that will occur in an interval.
  6198  	Attempts int
  6199  
  6200  	// Interval is a duration in which we can limit the number of restarts
  6201  	// within.
  6202  	Interval time.Duration
  6203  
  6204  	// Delay is the time between a failure and a restart.
  6205  	Delay time.Duration
  6206  
  6207  	// Mode controls what happens when the task restarts more than attempt times
  6208  	// in an interval.
  6209  	Mode string
  6210  }
  6211  
  6212  func (r *RestartPolicy) Copy() *RestartPolicy {
  6213  	if r == nil {
  6214  		return nil
  6215  	}
  6216  	nrp := new(RestartPolicy)
  6217  	*nrp = *r
  6218  	return nrp
  6219  }
  6220  
  6221  func (r *RestartPolicy) Validate() error {
  6222  	var mErr multierror.Error
  6223  	switch r.Mode {
  6224  	case RestartPolicyModeDelay, RestartPolicyModeFail:
  6225  	default:
  6226  		_ = multierror.Append(&mErr, fmt.Errorf("Unsupported restart mode: %q", r.Mode))
  6227  	}
  6228  
  6229  	// Check for ambiguous/confusing settings
  6230  	if r.Attempts == 0 && r.Mode != RestartPolicyModeFail {
  6231  		_ = multierror.Append(&mErr, fmt.Errorf("Restart policy %q with %d attempts is ambiguous", r.Mode, r.Attempts))
  6232  	}
  6233  
  6234  	if r.Interval.Nanoseconds() < RestartPolicyMinInterval.Nanoseconds() {
  6235  		_ = multierror.Append(&mErr, fmt.Errorf("Interval can not be less than %v (got %v)", RestartPolicyMinInterval, r.Interval))
  6236  	}
  6237  	if time.Duration(r.Attempts)*r.Delay > r.Interval {
  6238  		_ = multierror.Append(&mErr,
  6239  			fmt.Errorf("Nomad can't restart the TaskGroup %v times in an interval of %v with a delay of %v", r.Attempts, r.Interval, r.Delay))
  6240  	}
  6241  	return mErr.ErrorOrNil()
  6242  }
  6243  
  6244  func NewRestartPolicy(jobType string) *RestartPolicy {
  6245  	switch jobType {
  6246  	case JobTypeService, JobTypeSystem:
  6247  		rp := DefaultServiceJobRestartPolicy
  6248  		return &rp
  6249  	case JobTypeBatch:
  6250  		rp := DefaultBatchJobRestartPolicy
  6251  		return &rp
  6252  	}
  6253  	return nil
  6254  }
  6255  
// ReschedulePolicyMinInterval is the smallest interval accepted for a
// limited-attempt reschedule policy.
const ReschedulePolicyMinInterval = 15 * time.Second

// ReschedulePolicyMinDelay is the smallest delay accepted between
// reschedule attempts.
const ReschedulePolicyMinDelay = 5 * time.Second

// RescheduleDelayFunctions lists the delay progression functions accepted by
// ReschedulePolicy.DelayFunction.
var RescheduleDelayFunctions = [...]string{"constant", "exponential", "fibonacci"}
  6260  
  6261  // ReschedulePolicy configures how Tasks are rescheduled  when they crash or fail.
  6262  type ReschedulePolicy struct {
  6263  	// Attempts limits the number of rescheduling attempts that can occur in an interval.
  6264  	Attempts int
  6265  
  6266  	// Interval is a duration in which we can limit the number of reschedule attempts.
  6267  	Interval time.Duration
  6268  
  6269  	// Delay is a minimum duration to wait between reschedule attempts.
  6270  	// The delay function determines how much subsequent reschedule attempts are delayed by.
  6271  	Delay time.Duration
  6272  
  6273  	// DelayFunction determines how the delay progressively changes on subsequent reschedule
  6274  	// attempts. Valid values are "exponential", "constant", and "fibonacci".
  6275  	DelayFunction string
  6276  
  6277  	// MaxDelay is an upper bound on the delay.
  6278  	MaxDelay time.Duration
  6279  
  6280  	// Unlimited allows infinite rescheduling attempts. Only allowed when delay is set
  6281  	// between reschedule attempts.
  6282  	Unlimited bool
  6283  }
  6284  
  6285  func (r *ReschedulePolicy) Copy() *ReschedulePolicy {
  6286  	if r == nil {
  6287  		return nil
  6288  	}
  6289  	nrp := new(ReschedulePolicy)
  6290  	*nrp = *r
  6291  	return nrp
  6292  }
  6293  
  6294  func (r *ReschedulePolicy) Enabled() bool {
  6295  	enabled := r != nil && (r.Attempts > 0 || r.Unlimited)
  6296  	return enabled
  6297  }
  6298  
  6299  // Validate uses different criteria to validate the reschedule policy
  6300  // Delay must be a minimum of 5 seconds
  6301  // Delay Ceiling is ignored if Delay Function is "constant"
  6302  // Number of possible attempts is validated, given the interval, delay and delay function
  6303  func (r *ReschedulePolicy) Validate() error {
  6304  	if !r.Enabled() {
  6305  		return nil
  6306  	}
  6307  	var mErr multierror.Error
  6308  	// Check for ambiguous/confusing settings
  6309  	if r.Attempts > 0 {
  6310  		if r.Interval <= 0 {
  6311  			_ = multierror.Append(&mErr, fmt.Errorf("Interval must be a non zero value if Attempts > 0"))
  6312  		}
  6313  		if r.Unlimited {
  6314  			_ = multierror.Append(&mErr, fmt.Errorf("Reschedule Policy with Attempts = %v, Interval = %v, "+
  6315  				"and Unlimited = %v is ambiguous", r.Attempts, r.Interval, r.Unlimited))
  6316  			_ = multierror.Append(&mErr, errors.New("If Attempts >0, Unlimited cannot also be set to true"))
  6317  		}
  6318  	}
  6319  
  6320  	delayPreCheck := true
  6321  	// Delay should be bigger than the default
  6322  	if r.Delay.Nanoseconds() < ReschedulePolicyMinDelay.Nanoseconds() {
  6323  		_ = multierror.Append(&mErr, fmt.Errorf("Delay cannot be less than %v (got %v)", ReschedulePolicyMinDelay, r.Delay))
  6324  		delayPreCheck = false
  6325  	}
  6326  
  6327  	// Must use a valid delay function
  6328  	if !isValidDelayFunction(r.DelayFunction) {
  6329  		_ = multierror.Append(&mErr, fmt.Errorf("Invalid delay function %q, must be one of %q", r.DelayFunction, RescheduleDelayFunctions))
  6330  		delayPreCheck = false
  6331  	}
  6332  
  6333  	// Validate MaxDelay if not using linear delay progression
  6334  	if r.DelayFunction != "constant" {
  6335  		if r.MaxDelay.Nanoseconds() < ReschedulePolicyMinDelay.Nanoseconds() {
  6336  			_ = multierror.Append(&mErr, fmt.Errorf("Max Delay cannot be less than %v (got %v)", ReschedulePolicyMinDelay, r.Delay))
  6337  			delayPreCheck = false
  6338  		}
  6339  		if r.MaxDelay < r.Delay {
  6340  			_ = multierror.Append(&mErr, fmt.Errorf("Max Delay cannot be less than Delay %v (got %v)", r.Delay, r.MaxDelay))
  6341  			delayPreCheck = false
  6342  		}
  6343  
  6344  	}
  6345  
  6346  	// Validate Interval and other delay parameters if attempts are limited
  6347  	if !r.Unlimited {
  6348  		if r.Interval.Nanoseconds() < ReschedulePolicyMinInterval.Nanoseconds() {
  6349  			_ = multierror.Append(&mErr, fmt.Errorf("Interval cannot be less than %v (got %v)", ReschedulePolicyMinInterval, r.Interval))
  6350  		}
  6351  		if !delayPreCheck {
  6352  			// We can't cross validate the rest of the delay params if delayPreCheck fails, so return early
  6353  			return mErr.ErrorOrNil()
  6354  		}
  6355  		crossValidationErr := r.validateDelayParams()
  6356  		if crossValidationErr != nil {
  6357  			_ = multierror.Append(&mErr, crossValidationErr)
  6358  		}
  6359  	}
  6360  	return mErr.ErrorOrNil()
  6361  }
  6362  
  6363  func isValidDelayFunction(delayFunc string) bool {
  6364  	for _, value := range RescheduleDelayFunctions {
  6365  		if value == delayFunc {
  6366  			return true
  6367  		}
  6368  	}
  6369  	return false
  6370  }
  6371  
  6372  func (r *ReschedulePolicy) validateDelayParams() error {
  6373  	ok, possibleAttempts, recommendedInterval := r.viableAttempts()
  6374  	if ok {
  6375  		return nil
  6376  	}
  6377  	var mErr multierror.Error
  6378  	if r.DelayFunction == "constant" {
  6379  		_ = multierror.Append(&mErr, fmt.Errorf("Nomad can only make %v attempts in %v with initial delay %v and "+
  6380  			"delay function %q", possibleAttempts, r.Interval, r.Delay, r.DelayFunction))
  6381  	} else {
  6382  		_ = multierror.Append(&mErr, fmt.Errorf("Nomad can only make %v attempts in %v with initial delay %v, "+
  6383  			"delay function %q, and delay ceiling %v", possibleAttempts, r.Interval, r.Delay, r.DelayFunction, r.MaxDelay))
  6384  	}
  6385  	_ = multierror.Append(&mErr, fmt.Errorf("Set the interval to at least %v to accommodate %v attempts", recommendedInterval.Round(time.Second), r.Attempts))
  6386  	return mErr.ErrorOrNil()
  6387  }
  6388  
// viableAttempts simulates the reschedule delay progression and reports
// whether r.Attempts reschedules fit inside r.Interval. It returns:
//   - valid: true when the configured attempts fit in the interval
//   - possibleAttempts: how many attempts actually fit
//   - recommendedInterval: an interval large enough for the configured attempts
func (r *ReschedulePolicy) viableAttempts() (bool, int, time.Duration) {
	var possibleAttempts int
	var recommendedInterval time.Duration
	valid := true
	switch r.DelayFunction {
	case "constant":
		// With a constant delay the required budget is attempts * delay.
		recommendedInterval = time.Duration(r.Attempts) * r.Delay
		if r.Interval < recommendedInterval {
			possibleAttempts = int(r.Interval / r.Delay)
			valid = false
		}
	case "exponential":
		// Delay doubles each attempt (2^i * Delay), clamped at MaxDelay.
		for i := 0; i < r.Attempts; i++ {
			nextDelay := time.Duration(math.Pow(2, float64(i))) * r.Delay
			if nextDelay > r.MaxDelay {
				nextDelay = r.MaxDelay
				recommendedInterval += nextDelay
			} else {
				// NOTE(review): below the ceiling this overwrites (=) rather
				// than accumulates (+=), so recommendedInterval tracks the
				// latest delay instead of a running total — confirm intended.
				recommendedInterval = nextDelay
			}
			if recommendedInterval < r.Interval {
				possibleAttempts++
			}
		}
		if possibleAttempts < r.Attempts {
			valid = false
		}
	case "fibonacci":
		// Build the fibonacci-style delay sequence, seeded with two Delays.
		var slots []time.Duration
		slots = append(slots, r.Delay)
		slots = append(slots, r.Delay)
		reachedCeiling := false
		for i := 2; i < r.Attempts; i++ {
			var nextDelay time.Duration
			if reachedCeiling {
				//switch to linear
				// NOTE(review): "linear" here grows by MaxDelay per step
				// on top of the previous slot — confirm intended.
				nextDelay = slots[i-1] + r.MaxDelay
			} else {
				nextDelay = slots[i-1] + slots[i-2]
				if nextDelay > r.MaxDelay {
					nextDelay = r.MaxDelay
					reachedCeiling = true
				}
			}
			slots = append(slots, nextDelay)
		}
		recommendedInterval = slots[len(slots)-1]
		if r.Interval < recommendedInterval {
			valid = false
			// calculate possible attempts: the first slot past the interval
			// bounds how many attempts fit.
			for i := 0; i < len(slots); i++ {
				if slots[i] > r.Interval {
					possibleAttempts = i
					break
				}
			}
		}
	default:
		// Unknown delay function: report zero viable attempts.
		return false, 0, 0
	}
	if possibleAttempts < 0 { // can happen if delay is bigger than interval
		possibleAttempts = 0
	}
	return valid, possibleAttempts, recommendedInterval
}
  6454  
  6455  func NewReschedulePolicy(jobType string) *ReschedulePolicy {
  6456  	switch jobType {
  6457  	case JobTypeService:
  6458  		rp := DefaultServiceJobReschedulePolicy
  6459  		return &rp
  6460  	case JobTypeBatch:
  6461  		rp := DefaultBatchJobReschedulePolicy
  6462  		return &rp
  6463  	}
  6464  	return nil
  6465  }
  6466  
const (
	// MigrateStrategyHealthChecks determines migration health from the
	// allocation's health checks.
	MigrateStrategyHealthChecks = "checks"

	// MigrateStrategyHealthStates determines migration health from the
	// allocation's task states only.
	MigrateStrategyHealthStates = "task_states"
)
  6471  
// MigrateStrategy controls how allocations for a task group are migrated;
// validated per group in TaskGroup.Validate for service jobs.
type MigrateStrategy struct {
	// MaxParallel is how many allocations may be migrated at the same time.
	MaxParallel     int
	// HealthCheck selects how health is determined; one of the
	// MigrateStrategyHealth* constants.
	HealthCheck     string
	// MinHealthyTime is how long an allocation must be healthy before it is
	// considered migrated successfully.
	MinHealthyTime  time.Duration
	// HealthyDeadline is the deadline for an allocation to become healthy.
	HealthyDeadline time.Duration
}
  6478  
  6479  // DefaultMigrateStrategy is used for backwards compat with pre-0.8 Allocations
  6480  // that lack an update strategy.
  6481  //
  6482  // This function should match its counterpart in api/tasks.go
  6483  func DefaultMigrateStrategy() *MigrateStrategy {
  6484  	return &MigrateStrategy{
  6485  		MaxParallel:     1,
  6486  		HealthCheck:     MigrateStrategyHealthChecks,
  6487  		MinHealthyTime:  10 * time.Second,
  6488  		HealthyDeadline: 5 * time.Minute,
  6489  	}
  6490  }
  6491  
  6492  func (m *MigrateStrategy) Validate() error {
  6493  	var mErr multierror.Error
  6494  
  6495  	if m.MaxParallel < 0 {
  6496  		_ = multierror.Append(&mErr, fmt.Errorf("MaxParallel must be >= 0 but found %d", m.MaxParallel))
  6497  	}
  6498  
  6499  	switch m.HealthCheck {
  6500  	case MigrateStrategyHealthChecks, MigrateStrategyHealthStates:
  6501  		// ok
  6502  	case "":
  6503  		if m.MaxParallel > 0 {
  6504  			_ = multierror.Append(&mErr, fmt.Errorf("Missing HealthCheck"))
  6505  		}
  6506  	default:
  6507  		_ = multierror.Append(&mErr, fmt.Errorf("Invalid HealthCheck: %q", m.HealthCheck))
  6508  	}
  6509  
  6510  	if m.MinHealthyTime < 0 {
  6511  		_ = multierror.Append(&mErr, fmt.Errorf("MinHealthyTime is %s and must be >= 0", m.MinHealthyTime))
  6512  	}
  6513  
  6514  	if m.HealthyDeadline < 0 {
  6515  		_ = multierror.Append(&mErr, fmt.Errorf("HealthyDeadline is %s and must be >= 0", m.HealthyDeadline))
  6516  	}
  6517  
  6518  	if m.MinHealthyTime > m.HealthyDeadline {
  6519  		_ = multierror.Append(&mErr, fmt.Errorf("MinHealthyTime must be less than HealthyDeadline"))
  6520  	}
  6521  
  6522  	return mErr.ErrorOrNil()
  6523  }
  6524  
// TaskGroup is an atomic unit of placement. Each task group belongs to
// a job and may contain any number of tasks. A task group supports running
// many replicas using the same configuration.
type TaskGroup struct {
	// Name of the task group
	Name string

	// Count is the number of replicas of this task group that should
	// be scheduled.
	Count int

	// Update is used to control the update strategy for this task group
	Update *UpdateStrategy

	// Migrate is used to control the migration strategy for this task group
	Migrate *MigrateStrategy

	// Constraints can be specified at a task group level and apply to
	// all the tasks contained.
	Constraints []*Constraint

	// Scaling is the list of autoscaling policies for the TaskGroup
	Scaling *ScalingPolicy

	// RestartPolicy of a TaskGroup
	RestartPolicy *RestartPolicy

	// Tasks are the collection of tasks that this task group needs to run
	Tasks []*Task

	// EphemeralDisk is the disk resources that the task group requests
	EphemeralDisk *EphemeralDisk

	// Meta is used to associate arbitrary metadata with this
	// task group. This is opaque to Nomad.
	Meta map[string]string

	// ReschedulePolicy is used to configure how the scheduler should
	// retry failed allocations.
	ReschedulePolicy *ReschedulePolicy

	// Affinities can be specified at the task group level to express
	// scheduling preferences.
	Affinities []*Affinity

	// Spread can be specified at the task group level to express spreading
	// allocations across a desired attribute, such as datacenter
	Spreads []*Spread

	// Networks are the network configuration for the task group. This can be
	// overridden in the task.
	Networks Networks

	// Consul configuration specific to this task group
	Consul *Consul

	// Services this group provides
	Services []*Service

	// Volumes is a map of volumes that have been requested by the task group.
	Volumes map[string]*VolumeRequest

	// ShutdownDelay is the amount of time to wait between deregistering
	// group services in consul and stopping tasks.
	ShutdownDelay *time.Duration

	// StopAfterClientDisconnect, if set, configures the client to stop the task group
	// after this duration since the last known good heartbeat.
	// Mutually exclusive with MaxClientDisconnect (enforced in Validate).
	StopAfterClientDisconnect *time.Duration

	// MaxClientDisconnect, if set, configures the client to allow placed
	// allocations for tasks in this group to attempt to resume running without a restart.
	// Mutually exclusive with StopAfterClientDisconnect (enforced in Validate).
	MaxClientDisconnect *time.Duration
}
  6599  
  6600  func (tg *TaskGroup) Copy() *TaskGroup {
  6601  	if tg == nil {
  6602  		return nil
  6603  	}
  6604  	ntg := new(TaskGroup)
  6605  	*ntg = *tg
  6606  	ntg.Update = ntg.Update.Copy()
  6607  	ntg.Constraints = CopySliceConstraints(ntg.Constraints)
  6608  	ntg.RestartPolicy = ntg.RestartPolicy.Copy()
  6609  	ntg.ReschedulePolicy = ntg.ReschedulePolicy.Copy()
  6610  	ntg.Affinities = CopySliceAffinities(ntg.Affinities)
  6611  	ntg.Spreads = CopySliceSpreads(ntg.Spreads)
  6612  	ntg.Volumes = CopyMapVolumeRequest(ntg.Volumes)
  6613  	ntg.Scaling = ntg.Scaling.Copy()
  6614  	ntg.Consul = ntg.Consul.Copy()
  6615  
  6616  	// Copy the network objects
  6617  	if tg.Networks != nil {
  6618  		n := len(tg.Networks)
  6619  		ntg.Networks = make([]*NetworkResource, n)
  6620  		for i := 0; i < n; i++ {
  6621  			ntg.Networks[i] = tg.Networks[i].Copy()
  6622  		}
  6623  	}
  6624  
  6625  	if tg.Tasks != nil {
  6626  		tasks := make([]*Task, len(ntg.Tasks))
  6627  		for i, t := range ntg.Tasks {
  6628  			tasks[i] = t.Copy()
  6629  		}
  6630  		ntg.Tasks = tasks
  6631  	}
  6632  
  6633  	ntg.Meta = maps.Clone(ntg.Meta)
  6634  
  6635  	if tg.EphemeralDisk != nil {
  6636  		ntg.EphemeralDisk = tg.EphemeralDisk.Copy()
  6637  	}
  6638  
  6639  	if tg.Services != nil {
  6640  		ntg.Services = make([]*Service, len(tg.Services))
  6641  		for i, s := range tg.Services {
  6642  			ntg.Services[i] = s.Copy()
  6643  		}
  6644  	}
  6645  
  6646  	if tg.ShutdownDelay != nil {
  6647  		ntg.ShutdownDelay = tg.ShutdownDelay
  6648  	}
  6649  
  6650  	if tg.StopAfterClientDisconnect != nil {
  6651  		ntg.StopAfterClientDisconnect = tg.StopAfterClientDisconnect
  6652  	}
  6653  
  6654  	if tg.MaxClientDisconnect != nil {
  6655  		ntg.MaxClientDisconnect = tg.MaxClientDisconnect
  6656  	}
  6657  
  6658  	return ntg
  6659  }
  6660  
  6661  // Canonicalize is used to canonicalize fields in the TaskGroup.
  6662  func (tg *TaskGroup) Canonicalize(job *Job) {
  6663  	// Ensure that an empty and nil map are treated the same to avoid scheduling
  6664  	// problems since we use reflect DeepEquals.
  6665  	if len(tg.Meta) == 0 {
  6666  		tg.Meta = nil
  6667  	}
  6668  
  6669  	// Set the default restart policy.
  6670  	if tg.RestartPolicy == nil {
  6671  		tg.RestartPolicy = NewRestartPolicy(job.Type)
  6672  	}
  6673  
  6674  	if tg.ReschedulePolicy == nil {
  6675  		tg.ReschedulePolicy = NewReschedulePolicy(job.Type)
  6676  	}
  6677  
  6678  	// Canonicalize Migrate for service jobs
  6679  	if job.Type == JobTypeService && tg.Migrate == nil {
  6680  		tg.Migrate = DefaultMigrateStrategy()
  6681  	}
  6682  
  6683  	// Set a default ephemeral disk object if the user has not requested for one
  6684  	if tg.EphemeralDisk == nil {
  6685  		tg.EphemeralDisk = DefaultEphemeralDisk()
  6686  	}
  6687  
  6688  	if job.Type == JobTypeSystem && tg.Count == 0 {
  6689  		tg.Count = 1
  6690  	}
  6691  
  6692  	if tg.Scaling != nil {
  6693  		tg.Scaling.Canonicalize()
  6694  	}
  6695  
  6696  	for _, service := range tg.Services {
  6697  		service.Canonicalize(job.Name, tg.Name, "group", job.Namespace)
  6698  	}
  6699  
  6700  	for _, network := range tg.Networks {
  6701  		network.Canonicalize()
  6702  	}
  6703  
  6704  	for _, task := range tg.Tasks {
  6705  		task.Canonicalize(job, tg)
  6706  	}
  6707  }
  6708  
  6709  // NomadServices returns a list of all group and task - level services in tg that
  6710  // are making use of the nomad service provider.
  6711  func (tg *TaskGroup) NomadServices() []*Service {
  6712  	return tg.filterServices(func(s *Service) bool {
  6713  		return s.Provider == ServiceProviderNomad
  6714  	})
  6715  }
  6716  
  6717  func (tg *TaskGroup) ConsulServices() []*Service {
  6718  	return tg.filterServices(func(s *Service) bool {
  6719  		return s.Provider == ServiceProviderConsul || s.Provider == ""
  6720  	})
  6721  }
  6722  
  6723  func (tg *TaskGroup) filterServices(f func(s *Service) bool) []*Service {
  6724  	var services []*Service
  6725  	for _, service := range tg.Services {
  6726  		if f(service) {
  6727  			services = append(services, service)
  6728  		}
  6729  	}
  6730  	for _, task := range tg.Tasks {
  6731  		for _, service := range task.Services {
  6732  			if f(service) {
  6733  				services = append(services, service)
  6734  			}
  6735  		}
  6736  	}
  6737  	return services
  6738  }
  6739  
// Validate is used to check a task group for reasonable configuration:
// basic fields, per-job-type restrictions (system jobs disallow affinity,
// spread, and reschedule blocks), a single leader task, unique task names,
// and the validity of all nested objects. All problems are accumulated
// into a single multierror.
func (tg *TaskGroup) Validate(j *Job) error {
	var mErr multierror.Error
	if tg.Name == "" {
		mErr.Errors = append(mErr.Errors, errors.New("Missing task group name"))
	} else if strings.Contains(tg.Name, "\000") {
		mErr.Errors = append(mErr.Errors, errors.New("Task group name contains null character"))
	}
	if tg.Count < 0 {
		mErr.Errors = append(mErr.Errors, errors.New("Task group count can't be negative"))
	}
	if len(tg.Tasks) == 0 {
		// could be a lone consul gateway inserted by the connect mutator
		mErr.Errors = append(mErr.Errors, errors.New("Missing tasks for task group"))
	}

	// The two client-disconnect behaviors are mutually exclusive.
	if tg.MaxClientDisconnect != nil && tg.StopAfterClientDisconnect != nil {
		mErr.Errors = append(mErr.Errors, errors.New("Task group cannot be configured with both max_client_disconnect and stop_after_client_disconnect"))
	}

	if tg.MaxClientDisconnect != nil && *tg.MaxClientDisconnect < 0 {
		mErr.Errors = append(mErr.Errors, errors.New("max_client_disconnect cannot be negative"))
	}

	for idx, constr := range tg.Constraints {
		if err := constr.Validate(); err != nil {
			outer := fmt.Errorf("Constraint %d validation failed: %s", idx+1, err)
			mErr.Errors = append(mErr.Errors, outer)
		}
	}

	// Affinities are forbidden on system jobs; otherwise validate each one.
	if j.Type == JobTypeSystem {
		if tg.Affinities != nil {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("System jobs may not have an affinity block"))
		}
	} else {
		for idx, affinity := range tg.Affinities {
			if err := affinity.Validate(); err != nil {
				outer := fmt.Errorf("Affinity %d validation failed: %s", idx+1, err)
				mErr.Errors = append(mErr.Errors, outer)
			}
		}
	}

	if tg.RestartPolicy != nil {
		if err := tg.RestartPolicy.Validate(); err != nil {
			mErr.Errors = append(mErr.Errors, err)
		}
	} else {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("Task Group %v should have a restart policy", tg.Name))
	}

	// Spreads are forbidden on system jobs; otherwise validate each one.
	if j.Type == JobTypeSystem {
		if tg.Spreads != nil {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("System jobs may not have a spread block"))
		}
	} else {
		for idx, spread := range tg.Spreads {
			if err := spread.Validate(); err != nil {
				outer := fmt.Errorf("Spread %d validation failed: %s", idx+1, err)
				mErr.Errors = append(mErr.Errors, outer)
			}
		}
	}

	// System jobs are never rescheduled; every other type requires a
	// reschedule policy (Canonicalize fills in the default).
	if j.Type == JobTypeSystem {
		if tg.ReschedulePolicy != nil {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("System jobs should not have a reschedule policy"))
		}
	} else {
		if tg.ReschedulePolicy != nil {
			if err := tg.ReschedulePolicy.Validate(); err != nil {
				mErr.Errors = append(mErr.Errors, err)
			}
		} else {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Task Group %v should have a reschedule policy", tg.Name))
		}
	}

	if tg.EphemeralDisk != nil {
		if err := tg.EphemeralDisk.Validate(); err != nil {
			mErr.Errors = append(mErr.Errors, err)
		}
	} else {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("Task Group %v should have an ephemeral disk object", tg.Name))
	}

	// Validate the update strategy
	if u := tg.Update; u != nil {
		switch j.Type {
		case JobTypeService, JobTypeSystem:
		default:
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Job type %q does not allow update block", j.Type))
		}
		if err := u.Validate(); err != nil {
			mErr.Errors = append(mErr.Errors, err)
		}
	}

	// Validate the migration strategy (only service jobs may have one)
	switch j.Type {
	case JobTypeService:
		if tg.Migrate != nil {
			if err := tg.Migrate.Validate(); err != nil {
				mErr.Errors = append(mErr.Errors, err)
			}
		}
	default:
		if tg.Migrate != nil {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Job type %q does not allow migrate block", j.Type))
		}
	}

	// Check that there is only one leader task if any, and that task names
	// are non-empty and unique within the group.
	tasks := make(map[string]int)
	leaderTasks := 0
	for idx, task := range tg.Tasks {
		if task.Name == "" {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Task %d missing name", idx+1))
		} else if existing, ok := tasks[task.Name]; ok {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Task %d redefines '%s' from task %d", idx+1, task.Name, existing+1))
		} else {
			tasks[task.Name] = idx
		}

		if task.Leader {
			leaderTasks++
		}
	}

	if leaderTasks > 1 {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("Only one task may be marked as leader"))
	}

	// Validate the volume requests
	var canaries int
	if tg.Update != nil {
		canaries = tg.Update.Canary
	}
	for name, volReq := range tg.Volumes {
		if err := volReq.Validate(j.Type, tg.Count, canaries); err != nil {
			mErr.Errors = append(mErr.Errors, fmt.Errorf(
				"Task group volume validation for %s failed: %v", name, err))
		}
	}

	// Validate task group and task network resources
	if err := tg.validateNetworks(); err != nil {
		outer := fmt.Errorf("Task group network validation failed: %v", err)
		mErr.Errors = append(mErr.Errors, outer)
	}

	// Validate task group and task services
	if err := tg.validateServices(); err != nil {
		outer := fmt.Errorf("Task group service validation failed: %v", err)
		mErr.Errors = append(mErr.Errors, outer)
	}

	// Validate group service script-checks
	if err := tg.validateScriptChecksInGroupServices(); err != nil {
		outer := fmt.Errorf("Task group service check validation failed: %v", err)
		mErr.Errors = append(mErr.Errors, outer)
	}

	// Validate the scaling policy
	if err := tg.validateScalingPolicy(j); err != nil {
		outer := fmt.Errorf("Task group scaling policy validation failed: %v", err)
		mErr.Errors = append(mErr.Errors, outer)
	}

	// Validate the tasks
	for _, task := range tg.Tasks {
		if err := task.Validate(j.Type, tg); err != nil {
			outer := fmt.Errorf("Task %s validation failed: %v", task.Name, err)
			mErr.Errors = append(mErr.Errors, outer)
		}
	}

	return mErr.ErrorOrNil()
}
  6919  
  6920  func (tg *TaskGroup) validateNetworks() error {
  6921  	var mErr multierror.Error
  6922  	portLabels := make(map[string]string)
  6923  	// host_network -> static port tracking
  6924  	staticPortsIndex := make(map[string]map[int]string)
  6925  
  6926  	for _, net := range tg.Networks {
  6927  		for _, port := range append(net.ReservedPorts, net.DynamicPorts...) {
  6928  			if other, ok := portLabels[port.Label]; ok {
  6929  				mErr.Errors = append(mErr.Errors, fmt.Errorf("Port label %s already in use by %s", port.Label, other))
  6930  			} else {
  6931  				portLabels[port.Label] = "taskgroup network"
  6932  			}
  6933  
  6934  			if port.Value != 0 {
  6935  				hostNetwork := port.HostNetwork
  6936  				if hostNetwork == "" {
  6937  					hostNetwork = "default"
  6938  				}
  6939  				staticPorts, ok := staticPortsIndex[hostNetwork]
  6940  				if !ok {
  6941  					staticPorts = make(map[int]string)
  6942  				}
  6943  				// static port
  6944  				if other, ok := staticPorts[port.Value]; ok {
  6945  					err := fmt.Errorf("Static port %d already reserved by %s", port.Value, other)
  6946  					mErr.Errors = append(mErr.Errors, err)
  6947  				} else if port.Value > math.MaxUint16 {
  6948  					err := fmt.Errorf("Port %s (%d) cannot be greater than %d", port.Label, port.Value, math.MaxUint16)
  6949  					mErr.Errors = append(mErr.Errors, err)
  6950  				} else {
  6951  					staticPorts[port.Value] = fmt.Sprintf("taskgroup network:%s", port.Label)
  6952  					staticPortsIndex[hostNetwork] = staticPorts
  6953  				}
  6954  			}
  6955  
  6956  			if port.To < -1 {
  6957  				err := fmt.Errorf("Port %q cannot be mapped to negative value %d", port.Label, port.To)
  6958  				mErr.Errors = append(mErr.Errors, err)
  6959  			} else if port.To > math.MaxUint16 {
  6960  				err := fmt.Errorf("Port %q cannot be mapped to a port (%d) greater than %d", port.Label, port.To, math.MaxUint16)
  6961  				mErr.Errors = append(mErr.Errors, err)
  6962  			}
  6963  		}
  6964  
  6965  		// Validate the hostname field to be a valid DNS name. If the parameter
  6966  		// looks like it includes an interpolation value, we skip this. It
  6967  		// would be nice to validate additional parameters, but this isn't the
  6968  		// right place.
  6969  		if net.Hostname != "" && !strings.Contains(net.Hostname, "${") {
  6970  			if _, ok := dns.IsDomainName(net.Hostname); !ok {
  6971  				mErr.Errors = append(mErr.Errors, errors.New("Hostname is not a valid DNS name"))
  6972  			}
  6973  		}
  6974  	}
  6975  
  6976  	// Check for duplicate tasks or port labels, and no duplicated static ports
  6977  	for _, task := range tg.Tasks {
  6978  		if task.Resources == nil {
  6979  			continue
  6980  		}
  6981  
  6982  		for _, net := range task.Resources.Networks {
  6983  			for _, port := range append(net.ReservedPorts, net.DynamicPorts...) {
  6984  				if other, ok := portLabels[port.Label]; ok {
  6985  					mErr.Errors = append(mErr.Errors, fmt.Errorf("Port label %s already in use by %s", port.Label, other))
  6986  				}
  6987  
  6988  				if port.Value != 0 {
  6989  					hostNetwork := port.HostNetwork
  6990  					if hostNetwork == "" {
  6991  						hostNetwork = "default"
  6992  					}
  6993  					staticPorts, ok := staticPortsIndex[hostNetwork]
  6994  					if !ok {
  6995  						staticPorts = make(map[int]string)
  6996  					}
  6997  					if other, ok := staticPorts[port.Value]; ok {
  6998  						err := fmt.Errorf("Static port %d already reserved by %s", port.Value, other)
  6999  						mErr.Errors = append(mErr.Errors, err)
  7000  					} else if port.Value > math.MaxUint16 {
  7001  						err := fmt.Errorf("Port %s (%d) cannot be greater than %d", port.Label, port.Value, math.MaxUint16)
  7002  						mErr.Errors = append(mErr.Errors, err)
  7003  					} else {
  7004  						staticPorts[port.Value] = fmt.Sprintf("%s:%s", task.Name, port.Label)
  7005  						staticPortsIndex[hostNetwork] = staticPorts
  7006  					}
  7007  				}
  7008  			}
  7009  		}
  7010  	}
  7011  	return mErr.ErrorOrNil()
  7012  }
  7013  
// validateServices runs Service.Validate() on group-level services, checks
// group service checks that refer to tasks only refer to tasks that exist,
// ensures service IDs are unique within the group, and enforces that a
// single service provider is used across the whole group.
func (tg *TaskGroup) validateServices() error {
	var mErr multierror.Error

	// Accumulate task names in this group
	taskSet := set.New[string](len(tg.Tasks))

	// each service in a group must be unique (i.e. used in MakeAllocServiceID)
	type unique struct {
		name string
		task string
		port string
	}

	// Accumulate service IDs in this group
	idSet := set.New[unique](0)

	// Accumulate IDs that are duplicates
	idDuplicateSet := set.New[unique](0)

	// Accumulate the providers used for this task group. Currently, Nomad only
	// allows the use of a single service provider within a task group.
	providerSet := set.New[string](1)

	// Create a map of known tasks and their services so we can compare
	// vs the group-level services and checks
	for _, task := range tg.Tasks {
		taskSet.Insert(task.Name)

		if len(task.Services) == 0 {
			continue
		}

		for _, service := range task.Services {

			// Ensure a task-level service only specifies the task it belongs to.
			if service.TaskName != "" && service.TaskName != task.Name {
				mErr.Errors = append(mErr.Errors,
					fmt.Errorf("Service %s is invalid: may only specify task the service belongs to, got %q", service.Name, service.TaskName),
				)
			}

			// Ensure task-level checks only specify the task they belong to.
			for _, check := range service.Checks {
				if check.TaskName != "" && check.TaskName != task.Name {
					mErr.Errors = append(mErr.Errors,
						fmt.Errorf("Check %s is invalid: may only specify task the check belongs to, got %q", check.Name, check.TaskName),
					)
				}
			}

			// Track that we have seen this service id
			id := unique{service.Name, task.Name, service.PortLabel}
			if !idSet.Insert(id) {
				// accumulate duplicates for a single error later on
				idDuplicateSet.Insert(id)
			}

			// Track that we have seen this service provider
			providerSet.Insert(service.Provider)
		}
	}

	for i, service := range tg.Services {

		// Track that we have seen this service id
		id := unique{service.Name, "group", service.PortLabel}
		if !idSet.Insert(id) {
			// accumulate duplicates for a single error later on
			idDuplicateSet.Insert(id)
		}

		// Track that we have seen this service provider
		providerSet.Insert(service.Provider)

		if err := service.Validate(); err != nil {
			outer := fmt.Errorf("Service[%d] %s validation failed: %s", i, service.Name, err)
			mErr.Errors = append(mErr.Errors, outer)
			// we skip the rest of this iteration to avoid the risk of
			// crashing on null-pointer access in a later step, accepting
			// that we might miss out on error messages to provide the user.
			continue
		}
		if service.AddressMode == AddressModeDriver {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("service %q cannot use address_mode=\"driver\", only services defined in a \"task\" block can use this mode", service.Name))
		}

		// Group-level checks may name a task; verify the mode and that the
		// referenced task exists.
		for _, check := range service.Checks {
			if check.TaskName != "" {
				if check.AddressMode == AddressModeDriver {
					mErr.Errors = append(mErr.Errors, fmt.Errorf("Check %q invalid: cannot use address_mode=\"driver\", only checks defined in a \"task\" service block can use this mode", service.Name))
				}
				if !taskSet.Contains(check.TaskName) {
					mErr.Errors = append(mErr.Errors,
						fmt.Errorf("Check %s invalid: refers to non-existent task %s", check.Name, check.TaskName))
				}
			}
		}
	}

	// Produce an error of any services which are not unique enough in the group
	// i.e. have same <task, name, port>
	if idDuplicateSet.Size() > 0 {
		mErr.Errors = append(mErr.Errors,
			fmt.Errorf(
				"Services are not unique: %s",
				idDuplicateSet.StringFunc(
					func(u unique) string {
						s := u.task + "->" + u.name
						if u.port != "" {
							s += ":" + u.port
						}
						return s
					},
				),
			),
		)
	}

	// The initial feature release of native service discovery only allows for
	// a single service provider to be used across all services in a task
	// group.
	if providerSet.Size() > 1 {
		mErr.Errors = append(mErr.Errors,
			errors.New("Multiple service providers used: task group services must use the same provider"))
	}

	return mErr.ErrorOrNil()
}
  7144  
  7145  // validateScriptChecksInGroupServices ensures group-level services with script
  7146  // checks know what task driver to use. Either the service.task or service.check.task
  7147  // parameter must be configured.
  7148  func (tg *TaskGroup) validateScriptChecksInGroupServices() error {
  7149  	var mErr multierror.Error
  7150  	for _, service := range tg.Services {
  7151  		if service.TaskName == "" {
  7152  			for _, check := range service.Checks {
  7153  				if check.Type == "script" && check.TaskName == "" {
  7154  					mErr.Errors = append(mErr.Errors,
  7155  						fmt.Errorf("Service [%s]->%s or Check %s must specify task parameter",
  7156  							tg.Name, service.Name, check.Name,
  7157  						))
  7158  				}
  7159  			}
  7160  		}
  7161  	}
  7162  	return mErr.ErrorOrNil()
  7163  }
  7164  
  7165  // validateScalingPolicy ensures that the scaling policy has consistent
  7166  // min and max, not in conflict with the task group count
  7167  func (tg *TaskGroup) validateScalingPolicy(j *Job) error {
  7168  	if tg.Scaling == nil {
  7169  		return nil
  7170  	}
  7171  
  7172  	var mErr multierror.Error
  7173  
  7174  	err := tg.Scaling.Validate()
  7175  	if err != nil {
  7176  		// prefix scaling policy errors
  7177  		if me, ok := err.(*multierror.Error); ok {
  7178  			for _, e := range me.Errors {
  7179  				mErr.Errors = append(mErr.Errors, fmt.Errorf("Scaling policy invalid: %s", e))
  7180  			}
  7181  		}
  7182  	}
  7183  
  7184  	if tg.Scaling.Max < int64(tg.Count) {
  7185  		mErr.Errors = append(mErr.Errors,
  7186  			fmt.Errorf("Scaling policy invalid: task group count must not be greater than maximum count in scaling policy"))
  7187  	}
  7188  
  7189  	if int64(tg.Count) < tg.Scaling.Min && !(j.IsMultiregion() && tg.Count == 0 && j.Region == "global") {
  7190  		mErr.Errors = append(mErr.Errors,
  7191  			fmt.Errorf("Scaling policy invalid: task group count must not be less than minimum count in scaling policy"))
  7192  	}
  7193  
  7194  	return mErr.ErrorOrNil()
  7195  }
  7196  
  7197  // Warnings returns a list of warnings that may be from dubious settings or
  7198  // deprecation warnings.
  7199  func (tg *TaskGroup) Warnings(j *Job) error {
  7200  	var mErr multierror.Error
  7201  
  7202  	// Validate the update strategy
  7203  	if u := tg.Update; u != nil {
  7204  		// Check the counts are appropriate
  7205  		if tg.Count > 1 && u.MaxParallel > tg.Count && !(j.IsMultiregion() && tg.Count == 0) {
  7206  			mErr.Errors = append(mErr.Errors,
  7207  				fmt.Errorf("Update max parallel count is greater than task group count (%d > %d). "+
  7208  					"A destructive change would result in the simultaneous replacement of all allocations.", u.MaxParallel, tg.Count))
  7209  		}
  7210  	}
  7211  
  7212  	// Check for mbits network field
  7213  	if len(tg.Networks) > 0 && tg.Networks[0].MBits > 0 {
  7214  		mErr.Errors = append(mErr.Errors, fmt.Errorf("mbits has been deprecated as of Nomad 0.12.0. Please remove mbits from the network block"))
  7215  	}
  7216  
  7217  	for _, t := range tg.Tasks {
  7218  		if err := t.Warnings(); err != nil {
  7219  			err = multierror.Prefix(err, fmt.Sprintf("Task %q:", t.Name))
  7220  			mErr.Errors = append(mErr.Errors, err)
  7221  		}
  7222  	}
  7223  
  7224  	return mErr.ErrorOrNil()
  7225  }
  7226  
  7227  // LookupTask finds a task by name
  7228  func (tg *TaskGroup) LookupTask(name string) *Task {
  7229  	for _, t := range tg.Tasks {
  7230  		if t.Name == name {
  7231  			return t
  7232  		}
  7233  	}
  7234  	return nil
  7235  }
  7236  
  7237  // UsesConnect for convenience returns true if the TaskGroup contains at least
  7238  // one service that makes use of Consul Connect features.
  7239  //
  7240  // Currently used for validating that the task group contains one or more connect
  7241  // aware services before generating a service identity token.
  7242  func (tg *TaskGroup) UsesConnect() bool {
  7243  	for _, service := range tg.Services {
  7244  		if service.Connect != nil {
  7245  			if service.Connect.IsNative() || service.Connect.HasSidecar() || service.Connect.IsGateway() {
  7246  				return true
  7247  			}
  7248  		}
  7249  	}
  7250  	return false
  7251  }
  7252  
  7253  // UsesConnectGateway for convenience returns true if the TaskGroup contains at
  7254  // least one service that makes use of Consul Connect Gateway features.
  7255  func (tg *TaskGroup) UsesConnectGateway() bool {
  7256  	for _, service := range tg.Services {
  7257  		if service.Connect != nil {
  7258  			if service.Connect.IsGateway() {
  7259  				return true
  7260  			}
  7261  		}
  7262  	}
  7263  	return false
  7264  }
  7265  
  7266  func (tg *TaskGroup) GoString() string {
  7267  	return fmt.Sprintf("*%#v", *tg)
  7268  }
  7269  
// CheckRestart describes if and when a task should be restarted based on
// failing health checks. All fields are plain value types, so the zero
// value is usable and Copy can be a shallow copy.
type CheckRestart struct {
	Limit          int           // Restart task after this many unhealthy intervals
	Grace          time.Duration // Grace time to give tasks after starting to get healthy
	IgnoreWarnings bool          // If true treat checks in `warning` as passing
}
  7277  
  7278  func (c *CheckRestart) Copy() *CheckRestart {
  7279  	if c == nil {
  7280  		return nil
  7281  	}
  7282  
  7283  	nc := new(CheckRestart)
  7284  	*nc = *c
  7285  	return nc
  7286  }
  7287  
  7288  func (c *CheckRestart) Equal(o *CheckRestart) bool {
  7289  	if c == nil || o == nil {
  7290  		return c == o
  7291  	}
  7292  
  7293  	if c.Limit != o.Limit {
  7294  		return false
  7295  	}
  7296  
  7297  	if c.Grace != o.Grace {
  7298  		return false
  7299  	}
  7300  
  7301  	if c.IgnoreWarnings != o.IgnoreWarnings {
  7302  		return false
  7303  	}
  7304  
  7305  	return true
  7306  }
  7307  
  7308  func (c *CheckRestart) Validate() error {
  7309  	if c == nil {
  7310  		return nil
  7311  	}
  7312  
  7313  	var mErr multierror.Error
  7314  	if c.Limit < 0 {
  7315  		mErr.Errors = append(mErr.Errors, fmt.Errorf("limit must be greater than or equal to 0 but found %d", c.Limit))
  7316  	}
  7317  
  7318  	if c.Grace < 0 {
  7319  		mErr.Errors = append(mErr.Errors, fmt.Errorf("grace period must be greater than or equal to 0 but found %d", c.Grace))
  7320  	}
  7321  
  7322  	return mErr.ErrorOrNil()
  7323  }
  7324  
const (
	// DefaultKillTimeout is the default timeout between signaling a task it
	// will be killed and killing it. Applied in Task.Canonicalize when a task
	// does not set its own KillTimeout.
	DefaultKillTimeout = 5 * time.Second
)
  7330  
// LogConfig provides configuration for log rotation
type LogConfig struct {
	MaxFiles      int  // maximum number of rotated log files to retain
	MaxFileSizeMB int  // maximum size of each log file, in megabytes
	Disabled      bool // if true, log collection is disabled for the task
}
  7337  
  7338  func (l *LogConfig) Equal(o *LogConfig) bool {
  7339  	if l == nil || o == nil {
  7340  		return l == o
  7341  	}
  7342  
  7343  	if l.MaxFiles != o.MaxFiles {
  7344  		return false
  7345  	}
  7346  
  7347  	if l.MaxFileSizeMB != o.MaxFileSizeMB {
  7348  		return false
  7349  	}
  7350  
  7351  	if l.Disabled != o.Disabled {
  7352  		return false
  7353  	}
  7354  
  7355  	return true
  7356  }
  7357  
  7358  func (l *LogConfig) Copy() *LogConfig {
  7359  	if l == nil {
  7360  		return nil
  7361  	}
  7362  	return &LogConfig{
  7363  		MaxFiles:      l.MaxFiles,
  7364  		MaxFileSizeMB: l.MaxFileSizeMB,
  7365  		Disabled:      l.Disabled,
  7366  	}
  7367  }
  7368  
  7369  // DefaultLogConfig returns the default LogConfig values.
  7370  func DefaultLogConfig() *LogConfig {
  7371  	return &LogConfig{
  7372  		MaxFiles:      10,
  7373  		MaxFileSizeMB: 10,
  7374  		Disabled:      false,
  7375  	}
  7376  }
  7377  
  7378  // Validate returns an error if the log config specified are less than the
  7379  // minimum allowed. Note that because we have a non-zero default MaxFiles and
  7380  // MaxFileSizeMB, we can't validate that they're unset if Disabled=true
  7381  func (l *LogConfig) Validate(disk *EphemeralDisk) error {
  7382  	var mErr multierror.Error
  7383  	if l.MaxFiles < 1 {
  7384  		mErr.Errors = append(mErr.Errors, fmt.Errorf("minimum number of files is 1; got %d", l.MaxFiles))
  7385  	}
  7386  	if l.MaxFileSizeMB < 1 {
  7387  		mErr.Errors = append(mErr.Errors, fmt.Errorf("minimum file size is 1MB; got %d", l.MaxFileSizeMB))
  7388  	}
  7389  	if disk != nil {
  7390  		logUsage := (l.MaxFiles * l.MaxFileSizeMB)
  7391  		if disk.SizeMB <= logUsage {
  7392  			mErr.Errors = append(mErr.Errors,
  7393  				fmt.Errorf("log storage (%d MB) must be less than requested disk capacity (%d MB)",
  7394  					logUsage, disk.SizeMB))
  7395  		}
  7396  	}
  7397  	return mErr.ErrorOrNil()
  7398  }
  7399  
// Task is a single process typically that is executed as part of a task group.
type Task struct {
	// Name of the task
	Name string

	// Driver is used to control which driver is used
	Driver string

	// User is used to determine which user will run the task. It defaults to
	// the same user the Nomad client is being run as.
	User string

	// Config is provided to the driver to initialize
	Config map[string]interface{}

	// Map of environment variables to be used by the driver
	Env map[string]string

	// List of service definitions exposed by the Task
	Services []*Service

	// Vault is used to define the set of Vault policies that this task should
	// have access to.
	Vault *Vault

	// Templates are the set of templates to be rendered for the task.
	Templates []*Template

	// Constraints can be specified at a task level and apply only to
	// the particular task.
	Constraints []*Constraint

	// Affinities can be specified at the task level to express
	// scheduling preferences
	Affinities []*Affinity

	// Resources is the resources needed by this task
	Resources *Resources

	// RestartPolicy of a TaskGroup
	RestartPolicy *RestartPolicy

	// DispatchPayload configures how the task retrieves its input from a dispatch
	DispatchPayload *DispatchPayloadConfig

	// Lifecycle controls when this task runs within the allocation
	// (prestart/poststart/poststop hooks); nil marks a main task.
	Lifecycle *TaskLifecycleConfig

	// Meta is used to associate arbitrary metadata with this
	// task. This is opaque to Nomad.
	Meta map[string]string

	// KillTimeout is the time between signaling a task that it will be
	// killed and killing it.
	KillTimeout time.Duration

	// LogConfig provides configuration for log rotation
	LogConfig *LogConfig

	// Artifacts is a list of artifacts to download and extract before running
	// the task.
	Artifacts []*TaskArtifact

	// Leader marks the task as the leader within the group. When the leader
	// task exits, other tasks will be gracefully terminated.
	Leader bool

	// ShutdownDelay is the duration of the delay between de-registering a
	// task from Consul and sending it a signal to shutdown. See #2441
	ShutdownDelay time.Duration

	// VolumeMounts is a list of Volume name <-> mount configurations that will be
	// attached to this task.
	VolumeMounts []*VolumeMount

	// ScalingPolicies is a list of scaling policies scoped to this task
	ScalingPolicies []*ScalingPolicy

	// KillSignal is the kill signal to use for the task. This is an optional
	// specification and defaults to SIGINT
	KillSignal string

	// Used internally to manage tasks according to their TaskKind. Initial use case
	// is for Consul Connect
	Kind TaskKind

	// CSIPluginConfig is used to configure the plugin supervisor for the task.
	CSIPluginConfig *TaskCSIPluginConfig

	// Identity controls if and how the workload identity is exposed to
	// tasks similar to the Vault block.
	Identity *WorkloadIdentity
}
  7492  
  7493  // UsesConnect is for conveniently detecting if the Task is able to make use
  7494  // of Consul Connect features. This will be indicated in the TaskKind of the
  7495  // Task, which exports known types of Tasks. UsesConnect will be true if the
  7496  // task is a connect proxy, connect native, or is a connect gateway.
  7497  func (t *Task) UsesConnect() bool {
  7498  	return t.Kind.IsConnectNative() || t.UsesConnectSidecar()
  7499  }
  7500  
  7501  func (t *Task) UsesConnectSidecar() bool {
  7502  	return t.Kind.IsConnectProxy() || t.Kind.IsAnyConnectGateway()
  7503  }
  7504  
  7505  func (t *Task) IsPrestart() bool {
  7506  	return t != nil && t.Lifecycle != nil &&
  7507  		t.Lifecycle.Hook == TaskLifecycleHookPrestart
  7508  }
  7509  
  7510  func (t *Task) IsMain() bool {
  7511  	return t != nil && (t.Lifecycle == nil || t.Lifecycle.Hook == "")
  7512  }
  7513  
  7514  func (t *Task) IsPoststart() bool {
  7515  	return t != nil && t.Lifecycle != nil &&
  7516  		t.Lifecycle.Hook == TaskLifecycleHookPoststart
  7517  }
  7518  
  7519  func (t *Task) IsPoststop() bool {
  7520  	return t != nil && t.Lifecycle != nil &&
  7521  		t.Lifecycle.Hook == TaskLifecycleHookPoststop
  7522  }
  7523  
// Copy returns a deep copy of the task, or nil for a nil receiver. It starts
// from a shallow copy and then replaces each reference-typed field with its
// own deep copy so the result shares no mutable state with the original.
func (t *Task) Copy() *Task {
	if t == nil {
		return nil
	}
	// Shallow copy first; nt's slice/map/pointer fields alias t's until they
	// are individually replaced below.
	nt := new(Task)
	*nt = *t
	nt.Env = maps.Clone(nt.Env)

	if t.Services != nil {
		services := make([]*Service, len(nt.Services))
		for i, s := range nt.Services {
			services[i] = s.Copy()
		}
		nt.Services = services
	}

	nt.Constraints = CopySliceConstraints(nt.Constraints)
	nt.Affinities = CopySliceAffinities(nt.Affinities)
	nt.VolumeMounts = CopySliceVolumeMount(nt.VolumeMounts)
	nt.CSIPluginConfig = nt.CSIPluginConfig.Copy()

	nt.Vault = nt.Vault.Copy()
	nt.Resources = nt.Resources.Copy()
	nt.LogConfig = nt.LogConfig.Copy()
	nt.Meta = maps.Clone(nt.Meta)
	nt.DispatchPayload = nt.DispatchPayload.Copy()
	nt.Lifecycle = nt.Lifecycle.Copy()
	nt.Identity = nt.Identity.Copy()

	// NOTE(review): ScalingPolicies is not deep-copied here, so the copy
	// shares that slice with the original — confirm this is intentional.
	if t.Artifacts != nil {
		// nt.Artifacts still aliases t.Artifacts at this point, so ranging
		// over either is equivalent.
		artifacts := make([]*TaskArtifact, 0, len(t.Artifacts))
		for _, a := range nt.Artifacts {
			artifacts = append(artifacts, a.Copy())
		}
		nt.Artifacts = artifacts
	}

	// Config is an arbitrary nested map; copystructure performs the deep
	// copy. Failure here indicates an uncopyable value, treated as fatal.
	if i, err := copystructure.Copy(nt.Config); err != nil {
		panic(err.Error())
	} else {
		nt.Config = i.(map[string]interface{})
	}

	if t.Templates != nil {
		templates := make([]*Template, len(t.Templates))
		for i, tmpl := range nt.Templates {
			templates[i] = tmpl.Copy()
		}
		nt.Templates = templates
	}

	return nt
}
  7577  
// Canonicalize canonicalizes fields in the task: empty collections are
// normalized to nil, services are canonicalized against the enclosing
// job/group names, and defaults are filled in for resources, restart policy,
// and kill timeout.
func (t *Task) Canonicalize(job *Job, tg *TaskGroup) {
	// Ensure that an empty and nil map are treated the same to avoid scheduling
	// problems since we use reflect DeepEquals.
	if len(t.Meta) == 0 {
		t.Meta = nil
	}
	if len(t.Config) == 0 {
		t.Config = nil
	}
	if len(t.Env) == 0 {
		t.Env = nil
	}

	for _, service := range t.Services {
		service.Canonicalize(job.Name, tg.Name, t.Name, job.Namespace)
	}

	// If Resources are nil initialize them to defaults, otherwise canonicalize
	if t.Resources == nil {
		t.Resources = DefaultResources()
	} else {
		t.Resources.Canonicalize()
	}

	// Inherit the group's restart policy when the task does not set its own.
	if t.RestartPolicy == nil {
		t.RestartPolicy = tg.RestartPolicy
	}

	// Set the default timeout if it is not specified.
	if t.KillTimeout == 0 {
		t.KillTimeout = DefaultKillTimeout
	}

	if t.Vault != nil {
		t.Vault.Canonicalize()
	}

	for _, template := range t.Templates {
		template.Canonicalize()
	}
}
  7620  
  7621  func (t *Task) GoString() string {
  7622  	return fmt.Sprintf("*%#v", *t)
  7623  }
  7624  
  7625  // Validate is used to check a task for reasonable configuration
  7626  func (t *Task) Validate(jobType string, tg *TaskGroup) error {
  7627  	var mErr multierror.Error
  7628  	if t.Name == "" {
  7629  		mErr.Errors = append(mErr.Errors, errors.New("Missing task name"))
  7630  	}
  7631  	if strings.ContainsAny(t.Name, `/\`) {
  7632  		// We enforce this so that when creating the directory on disk it will
  7633  		// not have any slashes.
  7634  		mErr.Errors = append(mErr.Errors, errors.New("Task name cannot include slashes"))
  7635  	} else if strings.Contains(t.Name, "\000") {
  7636  		mErr.Errors = append(mErr.Errors, errors.New("Task name cannot include null characters"))
  7637  	}
  7638  	if t.Driver == "" {
  7639  		mErr.Errors = append(mErr.Errors, errors.New("Missing task driver"))
  7640  	}
  7641  	if t.KillTimeout < 0 {
  7642  		mErr.Errors = append(mErr.Errors, errors.New("KillTimeout must be a positive value"))
  7643  	} else {
  7644  		// Validate the group's update strategy does not conflict with the
  7645  		// task's kill_timeout for service jobs.
  7646  		//
  7647  		// progress_deadline = 0 has a special meaning so it should not be
  7648  		// validated against the task's kill_timeout.
  7649  		conflictsWithProgressDeadline := jobType == JobTypeService &&
  7650  			tg.Update != nil &&
  7651  			tg.Update.ProgressDeadline > 0 &&
  7652  			t.KillTimeout > tg.Update.ProgressDeadline
  7653  		if conflictsWithProgressDeadline {
  7654  			mErr.Errors = append(mErr.Errors, fmt.Errorf("KillTimout (%s) longer than the group's ProgressDeadline (%s)",
  7655  				t.KillTimeout, tg.Update.ProgressDeadline))
  7656  		}
  7657  	}
  7658  	if t.ShutdownDelay < 0 {
  7659  		mErr.Errors = append(mErr.Errors, errors.New("ShutdownDelay must be a positive value"))
  7660  	}
  7661  
  7662  	// Validate the resources.
  7663  	if t.Resources == nil {
  7664  		mErr.Errors = append(mErr.Errors, errors.New("Missing task resources"))
  7665  	} else if err := t.Resources.Validate(); err != nil {
  7666  		mErr.Errors = append(mErr.Errors, err)
  7667  	}
  7668  
  7669  	// Validate the log config
  7670  	if t.LogConfig == nil {
  7671  		mErr.Errors = append(mErr.Errors, errors.New("Missing Log Config"))
  7672  	} else if err := t.LogConfig.Validate(tg.EphemeralDisk); err != nil {
  7673  		mErr.Errors = append(mErr.Errors, err)
  7674  	}
  7675  
  7676  	// Validate constraints and affinities.
  7677  	for idx, constr := range t.Constraints {
  7678  		if err := constr.Validate(); err != nil {
  7679  			outer := fmt.Errorf("Constraint %d validation failed: %s", idx+1, err)
  7680  			mErr.Errors = append(mErr.Errors, outer)
  7681  		}
  7682  
  7683  		switch constr.Operand {
  7684  		case ConstraintDistinctHosts, ConstraintDistinctProperty:
  7685  			outer := fmt.Errorf("Constraint %d has disallowed Operand at task level: %s", idx+1, constr.Operand)
  7686  			mErr.Errors = append(mErr.Errors, outer)
  7687  		}
  7688  	}
  7689  
  7690  	if jobType == JobTypeSystem {
  7691  		if t.Affinities != nil {
  7692  			mErr.Errors = append(mErr.Errors, fmt.Errorf("System jobs may not have an affinity block"))
  7693  		}
  7694  	} else {
  7695  		for idx, affinity := range t.Affinities {
  7696  			if err := affinity.Validate(); err != nil {
  7697  				outer := fmt.Errorf("Affinity %d validation failed: %s", idx+1, err)
  7698  				mErr.Errors = append(mErr.Errors, outer)
  7699  			}
  7700  		}
  7701  	}
  7702  
  7703  	// Validate Services
  7704  	if err := validateServices(t, tg.Networks); err != nil {
  7705  		mErr.Errors = append(mErr.Errors, err)
  7706  	}
  7707  
  7708  	// Validate artifacts.
  7709  	for idx, artifact := range t.Artifacts {
  7710  		if err := artifact.Validate(); err != nil {
  7711  			outer := fmt.Errorf("Artifact %d validation failed: %v", idx+1, err)
  7712  			mErr.Errors = append(mErr.Errors, outer)
  7713  		}
  7714  	}
  7715  
  7716  	// Validate Vault.
  7717  	if t.Vault != nil {
  7718  		if err := t.Vault.Validate(); err != nil {
  7719  			mErr.Errors = append(mErr.Errors, fmt.Errorf("Vault validation failed: %v", err))
  7720  		}
  7721  	}
  7722  
  7723  	// Validate templates.
  7724  	destinations := make(map[string]int, len(t.Templates))
  7725  	for idx, tmpl := range t.Templates {
  7726  		if err := tmpl.Validate(); err != nil {
  7727  			outer := fmt.Errorf("Template %d validation failed: %s", idx+1, err)
  7728  			mErr.Errors = append(mErr.Errors, outer)
  7729  		}
  7730  
  7731  		if other, ok := destinations[tmpl.DestPath]; ok {
  7732  			outer := fmt.Errorf("Template %d has same destination as %d", idx+1, other)
  7733  			mErr.Errors = append(mErr.Errors, outer)
  7734  		} else {
  7735  			destinations[tmpl.DestPath] = idx + 1
  7736  		}
  7737  	}
  7738  
  7739  	// Validate the dispatch payload block if there
  7740  	if t.DispatchPayload != nil {
  7741  		if err := t.DispatchPayload.Validate(); err != nil {
  7742  			mErr.Errors = append(mErr.Errors, fmt.Errorf("Dispatch Payload validation failed: %v", err))
  7743  		}
  7744  	}
  7745  
  7746  	// Validate the Lifecycle block if there
  7747  	if t.Lifecycle != nil {
  7748  		if err := t.Lifecycle.Validate(); err != nil {
  7749  			mErr.Errors = append(mErr.Errors, fmt.Errorf("Lifecycle validation failed: %v", err))
  7750  		}
  7751  
  7752  	}
  7753  
  7754  	// Validation for TaskKind field which is used for Consul Connect integration
  7755  	if t.Kind.IsConnectProxy() {
  7756  		// This task is a Connect proxy so it should not have service blocks
  7757  		if len(t.Services) > 0 {
  7758  			mErr.Errors = append(mErr.Errors, fmt.Errorf("Connect proxy task must not have a service block"))
  7759  		}
  7760  		if t.Leader {
  7761  			mErr.Errors = append(mErr.Errors, fmt.Errorf("Connect proxy task must not have leader set"))
  7762  		}
  7763  
  7764  		// Ensure the proxy task has a corresponding service entry
  7765  		serviceErr := ValidateConnectProxyService(t.Kind.Value(), tg.Services)
  7766  		if serviceErr != nil {
  7767  			mErr.Errors = append(mErr.Errors, serviceErr)
  7768  		}
  7769  	}
  7770  
  7771  	// Validation for volumes
  7772  	for idx, vm := range t.VolumeMounts {
  7773  		if !MountPropagationModeIsValid(vm.PropagationMode) {
  7774  			mErr.Errors = append(mErr.Errors, fmt.Errorf("Volume Mount (%d) has an invalid propagation mode: \"%s\"", idx, vm.PropagationMode))
  7775  		}
  7776  
  7777  		// Validate the task does not reference undefined volume mounts
  7778  		if vm.Volume == "" {
  7779  			mErr.Errors = append(mErr.Errors, fmt.Errorf("Volume Mount (%d) references an empty volume", idx))
  7780  		} else if _, ok := tg.Volumes[vm.Volume]; !ok {
  7781  			mErr.Errors = append(mErr.Errors, fmt.Errorf("Volume Mount (%d) references undefined volume %s", idx, vm.Volume))
  7782  		}
  7783  	}
  7784  
  7785  	// Validate CSI Plugin Config
  7786  	if t.CSIPluginConfig != nil {
  7787  		if t.CSIPluginConfig.ID == "" {
  7788  			mErr.Errors = append(mErr.Errors, fmt.Errorf("CSIPluginConfig must have a non-empty PluginID"))
  7789  		}
  7790  
  7791  		if !CSIPluginTypeIsValid(t.CSIPluginConfig.Type) {
  7792  			mErr.Errors = append(mErr.Errors, fmt.Errorf("CSIPluginConfig PluginType must be one of 'node', 'controller', or 'monolith', got: \"%s\"", t.CSIPluginConfig.Type))
  7793  		}
  7794  
  7795  		// TODO: Investigate validation of the PluginMountDir. Not much we can do apart from check IsAbs until after we understand its execution environment though :(
  7796  	}
  7797  
  7798  	return mErr.ErrorOrNil()
  7799  }
  7800  
  7801  // validateServices takes a task and validates the services within it are valid
  7802  // and reference ports that exist.
  7803  func validateServices(t *Task, tgNetworks Networks) error {
  7804  	var mErr multierror.Error
  7805  
  7806  	// Ensure that services don't ask for nonexistent ports and their names are
  7807  	// unique.
  7808  	servicePorts := make(map[string]map[string]struct{})
  7809  	addServicePort := func(label, service string) {
  7810  		if _, ok := servicePorts[label]; !ok {
  7811  			servicePorts[label] = map[string]struct{}{}
  7812  		}
  7813  		servicePorts[label][service] = struct{}{}
  7814  	}
  7815  	knownServices := make(map[string]struct{})
  7816  	for i, service := range t.Services {
  7817  		if err := service.Validate(); err != nil {
  7818  			outer := fmt.Errorf("service[%d] %+q validation failed: %s", i, service.Name, err)
  7819  			mErr.Errors = append(mErr.Errors, outer)
  7820  		}
  7821  
  7822  		if service.AddressMode == AddressModeAlloc {
  7823  			mErr.Errors = append(mErr.Errors, fmt.Errorf("service %q cannot use address_mode=\"alloc\", only services defined in a \"group\" block can use this mode", service.Name))
  7824  		}
  7825  
  7826  		// Ensure that services with the same name are not being registered for
  7827  		// the same port
  7828  		if _, ok := knownServices[service.Name+service.PortLabel]; ok {
  7829  			mErr.Errors = append(mErr.Errors, fmt.Errorf("service %q is duplicate", service.Name))
  7830  		}
  7831  		knownServices[service.Name+service.PortLabel] = struct{}{}
  7832  
  7833  		if service.PortLabel != "" {
  7834  			if service.AddressMode == "driver" {
  7835  				// Numeric port labels are valid for address_mode=driver
  7836  				_, err := strconv.Atoi(service.PortLabel)
  7837  				if err != nil {
  7838  					// Not a numeric port label, add it to list to check
  7839  					addServicePort(service.PortLabel, service.Name)
  7840  				}
  7841  			} else {
  7842  				addServicePort(service.PortLabel, service.Name)
  7843  			}
  7844  		}
  7845  
  7846  		// connect block is only allowed on group level
  7847  		if service.Connect != nil {
  7848  			mErr.Errors = append(mErr.Errors, fmt.Errorf("service %q cannot have \"connect\" block, only services defined in a \"group\" block can", service.Name))
  7849  		}
  7850  
  7851  		// Ensure that check names are unique and have valid ports
  7852  		knownChecks := make(map[string]struct{})
  7853  		for _, check := range service.Checks {
  7854  			if _, ok := knownChecks[check.Name]; ok {
  7855  				mErr.Errors = append(mErr.Errors, fmt.Errorf("check %q is duplicate", check.Name))
  7856  			}
  7857  			knownChecks[check.Name] = struct{}{}
  7858  
  7859  			if check.AddressMode == AddressModeAlloc {
  7860  				mErr.Errors = append(mErr.Errors, fmt.Errorf("check %q cannot use address_mode=\"alloc\", only checks defined in a \"group\" service block can use this mode", service.Name))
  7861  			}
  7862  
  7863  			if !check.RequiresPort() {
  7864  				// No need to continue validating check if it doesn't need a port
  7865  				continue
  7866  			}
  7867  
  7868  			effectivePort := check.PortLabel
  7869  			if effectivePort == "" {
  7870  				// Inherits from service
  7871  				effectivePort = service.PortLabel
  7872  			}
  7873  
  7874  			if effectivePort == "" {
  7875  				mErr.Errors = append(mErr.Errors, fmt.Errorf("check %q is missing a port", check.Name))
  7876  				continue
  7877  			}
  7878  
  7879  			isNumeric := false
  7880  			portNumber, err := strconv.Atoi(effectivePort)
  7881  			if err == nil {
  7882  				isNumeric = true
  7883  			}
  7884  
  7885  			// Numeric ports are fine for address_mode = "driver"
  7886  			if check.AddressMode == "driver" && isNumeric {
  7887  				if portNumber <= 0 {
  7888  					mErr.Errors = append(mErr.Errors, fmt.Errorf("check %q has invalid numeric port %d", check.Name, portNumber))
  7889  				}
  7890  				continue
  7891  			}
  7892  
  7893  			if isNumeric {
  7894  				mErr.Errors = append(mErr.Errors, fmt.Errorf(`check %q cannot use a numeric port %d without setting address_mode="driver"`, check.Name, portNumber))
  7895  				continue
  7896  			}
  7897  
  7898  			// PortLabel must exist, report errors by its parent service
  7899  			addServicePort(effectivePort, service.Name)
  7900  		}
  7901  	}
  7902  
  7903  	// Get the set of group port labels.
  7904  	portLabels := make(map[string]struct{})
  7905  	if len(tgNetworks) > 0 {
  7906  		ports := tgNetworks[0].PortLabels()
  7907  		for portLabel := range ports {
  7908  			portLabels[portLabel] = struct{}{}
  7909  		}
  7910  	}
  7911  
  7912  	// COMPAT(0.13)
  7913  	// Append the set of task port labels. (Note that network resources on the
  7914  	// task resources are deprecated, but we must let them continue working; a
  7915  	// warning will be emitted on job submission).
  7916  	if t.Resources != nil {
  7917  		for _, network := range t.Resources.Networks {
  7918  			for portLabel := range network.PortLabels() {
  7919  				portLabels[portLabel] = struct{}{}
  7920  			}
  7921  		}
  7922  	}
  7923  
  7924  	// Iterate over a sorted list of keys to make error listings stable
  7925  	keys := make([]string, 0, len(servicePorts))
  7926  	for p := range servicePorts {
  7927  		keys = append(keys, p)
  7928  	}
  7929  	sort.Strings(keys)
  7930  
  7931  	// Ensure all ports referenced in services exist.
  7932  	for _, servicePort := range keys {
  7933  		services := servicePorts[servicePort]
  7934  		_, ok := portLabels[servicePort]
  7935  		if !ok {
  7936  			names := make([]string, 0, len(services))
  7937  			for name := range services {
  7938  				names = append(names, name)
  7939  			}
  7940  
  7941  			// Keep order deterministic
  7942  			sort.Strings(names)
  7943  			joined := strings.Join(names, ", ")
  7944  			err := fmt.Errorf("port label %q referenced by services %v does not exist", servicePort, joined)
  7945  			mErr.Errors = append(mErr.Errors, err)
  7946  		}
  7947  	}
  7948  
  7949  	// Ensure address mode is valid
  7950  	return mErr.ErrorOrNil()
  7951  }
  7952  
  7953  func (t *Task) Warnings() error {
  7954  	var mErr multierror.Error
  7955  
  7956  	// Validate the resources
  7957  	if t.Resources != nil && t.Resources.IOPS != 0 {
  7958  		mErr.Errors = append(mErr.Errors, fmt.Errorf("IOPS has been deprecated as of Nomad 0.9.0. Please remove IOPS from resource block."))
  7959  	}
  7960  
  7961  	if t.Resources != nil && len(t.Resources.Networks) != 0 {
  7962  		mErr.Errors = append(mErr.Errors, fmt.Errorf("task network resources have been deprecated as of Nomad 0.12.0. Please configure networking via group network block."))
  7963  	}
  7964  
  7965  	for idx, tmpl := range t.Templates {
  7966  		if err := tmpl.Warnings(); err != nil {
  7967  			err = multierror.Prefix(err, fmt.Sprintf("Template[%d]", idx))
  7968  			mErr.Errors = append(mErr.Errors, err)
  7969  		}
  7970  	}
  7971  
  7972  	return mErr.ErrorOrNil()
  7973  }
  7974  
// TaskKind identifies the special kinds of tasks using the following format:
// `<kind_name>(:<identifier>)`. The TaskKind can optionally include an identifier that
// is opaque to the Task. This identifier can be used to relate the task to some
// other entity based on the kind.
//
// For example, a task may have the TaskKind of `connect-proxy:service` where
// 'connect-proxy' is the kind name and 'service' is the identifier that relates the
// task to the service name of which it is a connect proxy for.
type TaskKind string
  7984  
  7985  func NewTaskKind(name, identifier string) TaskKind {
  7986  	return TaskKind(fmt.Sprintf("%s:%s", name, identifier))
  7987  }
  7988  
  7989  // Name returns the kind name portion of the TaskKind
  7990  func (k TaskKind) Name() string {
  7991  	return strings.Split(string(k), ":")[0]
  7992  }
  7993  
  7994  // Value returns the identifier of the TaskKind or an empty string if it doesn't
  7995  // include one.
  7996  func (k TaskKind) Value() string {
  7997  	if s := strings.SplitN(string(k), ":", 2); len(s) > 1 {
  7998  		return s[1]
  7999  	}
  8000  	return ""
  8001  }
  8002  
  8003  func (k TaskKind) hasPrefix(prefix string) bool {
  8004  	return strings.HasPrefix(string(k), prefix+":") && len(k) > len(prefix)+1
  8005  }
  8006  
// IsConnectProxy returns true if the TaskKind is connect-proxy, i.e. the
// kind has the form "connect-proxy:<identifier>".
func (k TaskKind) IsConnectProxy() bool {
	return k.hasPrefix(ConnectProxyPrefix)
}
  8011  
// IsConnectNative returns true if the TaskKind is connect-native, i.e. the
// kind has the form "connect-native:<identifier>".
func (k TaskKind) IsConnectNative() bool {
	return k.hasPrefix(ConnectNativePrefix)
}
  8016  
// IsConnectIngress returns true if the TaskKind is connect-ingress, i.e. the
// kind has the form "connect-ingress:<identifier>".
func (k TaskKind) IsConnectIngress() bool {
	return k.hasPrefix(ConnectIngressPrefix)
}
  8021  
// IsConnectTerminating returns true if the TaskKind is connect-terminating,
// i.e. the kind has the form "connect-terminating:<identifier>".
func (k TaskKind) IsConnectTerminating() bool {
	return k.hasPrefix(ConnectTerminatingPrefix)
}
  8026  
// IsConnectMesh returns true if the TaskKind is connect-mesh, i.e. the kind
// has the form "connect-mesh:<identifier>".
func (k TaskKind) IsConnectMesh() bool {
	return k.hasPrefix(ConnectMeshPrefix)
}
  8031  
  8032  // IsAnyConnectGateway returns true if the TaskKind represents any one of the
  8033  // supported connect gateway types.
  8034  func (k TaskKind) IsAnyConnectGateway() bool {
  8035  	switch {
  8036  	case k.IsConnectIngress():
  8037  		return true
  8038  	case k.IsConnectTerminating():
  8039  		return true
  8040  	case k.IsConnectMesh():
  8041  		return true
  8042  	default:
  8043  		return false
  8044  	}
  8045  }
  8046  
// Prefixes used in TaskKind values to mark Consul Connect related tasks; see
// TaskKind.hasPrefix for how they are matched.
const (
	// ConnectProxyPrefix is the prefix used for fields referencing a Consul Connect
	// Proxy
	ConnectProxyPrefix = "connect-proxy"

	// ConnectNativePrefix is the prefix used for fields referencing a Connect
	// Native Task
	ConnectNativePrefix = "connect-native"

	// ConnectIngressPrefix is the prefix used for fields referencing a Consul
	// Connect Ingress Gateway Proxy.
	ConnectIngressPrefix = "connect-ingress"

	// ConnectTerminatingPrefix is the prefix used for fields referencing a Consul
	// Connect Terminating Gateway Proxy.
	ConnectTerminatingPrefix = "connect-terminating"

	// ConnectMeshPrefix is the prefix used for fields referencing a Consul Connect
	// Mesh Gateway Proxy.
	ConnectMeshPrefix = "connect-mesh"
)
  8068  
  8069  // ValidateConnectProxyService checks that the service that is being
  8070  // proxied by this task exists in the task group and contains
  8071  // valid Connect config.
  8072  func ValidateConnectProxyService(serviceName string, tgServices []*Service) error {
  8073  	found := false
  8074  	names := make([]string, 0, len(tgServices))
  8075  	for _, svc := range tgServices {
  8076  		if svc.Connect == nil || svc.Connect.SidecarService == nil {
  8077  			continue
  8078  		}
  8079  
  8080  		if svc.Name == serviceName {
  8081  			found = true
  8082  			break
  8083  		}
  8084  
  8085  		// Build up list of mismatched Connect service names for error
  8086  		// reporting.
  8087  		names = append(names, svc.Name)
  8088  	}
  8089  
  8090  	if !found {
  8091  		if len(names) == 0 {
  8092  			return fmt.Errorf("No Connect services in task group with Connect proxy (%q)", serviceName)
  8093  		} else {
  8094  			return fmt.Errorf("Connect proxy service name (%q) not found in Connect services from task group: %s", serviceName, names)
  8095  		}
  8096  	}
  8097  
  8098  	return nil
  8099  }
  8100  
const (
	// TemplateChangeModeNoop marks that no action should be taken if the
	// template is re-rendered
	TemplateChangeModeNoop = "noop"

	// TemplateChangeModeSignal marks that the task should be signaled if the
	// template is re-rendered
	TemplateChangeModeSignal = "signal"

	// TemplateChangeModeRestart marks that the task should be restarted if the
	// template is re-rendered
	TemplateChangeModeRestart = "restart"

	// TemplateChangeModeScript marks that the task should trigger a script if
	// the template is re-rendered
	TemplateChangeModeScript = "script"
)

var (
	// TemplateChangeModeInvalidError is the error for when an invalid change
	// mode is given. Returned by Template.Validate when ChangeMode is not one
	// of the TemplateChangeMode* constants above.
	TemplateChangeModeInvalidError = errors.New("Invalid change mode. Must be one of the following: noop, signal, script, restart")
)
  8124  
// Template represents a template configuration to be rendered for a given task
type Template struct {
	// SourcePath is the path to the template to be rendered
	SourcePath string

	// DestPath is the path to where the template should be rendered
	DestPath string

	// EmbeddedTmpl store the raw template. This is useful for smaller templates
	// where they are embedded in the job file rather than sent as an artifact
	EmbeddedTmpl string

	// ChangeMode indicates what should be done if the template is re-rendered.
	// Must be one of the TemplateChangeMode* constants.
	ChangeMode string

	// ChangeSignal is the signal that should be sent if the change mode
	// requires it.
	ChangeSignal string

	// ChangeScript is the configuration of the script. It's required if
	// ChangeMode is set to script.
	ChangeScript *ChangeScript

	// Splay is used to avoid coordinated restarts of processes by applying a
	// random wait between 0 and the given splay value before signalling the
	// application of a change
	Splay time.Duration

	// Perms is the permission the file should be written out with, as an
	// octal string (e.g. "0644").
	Perms string
	// Uid and Gid are the numeric user and group that should own the file;
	// nil means unset.
	Uid *int
	Gid *int

	// LeftDelim and RightDelim are optional configurations to control what
	// delimiter is utilized when parsing the template.
	LeftDelim  string
	RightDelim string

	// Envvars enables exposing the template as environment variables
	// instead of as a file. The template must be of the form:
	//
	//	VAR_NAME_1={{ key service/my-key }}
	//	VAR_NAME_2=raw string and {{ env "attr.kernel.name" }}
	//
	// Lines will be split on the initial "=" with the first part being the
	// key name and the second part the value.
	// Empty lines and lines starting with # will be ignored, but to avoid
	// escaping issues #s within lines will not be treated as comments.
	Envvars bool

	// VaultGrace is the grace duration between lease renewal and reacquiring a
	// secret. If the lease of a secret is less than the grace, a new secret is
	// acquired.
	// COMPAT(0.12) VaultGrace has been ignored by Vault since Vault v0.5.
	VaultGrace time.Duration

	// WaitConfig is used to override the global WaitConfig on a per-template basis
	Wait *WaitConfig

	// ErrMissingKey is used to control how the template behaves when attempting
	// to index a struct or map key that does not exist.
	ErrMissingKey bool
}
  8189  
  8190  // DefaultTemplate returns a default template.
  8191  func DefaultTemplate() *Template {
  8192  	return &Template{
  8193  		ChangeMode: TemplateChangeModeRestart,
  8194  		Splay:      5 * time.Second,
  8195  		Perms:      "0644",
  8196  	}
  8197  }
  8198  
  8199  func (t *Template) Equal(o *Template) bool {
  8200  	if t == nil || o == nil {
  8201  		return t == o
  8202  	}
  8203  	switch {
  8204  	case t.SourcePath != o.SourcePath:
  8205  		return false
  8206  	case t.DestPath != o.DestPath:
  8207  		return false
  8208  	case t.EmbeddedTmpl != o.EmbeddedTmpl:
  8209  		return false
  8210  	case t.ChangeMode != o.ChangeMode:
  8211  		return false
  8212  	case t.ChangeSignal != o.ChangeSignal:
  8213  		return false
  8214  	case !t.ChangeScript.Equal(o.ChangeScript):
  8215  		return false
  8216  	case t.Splay != o.Splay:
  8217  		return false
  8218  	case t.Perms != o.Perms:
  8219  		return false
  8220  	case !pointer.Eq(t.Uid, o.Uid):
  8221  		return false
  8222  	case !pointer.Eq(t.Gid, o.Gid):
  8223  		return false
  8224  	case t.LeftDelim != o.LeftDelim:
  8225  		return false
  8226  	case t.RightDelim != o.RightDelim:
  8227  		return false
  8228  	case t.Envvars != o.Envvars:
  8229  		return false
  8230  	case t.VaultGrace != o.VaultGrace:
  8231  		return false
  8232  	case !t.Wait.Equal(o.Wait):
  8233  		return false
  8234  	case t.ErrMissingKey != o.ErrMissingKey:
  8235  		return false
  8236  	}
  8237  	return true
  8238  }
  8239  
  8240  func (t *Template) Copy() *Template {
  8241  	if t == nil {
  8242  		return nil
  8243  	}
  8244  	nt := new(Template)
  8245  	*nt = *t
  8246  
  8247  	nt.ChangeScript = t.ChangeScript.Copy()
  8248  	nt.Wait = t.Wait.Copy()
  8249  
  8250  	return nt
  8251  }
  8252  
  8253  func (t *Template) Canonicalize() {
  8254  	if t.ChangeSignal != "" {
  8255  		t.ChangeSignal = strings.ToUpper(t.ChangeSignal)
  8256  	}
  8257  }
  8258  
  8259  func (t *Template) Validate() error {
  8260  	var mErr multierror.Error
  8261  
  8262  	// Verify we have something to render
  8263  	if t.SourcePath == "" && t.EmbeddedTmpl == "" {
  8264  		_ = multierror.Append(&mErr, fmt.Errorf("Must specify a source path or have an embedded template"))
  8265  	}
  8266  
  8267  	// Verify we can render somewhere
  8268  	if t.DestPath == "" {
  8269  		_ = multierror.Append(&mErr, fmt.Errorf("Must specify a destination for the template"))
  8270  	}
  8271  
  8272  	// Verify the destination doesn't escape
  8273  	escaped, err := escapingfs.PathEscapesAllocViaRelative("task", t.DestPath)
  8274  	if err != nil {
  8275  		mErr.Errors = append(mErr.Errors, fmt.Errorf("invalid destination path: %v", err))
  8276  	} else if escaped {
  8277  		mErr.Errors = append(mErr.Errors, fmt.Errorf("destination escapes allocation directory"))
  8278  	}
  8279  
  8280  	// Verify a proper change mode
  8281  	switch t.ChangeMode {
  8282  	case TemplateChangeModeNoop, TemplateChangeModeRestart:
  8283  	case TemplateChangeModeSignal:
  8284  		if t.ChangeSignal == "" {
  8285  			_ = multierror.Append(&mErr, fmt.Errorf("Must specify signal value when change mode is signal"))
  8286  		}
  8287  		if t.Envvars {
  8288  			_ = multierror.Append(&mErr, fmt.Errorf("cannot use signals with env var templates"))
  8289  		}
  8290  	case TemplateChangeModeScript:
  8291  		if t.ChangeScript == nil {
  8292  			_ = multierror.Append(&mErr, fmt.Errorf("must specify change script configuration value when change mode is script"))
  8293  		}
  8294  
  8295  		if err = t.ChangeScript.Validate(); err != nil {
  8296  			_ = multierror.Append(&mErr, err)
  8297  		}
  8298  	default:
  8299  		_ = multierror.Append(&mErr, TemplateChangeModeInvalidError)
  8300  	}
  8301  
  8302  	// Verify the splay is positive
  8303  	if t.Splay < 0 {
  8304  		_ = multierror.Append(&mErr, fmt.Errorf("Must specify positive splay value"))
  8305  	}
  8306  
  8307  	// Verify the permissions
  8308  	if t.Perms != "" {
  8309  		if _, err := strconv.ParseUint(t.Perms, 8, 12); err != nil {
  8310  			_ = multierror.Append(&mErr, fmt.Errorf("Failed to parse %q as octal: %v", t.Perms, err))
  8311  		}
  8312  	}
  8313  
  8314  	if err = t.Wait.Validate(); err != nil {
  8315  		_ = multierror.Append(&mErr, err)
  8316  	}
  8317  
  8318  	return mErr.ErrorOrNil()
  8319  }
  8320  
  8321  func (t *Template) Warnings() error {
  8322  	var mErr multierror.Error
  8323  
  8324  	// Deprecation notice for vault_grace
  8325  	if t.VaultGrace != 0 {
  8326  		mErr.Errors = append(mErr.Errors, fmt.Errorf("VaultGrace has been deprecated as of Nomad 0.11 and ignored since Vault 0.5. Please remove VaultGrace / vault_grace from template block."))
  8327  	}
  8328  
  8329  	return mErr.ErrorOrNil()
  8330  }
  8331  
// DiffID fulfills the DiffableWithID interface. A template is identified
// within a task by its destination path.
func (t *Template) DiffID() string {
	return t.DestPath
}
  8336  
// ChangeScript holds the configuration for the script that is executed if
// change mode is set to script
type ChangeScript struct {
	// Command is the full path to the script
	Command string
	// Args is a slice of arguments passed to the script
	Args []string
	// Timeout is the amount of time we wait for the script to finish
	Timeout time.Duration
	// FailOnError indicates whether a task should fail in case script
	// execution fails, or whether the failure should only be logged without
	// interrupting the task
	FailOnError bool
}
  8350  
  8351  func (cs *ChangeScript) Equal(o *ChangeScript) bool {
  8352  	if cs == nil || o == nil {
  8353  		return cs == o
  8354  	}
  8355  	switch {
  8356  	case cs.Command != o.Command:
  8357  		return false
  8358  	case !slices.Equal(cs.Args, o.Args):
  8359  		return false
  8360  	case cs.Timeout != o.Timeout:
  8361  		return false
  8362  	case cs.FailOnError != o.FailOnError:
  8363  		return false
  8364  	}
  8365  	return true
  8366  }
  8367  
  8368  func (cs *ChangeScript) Copy() *ChangeScript {
  8369  	if cs == nil {
  8370  		return nil
  8371  	}
  8372  	return &ChangeScript{
  8373  		Command:     cs.Command,
  8374  		Args:        slices.Clone(cs.Args),
  8375  		Timeout:     cs.Timeout,
  8376  		FailOnError: cs.FailOnError,
  8377  	}
  8378  }
  8379  
  8380  // Validate makes sure all the required fields of ChangeScript are present
  8381  func (cs *ChangeScript) Validate() error {
  8382  	if cs == nil {
  8383  		return nil
  8384  	}
  8385  
  8386  	if cs.Command == "" {
  8387  		return fmt.Errorf("must specify script path value when change mode is script")
  8388  	}
  8389  
  8390  	return nil
  8391  }
  8392  
// WaitConfig is the Min/Max duration used by the Consul Template Watcher. Consul
// Template relies on pointer based business logic. This struct uses pointers so
// that we can tell the difference between zero values and unset values.
type WaitConfig struct {
	Min *time.Duration
	Max *time.Duration
}
  8400  
  8401  // Copy returns a deep copy of this configuration.
  8402  func (wc *WaitConfig) Copy() *WaitConfig {
  8403  	if wc == nil {
  8404  		return nil
  8405  	}
  8406  
  8407  	nwc := new(WaitConfig)
  8408  
  8409  	if wc.Min != nil {
  8410  		nwc.Min = wc.Min
  8411  	}
  8412  
  8413  	if wc.Max != nil {
  8414  		nwc.Max = wc.Max
  8415  	}
  8416  
  8417  	return nwc
  8418  }
  8419  
  8420  func (wc *WaitConfig) Equal(o *WaitConfig) bool {
  8421  	if wc == nil || o == nil {
  8422  		return wc == o
  8423  	}
  8424  	switch {
  8425  	case !pointer.Eq(wc.Min, o.Min):
  8426  		return false
  8427  	case !pointer.Eq(wc.Max, o.Max):
  8428  		return false
  8429  	}
  8430  	return true
  8431  }
  8432  
  8433  // Validate that the min is not greater than the max
  8434  func (wc *WaitConfig) Validate() error {
  8435  	if wc == nil {
  8436  		return nil
  8437  	}
  8438  
  8439  	// If either one is nil, they aren't comparable, so they can't be invalid.
  8440  	if wc.Min == nil || wc.Max == nil {
  8441  		return nil
  8442  	}
  8443  
  8444  	if *wc.Min > *wc.Max {
  8445  		return fmt.Errorf("wait min %s is greater than max %s", wc.Min, wc.Max)
  8446  	}
  8447  
  8448  	return nil
  8449  }
  8450  
// AllocStateField records a single event that changes the state of the whole allocation
type AllocStateField uint8

const (
	// AllocStateFieldClientStatus tracks changes to the allocation's client
	// status.
	AllocStateFieldClientStatus AllocStateField = iota
)

// AllocState captures one timestamped change to an allocation-level field.
type AllocState struct {
	Field AllocStateField
	Value string
	Time  time.Time
}
  8463  
// TaskHandle is an optional handle to a task propagated to the servers for
// use by remote tasks. Since remote tasks are not implicitly lost when the
// node they are assigned to is down, their state is migrated to the
// replacement allocation.
//
// Minimal set of fields from plugins/drivers/task_handle.go:TaskHandle
type TaskHandle struct {
	// Version of driver state. Used by the driver to gracefully handle
	// plugin upgrades.
	Version int

	// Driver-specific state containing a handle to the remote task.
	DriverState []byte
}
  8478  
  8479  func (h *TaskHandle) Copy() *TaskHandle {
  8480  	if h == nil {
  8481  		return nil
  8482  	}
  8483  
  8484  	newTH := TaskHandle{
  8485  		Version:     h.Version,
  8486  		DriverState: make([]byte, len(h.DriverState)),
  8487  	}
  8488  	copy(newTH.DriverState, h.DriverState)
  8489  	return &newTH
  8490  }
  8491  
  8492  func (h *TaskHandle) Equal(o *TaskHandle) bool {
  8493  	if h == nil || o == nil {
  8494  		return h == o
  8495  	}
  8496  	if h.Version != o.Version {
  8497  		return false
  8498  	}
  8499  	return bytes.Equal(h.DriverState, o.DriverState)
  8500  }
  8501  
// Set of possible states for a task.
const (
	TaskStatePending = "pending" // The task is waiting to be run.
	TaskStateRunning = "running" // The task is currently running.
	TaskStateDead    = "dead"    // Terminal state of task.
)
  8508  
// TaskState tracks the current state of a task and events that caused state
// transitions.
type TaskState struct {
	// The current state of the task; one of the TaskState* constants.
	State string

	// Failed marks a task as having failed
	Failed bool

	// Restarts is the number of times the task has restarted
	Restarts uint64

	// LastRestart is the time the task last restarted. It is updated each time the
	// task restarts
	LastRestart time.Time

	// StartedAt is the time the task is started. It is updated each time the
	// task starts
	StartedAt time.Time

	// FinishedAt is the time at which the task transitioned to dead and will
	// not be started again.
	FinishedAt time.Time

	// Series of task events that transition the state of the task.
	Events []*TaskEvent

	// Experimental -  TaskHandle is based on drivers.TaskHandle and used
	// by remote task drivers to migrate task handles between allocations.
	TaskHandle *TaskHandle
}
  8540  
  8541  // NewTaskState returns a TaskState initialized in the Pending state.
  8542  func NewTaskState() *TaskState {
  8543  	return &TaskState{
  8544  		State: TaskStatePending,
  8545  	}
  8546  }
  8547  
  8548  // Canonicalize ensures the TaskState has a State set. It should default to
  8549  // Pending.
  8550  func (ts *TaskState) Canonicalize() {
  8551  	if ts.State == "" {
  8552  		ts.State = TaskStatePending
  8553  	}
  8554  }
  8555  
  8556  func (ts *TaskState) Copy() *TaskState {
  8557  	if ts == nil {
  8558  		return nil
  8559  	}
  8560  	newTS := new(TaskState)
  8561  	*newTS = *ts
  8562  
  8563  	if ts.Events != nil {
  8564  		newTS.Events = make([]*TaskEvent, len(ts.Events))
  8565  		for i, e := range ts.Events {
  8566  			newTS.Events[i] = e.Copy()
  8567  		}
  8568  	}
  8569  
  8570  	newTS.TaskHandle = ts.TaskHandle.Copy()
  8571  	return newTS
  8572  }
  8573  
// Successful returns whether a task finished successfully. Only meaningful
// for batch allocations or ephemeral (non-sidecar) lifecycle tasks part of a
// service or system allocation.
func (ts *TaskState) Successful() bool {
	return ts.State == TaskStateDead && !ts.Failed
}
  8580  
  8581  func (ts *TaskState) Equal(o *TaskState) bool {
  8582  	if ts.State != o.State {
  8583  		return false
  8584  	}
  8585  	if ts.Failed != o.Failed {
  8586  		return false
  8587  	}
  8588  	if ts.Restarts != o.Restarts {
  8589  		return false
  8590  	}
  8591  	if ts.LastRestart != o.LastRestart {
  8592  		return false
  8593  	}
  8594  	if ts.StartedAt != o.StartedAt {
  8595  		return false
  8596  	}
  8597  	if ts.FinishedAt != o.FinishedAt {
  8598  		return false
  8599  	}
  8600  	if !slices.EqualFunc(ts.Events, o.Events, func(ts, o *TaskEvent) bool {
  8601  		return ts.Equal(o)
  8602  	}) {
  8603  		return false
  8604  	}
  8605  	if !ts.TaskHandle.Equal(o.TaskHandle) {
  8606  		return false
  8607  	}
  8608  
  8609  	return true
  8610  }
  8611  
const (
	// TaskSetupFailure indicates that the task could not be started due to a
	// setup failure.
	TaskSetupFailure = "Setup Failure"

	// TaskDriverFailure indicates that the task could not be started due to a
	// failure in the driver. TaskDriverFailure is considered Recoverable.
	TaskDriverFailure = "Driver Failure"

	// TaskReceived signals that the task has been pulled by the client at the
	// given timestamp.
	TaskReceived = "Received"

	// TaskFailedValidation indicates the task was invalid and as such was not run.
	// TaskFailedValidation is not considered Recoverable.
	TaskFailedValidation = "Failed Validation"

	// TaskStarted signals that the task was started and its timestamp can be
	// used to determine the running length of the task.
	TaskStarted = "Started"

	// TaskTerminated indicates that the task was started and exited.
	TaskTerminated = "Terminated"

	// TaskKilling indicates a kill signal has been sent to the task.
	TaskKilling = "Killing"

	// TaskKilled indicates a user has killed the task.
	TaskKilled = "Killed"

	// TaskRestarting indicates that task terminated and is being restarted.
	TaskRestarting = "Restarting"

	// TaskNotRestarting indicates that the task has failed and is not being
	// restarted because it has exceeded its restart policy.
	TaskNotRestarting = "Not Restarting"

	// TaskRestartSignal indicates that the task has been signaled to be
	// restarted
	TaskRestartSignal = "Restart Signaled"

	// TaskSignaling indicates that the task is being signalled.
	TaskSignaling = "Signaling"

	// TaskDownloadingArtifacts means the task is downloading the artifacts
	// specified in the task.
	TaskDownloadingArtifacts = "Downloading Artifacts"

	// TaskArtifactDownloadFailed indicates that downloading the artifacts
	// failed.
	TaskArtifactDownloadFailed = "Failed Artifact Download"

	// TaskBuildingTaskDir indicates that the task directory/chroot is being
	// built.
	TaskBuildingTaskDir = "Building Task Directory"

	// TaskSetup indicates the task runner is setting up the task environment
	TaskSetup = "Task Setup"

	// TaskDiskExceeded indicates that one of the tasks in a taskgroup has
	// exceeded the requested disk resources.
	TaskDiskExceeded = "Disk Resources Exceeded"

	// TaskSiblingFailed indicates that a sibling task in the task group has
	// failed.
	TaskSiblingFailed = "Sibling Task Failed"

	// TaskDriverMessage is an informational event message emitted by
	// drivers such as when they're performing a long running action like
	// downloading an image.
	TaskDriverMessage = "Driver"

	// TaskLeaderDead indicates that the leader task within the task group has
	// finished.
	TaskLeaderDead = "Leader Task Dead"

	// TaskMainDead indicates that the main tasks have died.
	TaskMainDead = "Main Tasks Dead"

	// TaskHookFailed indicates that one of the hooks for a task failed.
	TaskHookFailed = "Task hook failed"

	// TaskHookMessage indicates that one of the hooks for a task emitted a
	// message.
	TaskHookMessage = "Task hook message"

	// TaskRestoreFailed indicates Nomad was unable to reattach to a
	// restored task.
	TaskRestoreFailed = "Failed Restoring Task"

	// TaskPluginUnhealthy indicates that a plugin managed by Nomad became unhealthy
	TaskPluginUnhealthy = "Plugin became unhealthy"

	// TaskPluginHealthy indicates that a plugin managed by Nomad became healthy
	TaskPluginHealthy = "Plugin became healthy"

	// TaskClientReconnected indicates that the client running the task
	// reconnected.
	TaskClientReconnected = "Reconnected"

	// TaskWaitingShuttingDownDelay indicates that the task is waiting for
	// shutdown delay before being TaskKilled
	TaskWaitingShuttingDownDelay = "Waiting for shutdown delay"

	// TaskSkippingShutdownDelay indicates that the task operation was
	// configured to ignore the shutdown delay value set for the task.
	TaskSkippingShutdownDelay = "Skipping shutdown delay"
)
  8718  
// TaskEvent is an event that affects the state of a task and contains
// meta-data appropriate to the event's type.
type TaskEvent struct {
	// Type is one of the Task* event constants defined above.
	Type string
	Time int64 // Unix Nanosecond timestamp

	Message string // A possible message explaining the termination of the task.

	// DisplayMessage is a human friendly message about the event
	DisplayMessage string

	// Details is a map with annotated info about the event
	Details map[string]string

	// DEPRECATION NOTICE: The following fields are deprecated and will be removed
	// in a future release. Field values are available in the Details map.

	// FailsTask marks whether this event fails the task.
	// Deprecated, use Details["fails_task"] to access this.
	FailsTask bool

	// Restart fields.
	// Deprecated, use Details["restart_reason"] to access this.
	RestartReason string

	// Setup Failure fields.
	// Deprecated, use Details["setup_error"] to access this.
	SetupError string

	// Driver Failure fields.
	// Deprecated, use Details["driver_error"] to access this.
	DriverError string // A driver error occurred while starting the task.

	// Task Terminated Fields.

	// Deprecated, use Details["exit_code"] to access this.
	ExitCode int // The exit code of the task.

	// Deprecated, use Details["signal"] to access this.
	Signal int // The signal that terminated the task.

	// Killing fields
	// Deprecated, use Details["kill_timeout"] to access this.
	KillTimeout time.Duration

	// Task Killed Fields.
	// Deprecated, use Details["kill_error"] to access this.
	KillError string // Error killing the task.

	// KillReason is the reason the task was killed
	// Deprecated, use Details["kill_reason"] to access this.
	KillReason string

	// TaskRestarting fields.
	// Deprecated, use Details["start_delay"] to access this.
	StartDelay int64 // The sleep period before restarting the task in unix nanoseconds.

	// Artifact Download fields
	// Deprecated, use Details["download_error"] to access this.
	DownloadError string // Error downloading artifacts

	// Validation fields
	// Deprecated, use Details["validation_error"] to access this.
	ValidationError string // Validation error

	// The maximum allowed task disk size.
	// Deprecated, use Details["disk_limit"] to access this.
	DiskLimit int64

	// Name of the sibling task that caused termination of the task that
	// the TaskEvent refers to.
	// Deprecated, use Details["failed_sibling"] to access this.
	FailedSibling string

	// VaultError is the error from token renewal
	// Deprecated, use Details["vault_renewal_error"] to access this.
	VaultError string

	// TaskSignalReason indicates the reason the task is being signalled.
	// Deprecated, use Details["task_signal_reason"] to access this.
	TaskSignalReason string

	// TaskSignal is the signal that was sent to the task
	// Deprecated, use Details["task_signal"] to access this.
	TaskSignal string

	// DriverMessage indicates a driver action being taken.
	// Deprecated, use Details["driver_message"] to access this.
	DriverMessage string

	// GenericSource is the source of a message.
	// Deprecated, is redundant with event type.
	GenericSource string
}
  8813  
// PopulateEventDisplayMessage sets DisplayMessage to a human-friendly
// description derived from the event's Type and its (deprecated) detail
// fields. It is a no-op when the event is nil or when a display message has
// already been set. Unrecognized event types fall back to the raw Message.
func (e *TaskEvent) PopulateEventDisplayMessage() {
	// Build up the description based on the event type.
	if e == nil { //TODO(preetha) needs investigation alloc_runner's Run method sends a nil event when sigterming nomad. Why?
		return
	}

	// An explicitly-set display message always wins.
	if e.DisplayMessage != "" {
		return
	}

	var desc string
	switch e.Type {
	case TaskSetup:
		desc = e.Message
	case TaskStarted:
		desc = "Task started by client"
	case TaskReceived:
		desc = "Task received by client"
	case TaskFailedValidation:
		if e.ValidationError != "" {
			desc = e.ValidationError
		} else {
			desc = "Validation of task failed"
		}
	case TaskSetupFailure:
		if e.SetupError != "" {
			desc = e.SetupError
		} else {
			desc = "Task setup failed"
		}
	case TaskDriverFailure:
		if e.DriverError != "" {
			desc = e.DriverError
		} else {
			desc = "Failed to start task"
		}
	case TaskDownloadingArtifacts:
		desc = "Client is downloading artifacts"
	case TaskArtifactDownloadFailed:
		if e.DownloadError != "" {
			desc = e.DownloadError
		} else {
			desc = "Failed to download artifacts"
		}
	case TaskKilling:
		if e.KillReason != "" {
			desc = e.KillReason
		} else if e.KillTimeout != 0 {
			desc = fmt.Sprintf("Sent interrupt. Waiting %v before force killing", e.KillTimeout)
		} else {
			desc = "Sent interrupt"
		}
	case TaskKilled:
		if e.KillError != "" {
			desc = e.KillError
		} else {
			desc = "Task successfully killed"
		}
	case TaskTerminated:
		// Assemble exit code, signal, and message into one comma-joined line.
		var parts []string
		parts = append(parts, fmt.Sprintf("Exit Code: %d", e.ExitCode))

		if e.Signal != 0 {
			parts = append(parts, fmt.Sprintf("Signal: %d", e.Signal))
		}

		if e.Message != "" {
			parts = append(parts, fmt.Sprintf("Exit Message: %q", e.Message))
		}
		desc = strings.Join(parts, ", ")
	case TaskRestarting:
		in := fmt.Sprintf("Task restarting in %v", time.Duration(e.StartDelay))
		// Only surface the restart reason when it is informative.
		if e.RestartReason != "" && e.RestartReason != ReasonWithinPolicy {
			desc = fmt.Sprintf("%s - %s", e.RestartReason, in)
		} else {
			desc = in
		}
	case TaskNotRestarting:
		if e.RestartReason != "" {
			desc = e.RestartReason
		} else {
			desc = "Task exceeded restart policy"
		}
	case TaskSiblingFailed:
		if e.FailedSibling != "" {
			desc = fmt.Sprintf("Task's sibling %q failed", e.FailedSibling)
		} else {
			desc = "Task's sibling failed"
		}
	case TaskSignaling:
		sig := e.TaskSignal
		reason := e.TaskSignalReason

		if sig == "" && reason == "" {
			desc = "Task being sent a signal"
		} else if sig == "" {
			desc = reason
		} else if reason == "" {
			desc = fmt.Sprintf("Task being sent signal %v", sig)
		} else {
			desc = fmt.Sprintf("Task being sent signal %v: %v", sig, reason)
		}
	case TaskRestartSignal:
		if e.RestartReason != "" {
			desc = e.RestartReason
		} else {
			desc = "Task signaled to restart"
		}
	case TaskDriverMessage:
		desc = e.DriverMessage
	case TaskLeaderDead:
		desc = "Leader Task in Group dead"
	case TaskMainDead:
		desc = "Main tasks in the group died"
	case TaskClientReconnected:
		desc = "Client reconnected"
	default:
		desc = e.Message
	}

	e.DisplayMessage = desc
}
  8936  
  8937  func (e *TaskEvent) GoString() string {
  8938  	if e == nil {
  8939  		return ""
  8940  	}
  8941  	return fmt.Sprintf("%v - %v", e.Time, e.Type)
  8942  }
  8943  
  8944  // Equal on TaskEvent ignores the deprecated fields
  8945  func (e *TaskEvent) Equal(o *TaskEvent) bool {
  8946  	if e == nil || o == nil {
  8947  		return e == o
  8948  	}
  8949  
  8950  	if e.Type != o.Type {
  8951  		return false
  8952  	}
  8953  	if e.Time != o.Time {
  8954  		return false
  8955  	}
  8956  	if e.Message != o.Message {
  8957  		return false
  8958  	}
  8959  	if e.DisplayMessage != o.DisplayMessage {
  8960  		return false
  8961  	}
  8962  	if !maps.Equal(e.Details, o.Details) {
  8963  		return false
  8964  	}
  8965  
  8966  	return true
  8967  }
  8968  
// SetDisplayMessage sets the display message of TaskEvent and returns the
// event to allow chaining with the other Set* helpers.
func (e *TaskEvent) SetDisplayMessage(msg string) *TaskEvent {
	e.DisplayMessage = msg
	return e
}
  8974  
// SetMessage sets the message of TaskEvent, mirroring it into the Details
// map, and returns the event for chaining. Assumes Details is non-nil, as
// guaranteed for events built via NewTaskEvent.
func (e *TaskEvent) SetMessage(msg string) *TaskEvent {
	e.Message = msg
	e.Details["message"] = msg
	return e
}
  8981  
  8982  func (e *TaskEvent) Copy() *TaskEvent {
  8983  	if e == nil {
  8984  		return nil
  8985  	}
  8986  	copy := new(TaskEvent)
  8987  	*copy = *e
  8988  	return copy
  8989  }
  8990  
  8991  func NewTaskEvent(event string) *TaskEvent {
  8992  	return &TaskEvent{
  8993  		Type:    event,
  8994  		Time:    time.Now().UnixNano(),
  8995  		Details: make(map[string]string),
  8996  	}
  8997  }
  8998  
  8999  // SetSetupError is used to store an error that occurred while setting up the
  9000  // task
  9001  func (e *TaskEvent) SetSetupError(err error) *TaskEvent {
  9002  	if err != nil {
  9003  		e.SetupError = err.Error()
  9004  		e.Details["setup_error"] = err.Error()
  9005  	}
  9006  	return e
  9007  }
  9008  
  9009  func (e *TaskEvent) SetFailsTask() *TaskEvent {
  9010  	e.FailsTask = true
  9011  	e.Details["fails_task"] = "true"
  9012  	return e
  9013  }
  9014  
  9015  func (e *TaskEvent) SetDriverError(err error) *TaskEvent {
  9016  	if err != nil {
  9017  		e.DriverError = err.Error()
  9018  		e.Details["driver_error"] = err.Error()
  9019  	}
  9020  	return e
  9021  }
  9022  
  9023  func (e *TaskEvent) SetExitCode(c int) *TaskEvent {
  9024  	e.ExitCode = c
  9025  	e.Details["exit_code"] = fmt.Sprintf("%d", c)
  9026  	return e
  9027  }
  9028  
  9029  func (e *TaskEvent) SetSignal(s int) *TaskEvent {
  9030  	e.Signal = s
  9031  	e.Details["signal"] = fmt.Sprintf("%d", s)
  9032  	return e
  9033  }
  9034  
  9035  func (e *TaskEvent) SetSignalText(s string) *TaskEvent {
  9036  	e.Details["signal"] = s
  9037  	return e
  9038  }
  9039  
  9040  func (e *TaskEvent) SetExitMessage(err error) *TaskEvent {
  9041  	if err != nil {
  9042  		e.Message = err.Error()
  9043  		e.Details["exit_message"] = err.Error()
  9044  	}
  9045  	return e
  9046  }
  9047  
  9048  func (e *TaskEvent) SetKillError(err error) *TaskEvent {
  9049  	if err != nil {
  9050  		e.KillError = err.Error()
  9051  		e.Details["kill_error"] = err.Error()
  9052  	}
  9053  	return e
  9054  }
  9055  
  9056  func (e *TaskEvent) SetKillReason(r string) *TaskEvent {
  9057  	e.KillReason = r
  9058  	e.Details["kill_reason"] = r
  9059  	return e
  9060  }
  9061  
  9062  func (e *TaskEvent) SetRestartDelay(delay time.Duration) *TaskEvent {
  9063  	e.StartDelay = int64(delay)
  9064  	e.Details["start_delay"] = fmt.Sprintf("%d", delay)
  9065  	return e
  9066  }
  9067  
  9068  func (e *TaskEvent) SetRestartReason(reason string) *TaskEvent {
  9069  	e.RestartReason = reason
  9070  	e.Details["restart_reason"] = reason
  9071  	return e
  9072  }
  9073  
  9074  func (e *TaskEvent) SetTaskSignalReason(r string) *TaskEvent {
  9075  	e.TaskSignalReason = r
  9076  	e.Details["task_signal_reason"] = r
  9077  	return e
  9078  }
  9079  
  9080  func (e *TaskEvent) SetTaskSignal(s os.Signal) *TaskEvent {
  9081  	e.TaskSignal = s.String()
  9082  	e.Details["task_signal"] = s.String()
  9083  	return e
  9084  }
  9085  
  9086  func (e *TaskEvent) SetDownloadError(err error) *TaskEvent {
  9087  	if err != nil {
  9088  		e.DownloadError = err.Error()
  9089  		e.Details["download_error"] = err.Error()
  9090  	}
  9091  	return e
  9092  }
  9093  
  9094  func (e *TaskEvent) SetValidationError(err error) *TaskEvent {
  9095  	if err != nil {
  9096  		e.ValidationError = err.Error()
  9097  		e.Details["validation_error"] = err.Error()
  9098  	}
  9099  	return e
  9100  }
  9101  
  9102  func (e *TaskEvent) SetKillTimeout(timeout, maxTimeout time.Duration) *TaskEvent {
  9103  	actual := helper.Min(timeout, maxTimeout)
  9104  	e.KillTimeout = actual
  9105  	e.Details["kill_timeout"] = actual.String()
  9106  	return e
  9107  }
  9108  
  9109  func (e *TaskEvent) SetDiskLimit(limit int64) *TaskEvent {
  9110  	e.DiskLimit = limit
  9111  	e.Details["disk_limit"] = fmt.Sprintf("%d", limit)
  9112  	return e
  9113  }
  9114  
  9115  func (e *TaskEvent) SetFailedSibling(sibling string) *TaskEvent {
  9116  	e.FailedSibling = sibling
  9117  	e.Details["failed_sibling"] = sibling
  9118  	return e
  9119  }
  9120  
  9121  func (e *TaskEvent) SetVaultRenewalError(err error) *TaskEvent {
  9122  	if err != nil {
  9123  		e.VaultError = err.Error()
  9124  		e.Details["vault_renewal_error"] = err.Error()
  9125  	}
  9126  	return e
  9127  }
  9128  
  9129  func (e *TaskEvent) SetDriverMessage(m string) *TaskEvent {
  9130  	e.DriverMessage = m
  9131  	e.Details["driver_message"] = m
  9132  	return e
  9133  }
  9134  
  9135  func (e *TaskEvent) SetOOMKilled(oom bool) *TaskEvent {
  9136  	e.Details["oom_killed"] = strconv.FormatBool(oom)
  9137  	return e
  9138  }
  9139  
  9140  // TaskArtifact is an artifact to download before running the task.
  9141  type TaskArtifact struct {
  9142  	// GetterSource is the source to download an artifact using go-getter
  9143  	GetterSource string
  9144  
  9145  	// GetterOptions are options to use when downloading the artifact using
  9146  	// go-getter.
  9147  	GetterOptions map[string]string
  9148  
  9149  	// GetterHeaders are headers to use when downloading the artifact using
  9150  	// go-getter.
  9151  	GetterHeaders map[string]string
  9152  
  9153  	// GetterMode is the go-getter.ClientMode for fetching resources.
  9154  	// Defaults to "any" but can be set to "file" or "dir".
  9155  	GetterMode string
  9156  
  9157  	// RelativeDest is the download destination given relative to the task's
  9158  	// directory.
  9159  	RelativeDest string
  9160  }
  9161  
  9162  func (ta *TaskArtifact) Equal(o *TaskArtifact) bool {
  9163  	if ta == nil || o == nil {
  9164  		return ta == o
  9165  	}
  9166  	switch {
  9167  	case ta.GetterSource != o.GetterSource:
  9168  		return false
  9169  	case !maps.Equal(ta.GetterOptions, o.GetterOptions):
  9170  		return false
  9171  	case !maps.Equal(ta.GetterHeaders, o.GetterHeaders):
  9172  		return false
  9173  	case ta.GetterMode != o.GetterMode:
  9174  		return false
  9175  	case ta.RelativeDest != o.RelativeDest:
  9176  		return false
  9177  	}
  9178  	return true
  9179  }
  9180  
  9181  func (ta *TaskArtifact) Copy() *TaskArtifact {
  9182  	if ta == nil {
  9183  		return nil
  9184  	}
  9185  	return &TaskArtifact{
  9186  		GetterSource:  ta.GetterSource,
  9187  		GetterOptions: maps.Clone(ta.GetterOptions),
  9188  		GetterHeaders: maps.Clone(ta.GetterHeaders),
  9189  		GetterMode:    ta.GetterMode,
  9190  		RelativeDest:  ta.RelativeDest,
  9191  	}
  9192  }
  9193  
  9194  func (ta *TaskArtifact) GoString() string {
  9195  	return fmt.Sprintf("%+v", ta)
  9196  }
  9197  
  9198  // DiffID fulfills the DiffableWithID interface.
  9199  func (ta *TaskArtifact) DiffID() string {
  9200  	return ta.RelativeDest
  9201  }
  9202  
  9203  // hashStringMap appends a deterministic hash of m onto h.
  9204  func hashStringMap(h hash.Hash, m map[string]string) {
  9205  	keys := make([]string, 0, len(m))
  9206  	for k := range m {
  9207  		keys = append(keys, k)
  9208  	}
  9209  	sort.Strings(keys)
  9210  	for _, k := range keys {
  9211  		_, _ = h.Write([]byte(k))
  9212  		_, _ = h.Write([]byte(m[k]))
  9213  	}
  9214  }
  9215  
// Hash creates a unique identifier for a TaskArtifact as the same GetterSource
// may be specified multiple times with different destinations.
//
// The digest is BLAKE2b-256 over, in order: GetterSource, the sorted
// GetterOptions and GetterHeaders key/value pairs, GetterMode, and
// RelativeDest, returned as unpadded base64. The write order must not be
// changed, or hashes computed for existing artifacts would no longer match.
func (ta *TaskArtifact) Hash() string {
	h, err := blake2b.New256(nil)
	if err != nil {
		// blake2b.New256 can only fail for an invalid key; nil is valid.
		panic(err)
	}

	_, _ = h.Write([]byte(ta.GetterSource))

	hashStringMap(h, ta.GetterOptions)
	hashStringMap(h, ta.GetterHeaders)

	_, _ = h.Write([]byte(ta.GetterMode))
	_, _ = h.Write([]byte(ta.RelativeDest))
	return base64.RawStdEncoding.EncodeToString(h.Sum(nil))
}
  9233  
  9234  func (ta *TaskArtifact) Validate() error {
  9235  	// Verify the source
  9236  	var mErr multierror.Error
  9237  	if ta.GetterSource == "" {
  9238  		mErr.Errors = append(mErr.Errors, fmt.Errorf("source must be specified"))
  9239  	}
  9240  
  9241  	switch ta.GetterMode {
  9242  	case "":
  9243  		// Default to any
  9244  		ta.GetterMode = GetterModeAny
  9245  	case GetterModeAny, GetterModeFile, GetterModeDir:
  9246  		// Ok
  9247  	default:
  9248  		mErr.Errors = append(mErr.Errors, fmt.Errorf("invalid artifact mode %q; must be one of: %s, %s, %s",
  9249  			ta.GetterMode, GetterModeAny, GetterModeFile, GetterModeDir))
  9250  	}
  9251  
  9252  	escaped, err := escapingfs.PathEscapesAllocViaRelative("task", ta.RelativeDest)
  9253  	if err != nil {
  9254  		mErr.Errors = append(mErr.Errors, fmt.Errorf("invalid destination path: %v", err))
  9255  	} else if escaped {
  9256  		mErr.Errors = append(mErr.Errors, fmt.Errorf("destination escapes allocation directory"))
  9257  	}
  9258  
  9259  	if err := ta.validateChecksum(); err != nil {
  9260  		mErr.Errors = append(mErr.Errors, err)
  9261  	}
  9262  
  9263  	return mErr.ErrorOrNil()
  9264  }
  9265  
  9266  func (ta *TaskArtifact) validateChecksum() error {
  9267  	check, ok := ta.GetterOptions["checksum"]
  9268  	if !ok {
  9269  		return nil
  9270  	}
  9271  
  9272  	// Job struct validation occurs before interpolation resolution can be effective.
  9273  	// Skip checking if checksum contain variable reference, and artifacts fetching will
  9274  	// eventually fail, if checksum is indeed invalid.
  9275  	if args.ContainsEnv(check) {
  9276  		return nil
  9277  	}
  9278  
  9279  	check = strings.TrimSpace(check)
  9280  	if check == "" {
  9281  		return fmt.Errorf("checksum value cannot be empty")
  9282  	}
  9283  
  9284  	parts := strings.Split(check, ":")
  9285  	if l := len(parts); l != 2 {
  9286  		return fmt.Errorf(`checksum must be given as "type:value"; got %q`, check)
  9287  	}
  9288  
  9289  	checksumVal := parts[1]
  9290  	checksumBytes, err := hex.DecodeString(checksumVal)
  9291  	if err != nil {
  9292  		return fmt.Errorf("invalid checksum: %v", err)
  9293  	}
  9294  
  9295  	checksumType := parts[0]
  9296  	expectedLength := 0
  9297  	switch checksumType {
  9298  	case "md5":
  9299  		expectedLength = md5.Size
  9300  	case "sha1":
  9301  		expectedLength = sha1.Size
  9302  	case "sha256":
  9303  		expectedLength = sha256.Size
  9304  	case "sha512":
  9305  		expectedLength = sha512.Size
  9306  	default:
  9307  		return fmt.Errorf("unsupported checksum type: %s", checksumType)
  9308  	}
  9309  
  9310  	if len(checksumBytes) != expectedLength {
  9311  		return fmt.Errorf("invalid %s checksum: %v", checksumType, checksumVal)
  9312  	}
  9313  
  9314  	return nil
  9315  }
  9316  
// Constraint operand names beyond the plain comparison operators ("=", "==",
// "is", "!=", "not", "<", "<=", ">", ">="), which Constraint.Validate also
// accepts.
const (
	ConstraintDistinctProperty  = "distinct_property"
	ConstraintDistinctHosts     = "distinct_hosts"
	ConstraintRegex             = "regexp"
	ConstraintVersion           = "version"
	ConstraintSemver            = "semver"
	ConstraintSetContains       = "set_contains"
	ConstraintSetContainsAll    = "set_contains_all"
	ConstraintSetContainsAny    = "set_contains_any"
	ConstraintAttributeIsSet    = "is_set"
	ConstraintAttributeIsNotSet = "is_not_set"
)
  9329  
  9330  // A Constraint is used to restrict placement options.
  9331  type Constraint struct {
  9332  	LTarget string // Left-hand target
  9333  	RTarget string // Right-hand target
  9334  	Operand string // Constraint operand (<=, <, =, !=, >, >=), contains, near
  9335  }
  9336  
  9337  // Equal checks if two constraints are equal.
  9338  func (c *Constraint) Equal(o *Constraint) bool {
  9339  	return c == o ||
  9340  		c.LTarget == o.LTarget &&
  9341  			c.RTarget == o.RTarget &&
  9342  			c.Operand == o.Operand
  9343  }
  9344  
  9345  func (c *Constraint) Copy() *Constraint {
  9346  	if c == nil {
  9347  		return nil
  9348  	}
  9349  	return &Constraint{
  9350  		LTarget: c.LTarget,
  9351  		RTarget: c.RTarget,
  9352  		Operand: c.Operand,
  9353  	}
  9354  }
  9355  
  9356  func (c *Constraint) String() string {
  9357  	return fmt.Sprintf("%s %s %s", c.LTarget, c.Operand, c.RTarget)
  9358  }
  9359  
  9360  func (c *Constraint) Validate() error {
  9361  	var mErr multierror.Error
  9362  	if c.Operand == "" {
  9363  		mErr.Errors = append(mErr.Errors, errors.New("Missing constraint operand"))
  9364  	}
  9365  
  9366  	// requireLtarget specifies whether the constraint requires an LTarget to be
  9367  	// provided.
  9368  	requireLtarget := true
  9369  
  9370  	// Perform additional validation based on operand
  9371  	switch c.Operand {
  9372  	case ConstraintDistinctHosts:
  9373  		requireLtarget = false
  9374  	case ConstraintSetContainsAll, ConstraintSetContainsAny, ConstraintSetContains:
  9375  		if c.RTarget == "" {
  9376  			mErr.Errors = append(mErr.Errors, fmt.Errorf("Set contains constraint requires an RTarget"))
  9377  		}
  9378  	case ConstraintRegex:
  9379  		if _, err := regexp.Compile(c.RTarget); err != nil {
  9380  			mErr.Errors = append(mErr.Errors, fmt.Errorf("Regular expression failed to compile: %v", err))
  9381  		}
  9382  	case ConstraintVersion:
  9383  		if _, err := version.NewConstraint(c.RTarget); err != nil {
  9384  			mErr.Errors = append(mErr.Errors, fmt.Errorf("Version constraint is invalid: %v", err))
  9385  		}
  9386  	case ConstraintSemver:
  9387  		if _, err := semver.NewConstraint(c.RTarget); err != nil {
  9388  			mErr.Errors = append(mErr.Errors, fmt.Errorf("Semver constraint is invalid: %v", err))
  9389  		}
  9390  	case ConstraintDistinctProperty:
  9391  		// If a count is set, make sure it is convertible to a uint64
  9392  		if c.RTarget != "" {
  9393  			count, err := strconv.ParseUint(c.RTarget, 10, 64)
  9394  			if err != nil {
  9395  				mErr.Errors = append(mErr.Errors, fmt.Errorf("Failed to convert RTarget %q to uint64: %v", c.RTarget, err))
  9396  			} else if count < 1 {
  9397  				mErr.Errors = append(mErr.Errors, fmt.Errorf("Distinct Property must have an allowed count of 1 or greater: %d < 1", count))
  9398  			}
  9399  		}
  9400  	case ConstraintAttributeIsSet, ConstraintAttributeIsNotSet:
  9401  		if c.RTarget != "" {
  9402  			mErr.Errors = append(mErr.Errors, fmt.Errorf("Operator %q does not support an RTarget", c.Operand))
  9403  		}
  9404  	case "=", "==", "is", "!=", "not", "<", "<=", ">", ">=":
  9405  		if c.RTarget == "" {
  9406  			mErr.Errors = append(mErr.Errors, fmt.Errorf("Operator %q requires an RTarget", c.Operand))
  9407  		}
  9408  	default:
  9409  		mErr.Errors = append(mErr.Errors, fmt.Errorf("Unknown constraint type %q", c.Operand))
  9410  	}
  9411  
  9412  	// Ensure we have an LTarget for the constraints that need one
  9413  	if requireLtarget && c.LTarget == "" {
  9414  		mErr.Errors = append(mErr.Errors, fmt.Errorf("No LTarget provided but is required by constraint"))
  9415  	}
  9416  
  9417  	return mErr.ErrorOrNil()
  9418  }
  9419  
  9420  type Constraints []*Constraint
  9421  
  9422  // Equal compares Constraints as a set
  9423  func (xs *Constraints) Equal(ys *Constraints) bool {
  9424  	if xs == ys {
  9425  		return true
  9426  	}
  9427  	if xs == nil || ys == nil {
  9428  		return false
  9429  	}
  9430  	if len(*xs) != len(*ys) {
  9431  		return false
  9432  	}
  9433  SETEQUALS:
  9434  	for _, x := range *xs {
  9435  		for _, y := range *ys {
  9436  			if x.Equal(y) {
  9437  				continue SETEQUALS
  9438  			}
  9439  		}
  9440  		return false
  9441  	}
  9442  	return true
  9443  }
  9444  
  9445  // Affinity is used to score placement options based on a weight
  9446  type Affinity struct {
  9447  	LTarget string // Left-hand target
  9448  	RTarget string // Right-hand target
  9449  	Operand string // Affinity operand (<=, <, =, !=, >, >=), set_contains_all, set_contains_any
  9450  	Weight  int8   // Weight applied to nodes that match the affinity. Can be negative
  9451  }
  9452  
  9453  // Equal checks if two affinities are equal.
  9454  func (a *Affinity) Equal(o *Affinity) bool {
  9455  	if a == nil || o == nil {
  9456  		return a == o
  9457  	}
  9458  	switch {
  9459  	case a.LTarget != o.LTarget:
  9460  		return false
  9461  	case a.RTarget != o.RTarget:
  9462  		return false
  9463  	case a.Operand != o.Operand:
  9464  		return false
  9465  	case a.Weight != o.Weight:
  9466  		return false
  9467  	}
  9468  	return true
  9469  }
  9470  
  9471  func (a *Affinity) Copy() *Affinity {
  9472  	if a == nil {
  9473  		return nil
  9474  	}
  9475  	return &Affinity{
  9476  		LTarget: a.LTarget,
  9477  		RTarget: a.RTarget,
  9478  		Operand: a.Operand,
  9479  		Weight:  a.Weight,
  9480  	}
  9481  }
  9482  
  9483  func (a *Affinity) String() string {
  9484  	return fmt.Sprintf("%s %s %s %v", a.LTarget, a.Operand, a.RTarget, a.Weight)
  9485  }
  9486  
  9487  func (a *Affinity) Validate() error {
  9488  	var mErr multierror.Error
  9489  	if a.Operand == "" {
  9490  		mErr.Errors = append(mErr.Errors, errors.New("Missing affinity operand"))
  9491  	}
  9492  
  9493  	// Perform additional validation based on operand
  9494  	switch a.Operand {
  9495  	case ConstraintSetContainsAll, ConstraintSetContainsAny, ConstraintSetContains:
  9496  		if a.RTarget == "" {
  9497  			mErr.Errors = append(mErr.Errors, fmt.Errorf("Set contains operators require an RTarget"))
  9498  		}
  9499  	case ConstraintRegex:
  9500  		if _, err := regexp.Compile(a.RTarget); err != nil {
  9501  			mErr.Errors = append(mErr.Errors, fmt.Errorf("Regular expression failed to compile: %v", err))
  9502  		}
  9503  	case ConstraintVersion:
  9504  		if _, err := version.NewConstraint(a.RTarget); err != nil {
  9505  			mErr.Errors = append(mErr.Errors, fmt.Errorf("Version affinity is invalid: %v", err))
  9506  		}
  9507  	case ConstraintSemver:
  9508  		if _, err := semver.NewConstraint(a.RTarget); err != nil {
  9509  			mErr.Errors = append(mErr.Errors, fmt.Errorf("Semver affinity is invalid: %v", err))
  9510  		}
  9511  	case "=", "==", "is", "!=", "not", "<", "<=", ">", ">=":
  9512  		if a.RTarget == "" {
  9513  			mErr.Errors = append(mErr.Errors, fmt.Errorf("Operator %q requires an RTarget", a.Operand))
  9514  		}
  9515  	default:
  9516  		mErr.Errors = append(mErr.Errors, fmt.Errorf("Unknown affinity operator %q", a.Operand))
  9517  	}
  9518  
  9519  	// Ensure we have an LTarget
  9520  	if a.LTarget == "" {
  9521  		mErr.Errors = append(mErr.Errors, fmt.Errorf("No LTarget provided but is required"))
  9522  	}
  9523  
  9524  	// Ensure that weight is between -100 and 100, and not zero
  9525  	if a.Weight == 0 {
  9526  		mErr.Errors = append(mErr.Errors, fmt.Errorf("Affinity weight cannot be zero"))
  9527  	}
  9528  
  9529  	if a.Weight > 100 || a.Weight < -100 {
  9530  		mErr.Errors = append(mErr.Errors, fmt.Errorf("Affinity weight must be within the range [-100,100]"))
  9531  	}
  9532  
  9533  	return mErr.ErrorOrNil()
  9534  }
  9535  
// Spread is used to specify desired distribution of allocations according to weight
type Spread struct {
	// Attribute is the node attribute used as the spread criteria
	Attribute string

	// Weight is the relative weight of this spread, useful when there are multiple
	// spread and affinities
	// Validate requires a value in (0, 100].
	Weight int8

	// SpreadTarget is used to describe desired percentages for each attribute value
	SpreadTarget []*SpreadTarget

	// Memoized string representation
	// (populated lazily by String)
	str string
}
  9551  
  9552  func (s *Spread) Equal(o *Spread) bool {
  9553  	if s == nil || o == nil {
  9554  		return s == o
  9555  	}
  9556  	switch {
  9557  	case s.Attribute != o.Attribute:
  9558  		return false
  9559  	case s.Weight != o.Weight:
  9560  		return false
  9561  	case !slices.EqualFunc(s.SpreadTarget, o.SpreadTarget, func(a, b *SpreadTarget) bool { return a.Equal(b) }):
  9562  		return false
  9563  	}
  9564  	return true
  9565  }
  9566  
  9567  type Affinities []*Affinity
  9568  
  9569  // Equal compares Affinities as a set
  9570  func (xs *Affinities) Equal(ys *Affinities) bool {
  9571  	if xs == ys {
  9572  		return true
  9573  	}
  9574  	if xs == nil || ys == nil {
  9575  		return false
  9576  	}
  9577  	if len(*xs) != len(*ys) {
  9578  		return false
  9579  	}
  9580  SETEQUALS:
  9581  	for _, x := range *xs {
  9582  		for _, y := range *ys {
  9583  			if x.Equal(y) {
  9584  				continue SETEQUALS
  9585  			}
  9586  		}
  9587  		return false
  9588  	}
  9589  	return true
  9590  }
  9591  
  9592  func (s *Spread) Copy() *Spread {
  9593  	if s == nil {
  9594  		return nil
  9595  	}
  9596  	ns := new(Spread)
  9597  	*ns = *s
  9598  
  9599  	ns.SpreadTarget = CopySliceSpreadTarget(s.SpreadTarget)
  9600  	return ns
  9601  }
  9602  
  9603  func (s *Spread) String() string {
  9604  	if s.str != "" {
  9605  		return s.str
  9606  	}
  9607  	s.str = fmt.Sprintf("%s %s %v", s.Attribute, s.SpreadTarget, s.Weight)
  9608  	return s.str
  9609  }
  9610  
  9611  func (s *Spread) Validate() error {
  9612  	var mErr multierror.Error
  9613  	if s.Attribute == "" {
  9614  		mErr.Errors = append(mErr.Errors, errors.New("Missing spread attribute"))
  9615  	}
  9616  	if s.Weight <= 0 || s.Weight > 100 {
  9617  		mErr.Errors = append(mErr.Errors, errors.New("Spread block must have a positive weight from 0 to 100"))
  9618  	}
  9619  	seen := make(map[string]struct{})
  9620  	sumPercent := uint32(0)
  9621  
  9622  	for _, target := range s.SpreadTarget {
  9623  		// Make sure there are no duplicates
  9624  		_, ok := seen[target.Value]
  9625  		if !ok {
  9626  			seen[target.Value] = struct{}{}
  9627  		} else {
  9628  			mErr.Errors = append(mErr.Errors, fmt.Errorf("Spread target value %q already defined", target.Value))
  9629  		}
  9630  		if target.Percent > 100 {
  9631  			mErr.Errors = append(mErr.Errors, fmt.Errorf("Spread target percentage for value %q must be between 0 and 100", target.Value))
  9632  		}
  9633  		sumPercent += uint32(target.Percent)
  9634  	}
  9635  	if sumPercent > 100 {
  9636  		mErr.Errors = append(mErr.Errors, fmt.Errorf("Sum of spread target percentages must not be greater than 100%%; got %d%%", sumPercent))
  9637  	}
  9638  	return mErr.ErrorOrNil()
  9639  }
  9640  
  9641  // SpreadTarget is used to specify desired percentages for each attribute value
  9642  type SpreadTarget struct {
  9643  	// Value is a single attribute value, like "dc1"
  9644  	Value string
  9645  
  9646  	// Percent is the desired percentage of allocs
  9647  	Percent uint8
  9648  
  9649  	// Memoized string representation
  9650  	str string
  9651  }
  9652  
  9653  func (s *SpreadTarget) Copy() *SpreadTarget {
  9654  	if s == nil {
  9655  		return nil
  9656  	}
  9657  
  9658  	ns := new(SpreadTarget)
  9659  	*ns = *s
  9660  	return ns
  9661  }
  9662  
  9663  func (s *SpreadTarget) String() string {
  9664  	if s.str != "" {
  9665  		return s.str
  9666  	}
  9667  	s.str = fmt.Sprintf("%q %v%%", s.Value, s.Percent)
  9668  	return s.str
  9669  }
  9670  
  9671  func (s *SpreadTarget) Equal(o *SpreadTarget) bool {
  9672  	if s == nil || o == nil {
  9673  		return s == o
  9674  	}
  9675  	switch {
  9676  	case s.Value != o.Value:
  9677  		return false
  9678  	case s.Percent != o.Percent:
  9679  		return false
  9680  	}
  9681  	return true
  9682  }
  9683  
  9684  // EphemeralDisk is an ephemeral disk object
  9685  type EphemeralDisk struct {
  9686  	// Sticky indicates whether the allocation is sticky to a node
  9687  	Sticky bool
  9688  
  9689  	// SizeMB is the size of the local disk
  9690  	SizeMB int
  9691  
  9692  	// Migrate determines if Nomad client should migrate the allocation dir for
  9693  	// sticky allocations
  9694  	Migrate bool
  9695  }
  9696  
  9697  // DefaultEphemeralDisk returns a EphemeralDisk with default configurations
  9698  func DefaultEphemeralDisk() *EphemeralDisk {
  9699  	return &EphemeralDisk{
  9700  		SizeMB: 300,
  9701  	}
  9702  }
  9703  
  9704  func (d *EphemeralDisk) Equal(o *EphemeralDisk) bool {
  9705  	if d == nil || o == nil {
  9706  		return d == o
  9707  	}
  9708  	switch {
  9709  	case d.Sticky != o.Sticky:
  9710  		return false
  9711  	case d.SizeMB != o.SizeMB:
  9712  		return false
  9713  	case d.Migrate != o.Migrate:
  9714  		return false
  9715  	}
  9716  	return true
  9717  }
  9718  
  9719  // Validate validates EphemeralDisk
  9720  func (d *EphemeralDisk) Validate() error {
  9721  	if d.SizeMB < 10 {
  9722  		return fmt.Errorf("minimum DiskMB value is 10; got %d", d.SizeMB)
  9723  	}
  9724  	return nil
  9725  }
  9726  
  9727  // Copy copies the EphemeralDisk struct and returns a new one
  9728  func (d *EphemeralDisk) Copy() *EphemeralDisk {
  9729  	ld := new(EphemeralDisk)
  9730  	*ld = *d
  9731  	return ld
  9732  }
  9733  
var (
	// VaultUnrecoverableError matches unrecoverable errors returned by a Vault
	// server: HTTP status codes 400, 403, or 404 embedded in the error text as
	// "Code: 4xx".
	VaultUnrecoverableError = regexp.MustCompile(`Code:\s+40(0|3|4)`)
)
  9739  
  9740  const (
  9741  	// VaultChangeModeNoop takes no action when a new token is retrieved.
  9742  	VaultChangeModeNoop = "noop"
  9743  
  9744  	// VaultChangeModeSignal signals the task when a new token is retrieved.
  9745  	VaultChangeModeSignal = "signal"
  9746  
  9747  	// VaultChangeModeRestart restarts the task when a new token is retrieved.
  9748  	VaultChangeModeRestart = "restart"
  9749  )
  9750  
  9751  // Vault stores the set of permissions a task needs access to from Vault.
  9752  type Vault struct {
  9753  	// Policies is the set of policies that the task needs access to
  9754  	Policies []string
  9755  
  9756  	// Namespace is the vault namespace that should be used.
  9757  	Namespace string
  9758  
  9759  	// Env marks whether the Vault Token should be exposed as an environment
  9760  	// variable
  9761  	Env bool
  9762  
  9763  	// DisableFile marks whether the Vault Token should be exposed in the file
  9764  	// vault_token in the task's secrets directory.
  9765  	DisableFile bool
  9766  
  9767  	// ChangeMode is used to configure the task's behavior when the Vault
  9768  	// token changes because the original token could not be renewed in time.
  9769  	ChangeMode string
  9770  
  9771  	// ChangeSignal is the signal sent to the task when a new token is
  9772  	// retrieved. This is only valid when using the signal change mode.
  9773  	ChangeSignal string
  9774  }
  9775  
  9776  func (v *Vault) Equal(o *Vault) bool {
  9777  	if v == nil || o == nil {
  9778  		return v == o
  9779  	}
  9780  	switch {
  9781  	case !slices.Equal(v.Policies, o.Policies):
  9782  		return false
  9783  	case v.Namespace != o.Namespace:
  9784  		return false
  9785  	case v.Env != o.Env:
  9786  		return false
  9787  	case v.DisableFile != o.DisableFile:
  9788  		return false
  9789  	case v.ChangeMode != o.ChangeMode:
  9790  		return false
  9791  	case v.ChangeSignal != o.ChangeSignal:
  9792  		return false
  9793  	}
  9794  	return true
  9795  }
  9796  
  9797  // Copy returns a copy of this Vault block.
  9798  func (v *Vault) Copy() *Vault {
  9799  	if v == nil {
  9800  		return nil
  9801  	}
  9802  
  9803  	nv := new(Vault)
  9804  	*nv = *v
  9805  	return nv
  9806  }
  9807  
  9808  func (v *Vault) Canonicalize() {
  9809  	if v.ChangeSignal != "" {
  9810  		v.ChangeSignal = strings.ToUpper(v.ChangeSignal)
  9811  	}
  9812  
  9813  	if v.ChangeMode == "" {
  9814  		v.ChangeMode = VaultChangeModeRestart
  9815  	}
  9816  }
  9817  
  9818  // Validate returns if the Vault block is valid.
  9819  func (v *Vault) Validate() error {
  9820  	if v == nil {
  9821  		return nil
  9822  	}
  9823  
  9824  	var mErr multierror.Error
  9825  	if len(v.Policies) == 0 {
  9826  		_ = multierror.Append(&mErr, fmt.Errorf("Policy list cannot be empty"))
  9827  	}
  9828  
  9829  	for _, p := range v.Policies {
  9830  		if p == "root" {
  9831  			_ = multierror.Append(&mErr, fmt.Errorf("Can not specify \"root\" policy"))
  9832  		}
  9833  	}
  9834  
  9835  	switch v.ChangeMode {
  9836  	case VaultChangeModeSignal:
  9837  		if v.ChangeSignal == "" {
  9838  			_ = multierror.Append(&mErr, fmt.Errorf("Signal must be specified when using change mode %q", VaultChangeModeSignal))
  9839  		}
  9840  	case VaultChangeModeNoop, VaultChangeModeRestart:
  9841  	default:
  9842  		_ = multierror.Append(&mErr, fmt.Errorf("Unknown change mode %q", v.ChangeMode))
  9843  	}
  9844  
  9845  	return mErr.ErrorOrNil()
  9846  }
  9847  
const (
	// DeploymentStatuses are the various states a deployment can be in
	DeploymentStatusRunning      = "running"
	DeploymentStatusPaused       = "paused"
	DeploymentStatusFailed       = "failed"
	DeploymentStatusSuccessful   = "successful"
	DeploymentStatusCancelled    = "cancelled"
	DeploymentStatusInitializing = "initializing"
	DeploymentStatusPending      = "pending"
	DeploymentStatusBlocked      = "blocked"
	DeploymentStatusUnblocking   = "unblocking"

	// TODO Statuses and Descriptions do not match 1:1 and we sometimes use the Description as a status flag

	// DeploymentStatusDescriptions are the various descriptions of the states a
	// deployment can be in.
	DeploymentStatusDescriptionRunning               = "Deployment is running"
	DeploymentStatusDescriptionRunningNeedsPromotion = "Deployment is running but requires manual promotion"
	DeploymentStatusDescriptionRunningAutoPromotion  = "Deployment is running pending automatic promotion"
	DeploymentStatusDescriptionPaused                = "Deployment is paused"
	DeploymentStatusDescriptionSuccessful            = "Deployment completed successfully"
	DeploymentStatusDescriptionStoppedJob            = "Cancelled because job is stopped"
	DeploymentStatusDescriptionNewerJob              = "Cancelled due to newer version of job"
	DeploymentStatusDescriptionFailedAllocations     = "Failed due to unhealthy allocations"
	DeploymentStatusDescriptionProgressDeadline      = "Failed due to progress deadline"
	DeploymentStatusDescriptionFailedByUser          = "Deployment marked as failed"

	// used only in multiregion deployments
	DeploymentStatusDescriptionFailedByPeer   = "Failed because of an error in peer region"
	DeploymentStatusDescriptionBlocked        = "Deployment is complete but waiting for peer region"
	DeploymentStatusDescriptionUnblocking     = "Deployment is unblocking remaining regions"
	DeploymentStatusDescriptionPendingForPeer = "Deployment is pending, waiting for peer region"
)
  9881  
  9882  // DeploymentStatusDescriptionRollback is used to get the status description of
  9883  // a deployment when rolling back to an older job.
  9884  func DeploymentStatusDescriptionRollback(baseDescription string, jobVersion uint64) string {
  9885  	return fmt.Sprintf("%s - rolling back to job version %d", baseDescription, jobVersion)
  9886  }
  9887  
  9888  // DeploymentStatusDescriptionRollbackNoop is used to get the status description of
  9889  // a deployment when rolling back is not possible because it has the same specification
  9890  func DeploymentStatusDescriptionRollbackNoop(baseDescription string, jobVersion uint64) string {
  9891  	return fmt.Sprintf("%s - not rolling back to stable job version %d as current job has same specification", baseDescription, jobVersion)
  9892  }
  9893  
  9894  // DeploymentStatusDescriptionNoRollbackTarget is used to get the status description of
  9895  // a deployment when there is no target to rollback to but autorevert is desired.
  9896  func DeploymentStatusDescriptionNoRollbackTarget(baseDescription string) string {
  9897  	return fmt.Sprintf("%s - no stable job version to auto revert to", baseDescription)
  9898  }
  9899  
// Deployment is the object that represents a job deployment which is used to
// transition a job between versions.
type Deployment struct {
	// ID is a generated UUID for the deployment
	ID string

	// Namespace is the namespace the deployment is created in
	Namespace string

	// JobID is the job the deployment is created for
	JobID string

	// JobVersion is the version of the job at which the deployment is tracking
	JobVersion uint64

	// JobModifyIndex is the ModifyIndex of the job which the deployment is
	// tracking.
	JobModifyIndex uint64

	// JobSpecModifyIndex is the JobModifyIndex of the job which the
	// deployment is tracking.
	JobSpecModifyIndex uint64

	// JobCreateIndex is the create index of the job which the deployment is
	// tracking. It is needed so that if the job gets stopped and reran we can
	// present the correct list of deployments for the job and not old ones.
	JobCreateIndex uint64

	// IsMultiregion specifies if this deployment is part of a multiregion
	// deployment.
	IsMultiregion bool

	// TaskGroups is the set of task groups effected by the deployment and their
	// current deployment status, keyed by task group name.
	TaskGroups map[string]*DeploymentState

	// Status is the current status of the deployment (one of the
	// DeploymentStatus* constants).
	Status string

	// StatusDescription allows a human readable description of the deployment
	// status.
	StatusDescription string

	// EvalPriority tracks the priority of the evaluation which lead to the
	// creation of this Deployment object. Any additional evaluations created
	// as a result of this deployment can therefore inherit this value, which
	// is not guaranteed to be that of the job priority parameter.
	EvalPriority int

	// CreateIndex and ModifyIndex are the Raft indexes at which the
	// deployment was created and last modified.
	CreateIndex uint64
	ModifyIndex uint64
}
  9951  
  9952  // NewDeployment creates a new deployment given the job.
  9953  func NewDeployment(job *Job, evalPriority int) *Deployment {
  9954  	return &Deployment{
  9955  		ID:                 uuid.Generate(),
  9956  		Namespace:          job.Namespace,
  9957  		JobID:              job.ID,
  9958  		JobVersion:         job.Version,
  9959  		JobModifyIndex:     job.ModifyIndex,
  9960  		JobSpecModifyIndex: job.JobModifyIndex,
  9961  		JobCreateIndex:     job.CreateIndex,
  9962  		IsMultiregion:      job.IsMultiregion(),
  9963  		Status:             DeploymentStatusRunning,
  9964  		StatusDescription:  DeploymentStatusDescriptionRunning,
  9965  		TaskGroups:         make(map[string]*DeploymentState, len(job.TaskGroups)),
  9966  		EvalPriority:       evalPriority,
  9967  	}
  9968  }
  9969  
  9970  func (d *Deployment) Copy() *Deployment {
  9971  	if d == nil {
  9972  		return nil
  9973  	}
  9974  
  9975  	c := &Deployment{}
  9976  	*c = *d
  9977  
  9978  	c.TaskGroups = nil
  9979  	if l := len(d.TaskGroups); d.TaskGroups != nil {
  9980  		c.TaskGroups = make(map[string]*DeploymentState, l)
  9981  		for tg, s := range d.TaskGroups {
  9982  			c.TaskGroups[tg] = s.Copy()
  9983  		}
  9984  	}
  9985  
  9986  	return c
  9987  }
  9988  
  9989  // Active returns whether the deployment is active or terminal.
  9990  func (d *Deployment) Active() bool {
  9991  	switch d.Status {
  9992  	case DeploymentStatusRunning, DeploymentStatusPaused, DeploymentStatusBlocked,
  9993  		DeploymentStatusUnblocking, DeploymentStatusInitializing, DeploymentStatusPending:
  9994  		return true
  9995  	default:
  9996  		return false
  9997  	}
  9998  }
  9999  
 10000  // GetID is a helper for getting the ID when the object may be nil
 10001  func (d *Deployment) GetID() string {
 10002  	if d == nil {
 10003  		return ""
 10004  	}
 10005  	return d.ID
 10006  }
 10007  
 10008  // GetCreateIndex implements the CreateIndexGetter interface, required for
 10009  // pagination.
 10010  func (d *Deployment) GetCreateIndex() uint64 {
 10011  	if d == nil {
 10012  		return 0
 10013  	}
 10014  	return d.CreateIndex
 10015  }
 10016  
 10017  // HasPlacedCanaries returns whether the deployment has placed canaries
 10018  func (d *Deployment) HasPlacedCanaries() bool {
 10019  	if d == nil || len(d.TaskGroups) == 0 {
 10020  		return false
 10021  	}
 10022  	for _, group := range d.TaskGroups {
 10023  		if len(group.PlacedCanaries) != 0 {
 10024  			return true
 10025  		}
 10026  	}
 10027  	return false
 10028  }
 10029  
 10030  // RequiresPromotion returns whether the deployment requires promotion to
 10031  // continue
 10032  func (d *Deployment) RequiresPromotion() bool {
 10033  	if d == nil || len(d.TaskGroups) == 0 || d.Status != DeploymentStatusRunning {
 10034  		return false
 10035  	}
 10036  	for _, group := range d.TaskGroups {
 10037  		if group.DesiredCanaries > 0 && !group.Promoted {
 10038  			return true
 10039  		}
 10040  	}
 10041  	return false
 10042  }
 10043  
 10044  // HasAutoPromote determines if all taskgroups are marked auto_promote
 10045  func (d *Deployment) HasAutoPromote() bool {
 10046  	if d == nil || len(d.TaskGroups) == 0 || d.Status != DeploymentStatusRunning {
 10047  		return false
 10048  	}
 10049  	for _, group := range d.TaskGroups {
 10050  		if group.DesiredCanaries > 0 && !group.AutoPromote {
 10051  			return false
 10052  		}
 10053  	}
 10054  	return true
 10055  }
 10056  
 10057  func (d *Deployment) GoString() string {
 10058  	base := fmt.Sprintf("Deployment ID %q for job %q has status %q (%v):", d.ID, d.JobID, d.Status, d.StatusDescription)
 10059  	for group, state := range d.TaskGroups {
 10060  		base += fmt.Sprintf("\nTask Group %q has state:\n%#v", group, state)
 10061  	}
 10062  	return base
 10063  }
 10064  
 10065  // GetNamespace implements the NamespaceGetter interface, required for pagination.
 10066  func (d *Deployment) GetNamespace() string {
 10067  	if d == nil {
 10068  		return ""
 10069  	}
 10070  	return d.Namespace
 10071  }
 10072  
// DeploymentState tracks the state of a deployment for a given task group.
type DeploymentState struct {
	// AutoRevert marks whether the task group has indicated the job should be
	// reverted on failure
	AutoRevert bool

	// AutoPromote marks promotion triggered automatically by healthy canaries
	// copied from TaskGroup UpdateStrategy in scheduler.reconcile
	AutoPromote bool

	// ProgressDeadline is the deadline by which an allocation must transition
	// to healthy before the deployment is considered failed. This value is set
	// by the jobspec `update.progress_deadline` field.
	ProgressDeadline time.Duration

	// RequireProgressBy is the time by which an allocation must transition to
	// healthy before the deployment is considered failed. This value is reset
	// to "now" + ProgressDeadline when an allocation updates the deployment.
	RequireProgressBy time.Time

	// Promoted marks whether the canaries have been promoted
	Promoted bool

	// PlacedCanaries is the set of placed canary allocations (allocation IDs)
	PlacedCanaries []string

	// DesiredCanaries is the number of canaries that should be created.
	DesiredCanaries int

	// DesiredTotal is the total number of allocations that should be created as
	// part of the deployment.
	DesiredTotal int

	// PlacedAllocs is the number of allocations that have been placed
	PlacedAllocs int

	// HealthyAllocs is the number of allocations that have been marked healthy.
	HealthyAllocs int

	// UnhealthyAllocs are allocations that have been marked as unhealthy.
	UnhealthyAllocs int
}
 10115  
 10116  func (d *DeploymentState) GoString() string {
 10117  	base := fmt.Sprintf("\tDesired Total: %d", d.DesiredTotal)
 10118  	base += fmt.Sprintf("\n\tDesired Canaries: %d", d.DesiredCanaries)
 10119  	base += fmt.Sprintf("\n\tPlaced Canaries: %#v", d.PlacedCanaries)
 10120  	base += fmt.Sprintf("\n\tPromoted: %v", d.Promoted)
 10121  	base += fmt.Sprintf("\n\tPlaced: %d", d.PlacedAllocs)
 10122  	base += fmt.Sprintf("\n\tHealthy: %d", d.HealthyAllocs)
 10123  	base += fmt.Sprintf("\n\tUnhealthy: %d", d.UnhealthyAllocs)
 10124  	base += fmt.Sprintf("\n\tAutoRevert: %v", d.AutoRevert)
 10125  	base += fmt.Sprintf("\n\tAutoPromote: %v", d.AutoPromote)
 10126  	return base
 10127  }
 10128  
 10129  func (d *DeploymentState) Copy() *DeploymentState {
 10130  	c := &DeploymentState{}
 10131  	*c = *d
 10132  	c.PlacedCanaries = slices.Clone(d.PlacedCanaries)
 10133  	return c
 10134  }
 10135  
// DeploymentStatusUpdate is used to update the status of a given deployment,
// pairing the new status with its human-readable description.
type DeploymentStatusUpdate struct {
	// DeploymentID is the ID of the deployment to update
	DeploymentID string

	// Status is the new status of the deployment.
	Status string

	// StatusDescription is the new status description of the deployment.
	StatusDescription string
}
 10147  
// RescheduleTracker encapsulates previous reschedule events
type RescheduleTracker struct {
	// Events is the list of past reschedule attempts for an allocation.
	Events []*RescheduleEvent
}
 10152  
 10153  func (rt *RescheduleTracker) Copy() *RescheduleTracker {
 10154  	if rt == nil {
 10155  		return nil
 10156  	}
 10157  	nt := &RescheduleTracker{}
 10158  	*nt = *rt
 10159  	rescheduleEvents := make([]*RescheduleEvent, 0, len(rt.Events))
 10160  	for _, tracker := range rt.Events {
 10161  		rescheduleEvents = append(rescheduleEvents, tracker.Copy())
 10162  	}
 10163  	nt.Events = rescheduleEvents
 10164  	return nt
 10165  }
 10166  
 10167  func (rt *RescheduleTracker) RescheduleEligible(reschedulePolicy *ReschedulePolicy, failTime time.Time) bool {
 10168  	if reschedulePolicy == nil {
 10169  		return false
 10170  	}
 10171  	attempts := reschedulePolicy.Attempts
 10172  	enabled := attempts > 0 || reschedulePolicy.Unlimited
 10173  	if !enabled {
 10174  		return false
 10175  	}
 10176  	if reschedulePolicy.Unlimited {
 10177  		return true
 10178  	}
 10179  	// Early return true if there are no attempts yet and the number of allowed attempts is > 0
 10180  	if (rt == nil || len(rt.Events) == 0) && attempts > 0 {
 10181  		return true
 10182  	}
 10183  	attempted, _ := rt.rescheduleInfo(reschedulePolicy, failTime)
 10184  	return attempted < attempts
 10185  }
 10186  
 10187  func (rt *RescheduleTracker) rescheduleInfo(reschedulePolicy *ReschedulePolicy, failTime time.Time) (int, int) {
 10188  	if reschedulePolicy == nil {
 10189  		return 0, 0
 10190  	}
 10191  	attempts := reschedulePolicy.Attempts
 10192  	interval := reschedulePolicy.Interval
 10193  
 10194  	attempted := 0
 10195  	if rt != nil && attempts > 0 {
 10196  		for j := len(rt.Events) - 1; j >= 0; j-- {
 10197  			lastAttempt := rt.Events[j].RescheduleTime
 10198  			timeDiff := failTime.UTC().UnixNano() - lastAttempt
 10199  			if timeDiff < interval.Nanoseconds() {
 10200  				attempted += 1
 10201  			}
 10202  		}
 10203  	}
 10204  	return attempted, attempts
 10205  }
 10206  
// RescheduleEvent is used to keep track of previous attempts at rescheduling an allocation
type RescheduleEvent struct {
	// RescheduleTime is the timestamp of a reschedule attempt, in Unix
	// nanoseconds.
	RescheduleTime int64

	// PrevAllocID is the ID of the previous allocation being restarted
	PrevAllocID string

	// PrevNodeID is the node ID of the previous allocation
	PrevNodeID string

	// Delay is the reschedule delay associated with the attempt
	Delay time.Duration
}
 10221  
 10222  func NewRescheduleEvent(rescheduleTime int64, prevAllocID string, prevNodeID string, delay time.Duration) *RescheduleEvent {
 10223  	return &RescheduleEvent{RescheduleTime: rescheduleTime,
 10224  		PrevAllocID: prevAllocID,
 10225  		PrevNodeID:  prevNodeID,
 10226  		Delay:       delay}
 10227  }
 10228  
 10229  func (re *RescheduleEvent) Copy() *RescheduleEvent {
 10230  	if re == nil {
 10231  		return nil
 10232  	}
 10233  	copy := new(RescheduleEvent)
 10234  	*copy = *re
 10235  	return copy
 10236  }
 10237  
// DesiredTransition is used to mark an allocation as having a desired state
// transition. This information can be used by the scheduler to make the
// correct decision. All fields are optional pointers; nil means "no opinion".
type DesiredTransition struct {
	// Migrate is used to indicate that this allocation should be stopped and
	// migrated to another node.
	Migrate *bool

	// Reschedule is used to indicate that this allocation is eligible to be
	// rescheduled. Most allocations are automatically eligible for
	// rescheduling, so this field is only required when an allocation is not
	// automatically eligible. An example is an allocation that is part of a
	// deployment.
	Reschedule *bool

	// ForceReschedule is used to indicate that this allocation must be rescheduled.
	// This field is only used when operators want to force a placement even if
	// a failed allocation is not eligible to be rescheduled
	ForceReschedule *bool

	// NoShutdownDelay, if set to true, will override the group and
	// task shutdown_delay configuration and ignore the delay for any
	// allocations stopped as a result of this Deregister call.
	NoShutdownDelay *bool
}
 10263  
 10264  // Merge merges the two desired transitions, preferring the values from the
 10265  // passed in object.
 10266  func (d *DesiredTransition) Merge(o *DesiredTransition) {
 10267  	if o.Migrate != nil {
 10268  		d.Migrate = o.Migrate
 10269  	}
 10270  
 10271  	if o.Reschedule != nil {
 10272  		d.Reschedule = o.Reschedule
 10273  	}
 10274  
 10275  	if o.ForceReschedule != nil {
 10276  		d.ForceReschedule = o.ForceReschedule
 10277  	}
 10278  
 10279  	if o.NoShutdownDelay != nil {
 10280  		d.NoShutdownDelay = o.NoShutdownDelay
 10281  	}
 10282  }
 10283  
 10284  // ShouldMigrate returns whether the transition object dictates a migration.
 10285  func (d *DesiredTransition) ShouldMigrate() bool {
 10286  	return d.Migrate != nil && *d.Migrate
 10287  }
 10288  
 10289  // ShouldReschedule returns whether the transition object dictates a
 10290  // rescheduling.
 10291  func (d *DesiredTransition) ShouldReschedule() bool {
 10292  	return d.Reschedule != nil && *d.Reschedule
 10293  }
 10294  
 10295  // ShouldForceReschedule returns whether the transition object dictates a
 10296  // forced rescheduling.
 10297  func (d *DesiredTransition) ShouldForceReschedule() bool {
 10298  	if d == nil {
 10299  		return false
 10300  	}
 10301  	return d.ForceReschedule != nil && *d.ForceReschedule
 10302  }
 10303  
 10304  // ShouldIgnoreShutdownDelay returns whether the transition object dictates
 10305  // that shutdown skip any shutdown delays.
 10306  func (d *DesiredTransition) ShouldIgnoreShutdownDelay() bool {
 10307  	if d == nil {
 10308  		return false
 10309  	}
 10310  	return d.NoShutdownDelay != nil && *d.NoShutdownDelay
 10311  }
 10312  
// AllocDesiredStatus* are the values the server may set as an allocation's
// desired status.
const (
	AllocDesiredStatusRun   = "run"   // Allocation should run
	AllocDesiredStatusStop  = "stop"  // Allocation should stop
	AllocDesiredStatusEvict = "evict" // Allocation should stop, and was evicted
)
 10318  
// AllocClientStatus* are the values a client may report as an allocation's
// actual status.
const (
	AllocClientStatusPending  = "pending"  // Not yet started on the client
	AllocClientStatusRunning  = "running"  // Currently running
	AllocClientStatusComplete = "complete" // Finished successfully (terminal)
	AllocClientStatusFailed   = "failed"   // Finished unsuccessfully (terminal)
	AllocClientStatusLost     = "lost"     // Client contact lost (terminal)
	AllocClientStatusUnknown  = "unknown"  // Status currently unknown
)
 10327  
// Allocation is used to allocate the placement of a task group to a node.
type Allocation struct {
	// msgpack omit empty fields during serialization
	_struct bool `codec:",omitempty"` // nolint: structcheck

	// ID of the allocation (UUID)
	ID string

	// Namespace is the namespace the allocation is created in
	Namespace string

	// ID of the evaluation that generated this allocation
	EvalID string

	// Name is a logical name of the allocation.
	Name string

	// NodeID is the node this is being placed on
	NodeID string

	// NodeName is the name of the node this is being placed on.
	NodeName string

	// Job is the parent job of the task group being allocated.
	// This is copied at allocation time to avoid issues if the job
	// definition is updated.
	JobID string
	Job   *Job

	// TaskGroup is the name of the task group that should be run
	TaskGroup string

	// COMPAT(0.11): Remove in 0.11
	// Resources is the total set of resources allocated as part
	// of this allocation of the task group. Dynamic ports will be set by
	// the scheduler.
	Resources *Resources

	// SharedResources are the resources that are shared by all the tasks in an
	// allocation
	// Deprecated: use AllocatedResources.Shared instead.
	// Keep field to allow us to handle upgrade paths from old versions
	SharedResources *Resources

	// TaskResources is the set of resources allocated to each
	// task. These should sum to the total Resources. Dynamic ports will be
	// set by the scheduler.
	// Deprecated: use AllocatedResources.Tasks instead.
	// Keep field to allow us to handle upgrade paths from old versions
	TaskResources map[string]*Resources

	// AllocatedResources is the total resources allocated for the task group.
	AllocatedResources *AllocatedResources

	// Metrics associated with this allocation
	Metrics *AllocMetric

	// DesiredStatus is the desired status of the allocation on the client
	// (one of the AllocDesiredStatus* constants).
	DesiredStatus string

	// DesiredDescription is meant to provide more human useful information
	// about the desired status.
	DesiredDescription string

	// DesiredTransition is used to indicate that a state transition
	// is desired for a given reason.
	DesiredTransition DesiredTransition

	// ClientStatus is the status of the allocation on the client (one of the
	// AllocClientStatus* constants).
	ClientStatus string

	// ClientDescription is meant to provide more human useful information
	// about the client status.
	ClientDescription string

	// TaskStates stores the state of each task, keyed by task name.
	TaskStates map[string]*TaskState

	// AllocStates track meta data associated with changes to the state of the whole allocation, like becoming lost
	AllocStates []*AllocState

	// PreviousAllocation is the allocation that this allocation is replacing
	PreviousAllocation string

	// NextAllocation is the allocation that this allocation is being replaced by
	NextAllocation string

	// DeploymentID identifies an allocation as being created from a
	// particular deployment
	DeploymentID string

	// DeploymentStatus captures the status of the allocation as part of the
	// given deployment
	DeploymentStatus *AllocDeploymentStatus

	// RescheduleTracker captures details of previous reschedule attempts of the allocation
	RescheduleTracker *RescheduleTracker

	// NetworkStatus captures networking details of an allocation known at runtime
	NetworkStatus *AllocNetworkStatus

	// FollowupEvalID captures a follow up evaluation created to handle a failed allocation
	// that can be rescheduled in the future
	FollowupEvalID string

	// PreemptedAllocations captures IDs of any allocations that were preempted
	// in order to place this allocation
	PreemptedAllocations []string

	// PreemptedByAllocation tracks the alloc ID of the allocation that caused this allocation
	// to stop running because it got preempted
	PreemptedByAllocation string

	// SignedIdentities is a map of task names to signed identity/capability
	// claim tokens for those tasks. If needed, it is populated in the plan
	// applier. Never serialized to JSON.
	SignedIdentities map[string]string `json:"-"`

	// SigningKeyID is the key used to sign the SignedIdentities field.
	SigningKeyID string

	// Raft Indexes
	CreateIndex uint64
	ModifyIndex uint64

	// AllocModifyIndex is not updated when the client updates allocations. This
	// lets the client pull only the allocs updated by the server.
	AllocModifyIndex uint64

	// CreateTime is the time the allocation has finished scheduling and been
	// verified by the plan applier.
	CreateTime int64

	// ModifyTime is the time the allocation was last updated.
	ModifyTime int64
}
 10462  
 10463  // GetID implements the IDGetter interface, required for pagination.
 10464  func (a *Allocation) GetID() string {
 10465  	if a == nil {
 10466  		return ""
 10467  	}
 10468  	return a.ID
 10469  }
 10470  
 10471  // GetNamespace implements the NamespaceGetter interface, required for
 10472  // pagination and filtering namespaces in endpoints that support glob namespace
 10473  // requests using tokens with limited access.
 10474  func (a *Allocation) GetNamespace() string {
 10475  	if a == nil {
 10476  		return ""
 10477  	}
 10478  	return a.Namespace
 10479  }
 10480  
 10481  // GetCreateIndex implements the CreateIndexGetter interface, required for
 10482  // pagination.
 10483  func (a *Allocation) GetCreateIndex() uint64 {
 10484  	if a == nil {
 10485  		return 0
 10486  	}
 10487  	return a.CreateIndex
 10488  }
 10489  
 10490  // ConsulNamespace returns the Consul namespace of the task group associated
 10491  // with this allocation.
 10492  func (a *Allocation) ConsulNamespace() string {
 10493  	return a.Job.LookupTaskGroup(a.TaskGroup).Consul.GetNamespace()
 10494  }
 10495  
 10496  func (a *Allocation) JobNamespacedID() NamespacedID {
 10497  	return NewNamespacedID(a.JobID, a.Namespace)
 10498  }
 10499  
 10500  // Index returns the index of the allocation. If the allocation is from a task
 10501  // group with count greater than 1, there will be multiple allocations for it.
 10502  func (a *Allocation) Index() uint {
 10503  	l := len(a.Name)
 10504  	prefix := len(a.JobID) + len(a.TaskGroup) + 2
 10505  	if l <= 3 || l <= prefix {
 10506  		return uint(0)
 10507  	}
 10508  
 10509  	strNum := a.Name[prefix : len(a.Name)-1]
 10510  	num, _ := strconv.Atoi(strNum)
 10511  	return uint(num)
 10512  }
 10513  
 10514  // Copy provides a copy of the allocation and deep copies the job
 10515  func (a *Allocation) Copy() *Allocation {
 10516  	return a.copyImpl(true)
 10517  }
 10518  
 10519  // CopySkipJob provides a copy of the allocation but doesn't deep copy the job
 10520  func (a *Allocation) CopySkipJob() *Allocation {
 10521  	return a.copyImpl(false)
 10522  }
 10523  
 10524  // Canonicalize Allocation to ensure fields are initialized to the expectations
 10525  // of this version of Nomad. Should be called when restoring persisted
 10526  // Allocations or receiving Allocations from Nomad agents potentially on an
 10527  // older version of Nomad.
 10528  func (a *Allocation) Canonicalize() {
 10529  	if a.AllocatedResources == nil && a.TaskResources != nil {
 10530  		ar := AllocatedResources{}
 10531  
 10532  		tasks := make(map[string]*AllocatedTaskResources, len(a.TaskResources))
 10533  		for name, tr := range a.TaskResources {
 10534  			atr := AllocatedTaskResources{}
 10535  			atr.Cpu.CpuShares = int64(tr.CPU)
 10536  			atr.Memory.MemoryMB = int64(tr.MemoryMB)
 10537  			atr.Networks = tr.Networks.Copy()
 10538  
 10539  			tasks[name] = &atr
 10540  		}
 10541  		ar.Tasks = tasks
 10542  
 10543  		if a.SharedResources != nil {
 10544  			ar.Shared.DiskMB = int64(a.SharedResources.DiskMB)
 10545  			ar.Shared.Networks = a.SharedResources.Networks.Copy()
 10546  		}
 10547  
 10548  		a.AllocatedResources = &ar
 10549  	}
 10550  
 10551  	a.Job.Canonicalize()
 10552  }
 10553  
 10554  func (a *Allocation) copyImpl(job bool) *Allocation {
 10555  	if a == nil {
 10556  		return nil
 10557  	}
 10558  	na := new(Allocation)
 10559  	*na = *a
 10560  
 10561  	if job {
 10562  		na.Job = na.Job.Copy()
 10563  	}
 10564  
 10565  	na.AllocatedResources = na.AllocatedResources.Copy()
 10566  	na.Resources = na.Resources.Copy()
 10567  	na.SharedResources = na.SharedResources.Copy()
 10568  
 10569  	if a.TaskResources != nil {
 10570  		tr := make(map[string]*Resources, len(na.TaskResources))
 10571  		for task, resource := range na.TaskResources {
 10572  			tr[task] = resource.Copy()
 10573  		}
 10574  		na.TaskResources = tr
 10575  	}
 10576  
 10577  	na.Metrics = na.Metrics.Copy()
 10578  	na.DeploymentStatus = na.DeploymentStatus.Copy()
 10579  
 10580  	if a.TaskStates != nil {
 10581  		ts := make(map[string]*TaskState, len(na.TaskStates))
 10582  		for task, state := range na.TaskStates {
 10583  			ts[task] = state.Copy()
 10584  		}
 10585  		na.TaskStates = ts
 10586  	}
 10587  
 10588  	na.RescheduleTracker = a.RescheduleTracker.Copy()
 10589  	na.PreemptedAllocations = slices.Clone(a.PreemptedAllocations)
 10590  	return na
 10591  }
 10592  
 10593  // TerminalStatus returns if the desired or actual status is terminal and
 10594  // will no longer transition.
 10595  func (a *Allocation) TerminalStatus() bool {
 10596  	// First check the desired state and if that isn't terminal, check client
 10597  	// state.
 10598  	return a.ServerTerminalStatus() || a.ClientTerminalStatus()
 10599  }
 10600  
 10601  // ServerTerminalStatus returns true if the desired state of the allocation is terminal
 10602  func (a *Allocation) ServerTerminalStatus() bool {
 10603  	switch a.DesiredStatus {
 10604  	case AllocDesiredStatusStop, AllocDesiredStatusEvict:
 10605  		return true
 10606  	default:
 10607  		return false
 10608  	}
 10609  }
 10610  
 10611  // ClientTerminalStatus returns if the client status is terminal and will no longer transition
 10612  func (a *Allocation) ClientTerminalStatus() bool {
 10613  	switch a.ClientStatus {
 10614  	case AllocClientStatusComplete, AllocClientStatusFailed, AllocClientStatusLost:
 10615  		return true
 10616  	default:
 10617  		return false
 10618  	}
 10619  }
 10620  
 10621  // ShouldReschedule returns if the allocation is eligible to be rescheduled according
 10622  // to its status and ReschedulePolicy given its failure time
 10623  func (a *Allocation) ShouldReschedule(reschedulePolicy *ReschedulePolicy, failTime time.Time) bool {
 10624  	// First check the desired state
 10625  	switch a.DesiredStatus {
 10626  	case AllocDesiredStatusStop, AllocDesiredStatusEvict:
 10627  		return false
 10628  	default:
 10629  	}
 10630  	switch a.ClientStatus {
 10631  	case AllocClientStatusFailed:
 10632  		return a.RescheduleEligible(reschedulePolicy, failTime)
 10633  	default:
 10634  		return false
 10635  	}
 10636  }
 10637  
 10638  // RescheduleEligible returns if the allocation is eligible to be rescheduled according
 10639  // to its ReschedulePolicy and the current state of its reschedule trackers
 10640  func (a *Allocation) RescheduleEligible(reschedulePolicy *ReschedulePolicy, failTime time.Time) bool {
 10641  	return a.RescheduleTracker.RescheduleEligible(reschedulePolicy, failTime)
 10642  }
 10643  
 10644  func (a *Allocation) RescheduleInfo() (int, int) {
 10645  	return a.RescheduleTracker.rescheduleInfo(a.ReschedulePolicy(), a.LastEventTime())
 10646  }
 10647  
 10648  // LastEventTime is the time of the last task event in the allocation.
 10649  // It is used to determine allocation failure time. If the FinishedAt field
 10650  // is not set, the alloc's modify time is used
 10651  func (a *Allocation) LastEventTime() time.Time {
 10652  	var lastEventTime time.Time
 10653  	if a.TaskStates != nil {
 10654  		for _, s := range a.TaskStates {
 10655  			if lastEventTime.IsZero() || s.FinishedAt.After(lastEventTime) {
 10656  				lastEventTime = s.FinishedAt
 10657  			}
 10658  		}
 10659  	}
 10660  
 10661  	if lastEventTime.IsZero() {
 10662  		return time.Unix(0, a.ModifyTime).UTC()
 10663  	}
 10664  	return lastEventTime
 10665  }
 10666  
 10667  // ReschedulePolicy returns the reschedule policy based on the task group
 10668  func (a *Allocation) ReschedulePolicy() *ReschedulePolicy {
 10669  	tg := a.Job.LookupTaskGroup(a.TaskGroup)
 10670  	if tg == nil {
 10671  		return nil
 10672  	}
 10673  	return tg.ReschedulePolicy
 10674  }
 10675  
 10676  // MigrateStrategy returns the migrate strategy based on the task group
 10677  func (a *Allocation) MigrateStrategy() *MigrateStrategy {
 10678  	tg := a.Job.LookupTaskGroup(a.TaskGroup)
 10679  	if tg == nil {
 10680  		return nil
 10681  	}
 10682  	return tg.Migrate
 10683  }
 10684  
 10685  // NextRescheduleTime returns a time on or after which the allocation is eligible to be rescheduled,
 10686  // and whether the next reschedule time is within policy's interval if the policy doesn't allow unlimited reschedules
 10687  func (a *Allocation) NextRescheduleTime() (time.Time, bool) {
 10688  	failTime := a.LastEventTime()
 10689  	reschedulePolicy := a.ReschedulePolicy()
 10690  	if a.DesiredStatus == AllocDesiredStatusStop || a.ClientStatus != AllocClientStatusFailed || failTime.IsZero() || reschedulePolicy == nil {
 10691  		return time.Time{}, false
 10692  	}
 10693  
 10694  	return a.nextRescheduleTime(failTime, reschedulePolicy)
 10695  }
 10696  
 10697  func (a *Allocation) nextRescheduleTime(failTime time.Time, reschedulePolicy *ReschedulePolicy) (time.Time, bool) {
 10698  	nextDelay := a.NextDelay()
 10699  	nextRescheduleTime := failTime.Add(nextDelay)
 10700  	rescheduleEligible := reschedulePolicy.Unlimited || (reschedulePolicy.Attempts > 0 && a.RescheduleTracker == nil)
 10701  	if reschedulePolicy.Attempts > 0 && a.RescheduleTracker != nil && a.RescheduleTracker.Events != nil {
 10702  		// Check for eligibility based on the interval if max attempts is set
 10703  		attempted, attempts := a.RescheduleTracker.rescheduleInfo(reschedulePolicy, failTime)
 10704  		rescheduleEligible = attempted < attempts && nextDelay < reschedulePolicy.Interval
 10705  	}
 10706  	return nextRescheduleTime, rescheduleEligible
 10707  }
 10708  
 10709  // NextRescheduleTimeByFailTime works like NextRescheduleTime but allows callers
 10710  // specify a failure time. Useful for things like determining whether to reschedule
 10711  // an alloc on a disconnected node.
 10712  func (a *Allocation) NextRescheduleTimeByFailTime(failTime time.Time) (time.Time, bool) {
 10713  	reschedulePolicy := a.ReschedulePolicy()
 10714  	if reschedulePolicy == nil {
 10715  		return time.Time{}, false
 10716  	}
 10717  
 10718  	return a.nextRescheduleTime(failTime, reschedulePolicy)
 10719  }
 10720  
 10721  // ShouldClientStop tests an alloc for StopAfterClientDisconnect configuration
 10722  func (a *Allocation) ShouldClientStop() bool {
 10723  	tg := a.Job.LookupTaskGroup(a.TaskGroup)
 10724  	if tg == nil ||
 10725  		tg.StopAfterClientDisconnect == nil ||
 10726  		*tg.StopAfterClientDisconnect == 0*time.Nanosecond {
 10727  		return false
 10728  	}
 10729  	return true
 10730  }
 10731  
 10732  // WaitClientStop uses the reschedule delay mechanism to block rescheduling until
 10733  // StopAfterClientDisconnect's block interval passes
 10734  func (a *Allocation) WaitClientStop() time.Time {
 10735  	tg := a.Job.LookupTaskGroup(a.TaskGroup)
 10736  
 10737  	// An alloc can only be marked lost once, so use the first lost transition
 10738  	var t time.Time
 10739  	for _, s := range a.AllocStates {
 10740  		if s.Field == AllocStateFieldClientStatus &&
 10741  			s.Value == AllocClientStatusLost {
 10742  			t = s.Time
 10743  			break
 10744  		}
 10745  	}
 10746  
 10747  	// On the first pass, the alloc hasn't been marked lost yet, and so we start
 10748  	// counting from now
 10749  	if t.IsZero() {
 10750  		t = time.Now().UTC()
 10751  	}
 10752  
 10753  	// Find the max kill timeout
 10754  	kill := DefaultKillTimeout
 10755  	for _, t := range tg.Tasks {
 10756  		if t.KillTimeout > kill {
 10757  			kill = t.KillTimeout
 10758  		}
 10759  	}
 10760  
 10761  	return t.Add(*tg.StopAfterClientDisconnect + kill)
 10762  }
 10763  
 10764  // DisconnectTimeout uses the MaxClientDisconnect to compute when the allocation
 10765  // should transition to lost.
 10766  func (a *Allocation) DisconnectTimeout(now time.Time) time.Time {
 10767  	if a == nil || a.Job == nil {
 10768  		return now
 10769  	}
 10770  
 10771  	tg := a.Job.LookupTaskGroup(a.TaskGroup)
 10772  
 10773  	timeout := tg.MaxClientDisconnect
 10774  
 10775  	if timeout == nil {
 10776  		return now
 10777  	}
 10778  
 10779  	return now.Add(*timeout)
 10780  }
 10781  
 10782  // SupportsDisconnectedClients determines whether both the server and the task group
 10783  // are configured to allow the allocation to reconnect after network connectivity
 10784  // has been lost and then restored.
 10785  func (a *Allocation) SupportsDisconnectedClients(serverSupportsDisconnectedClients bool) bool {
 10786  	if !serverSupportsDisconnectedClients {
 10787  		return false
 10788  	}
 10789  
 10790  	if a.Job != nil {
 10791  		tg := a.Job.LookupTaskGroup(a.TaskGroup)
 10792  		if tg != nil {
 10793  			return tg.MaxClientDisconnect != nil
 10794  		}
 10795  	}
 10796  
 10797  	return false
 10798  }
 10799  
// NextDelay returns a duration after which the allocation can be rescheduled.
// It is calculated according to the delay function and previous reschedule attempts.
func (a *Allocation) NextDelay() time.Duration {
	policy := a.ReschedulePolicy()
	// Can be nil if the task group was updated to remove its reschedule policy
	if policy == nil {
		return 0
	}
	delayDur := policy.Delay
	// Without any prior reschedule events the base delay applies as-is.
	if a.RescheduleTracker == nil || a.RescheduleTracker.Events == nil || len(a.RescheduleTracker.Events) == 0 {
		return delayDur
	}
	events := a.RescheduleTracker.Events
	switch policy.DelayFunction {
	case "exponential":
		// Double the delay used by the most recent reschedule event.
		delayDur = a.RescheduleTracker.Events[len(a.RescheduleTracker.Events)-1].Delay * 2
	case "fibonacci":
		if len(events) >= 2 {
			fibN1Delay := events[len(events)-1].Delay
			fibN2Delay := events[len(events)-2].Delay
			// Handle reset of delay ceiling which should cause
			// a new series to start
			if fibN2Delay == policy.MaxDelay && fibN1Delay == policy.Delay {
				delayDur = fibN1Delay
			} else {
				delayDur = fibN1Delay + fibN2Delay
			}
		}
	default:
		// Any other delay function keeps the base delay and deliberately
		// skips the MaxDelay capping/reset logic below.
		return delayDur
	}
	if policy.MaxDelay > 0 && delayDur > policy.MaxDelay {
		delayDur = policy.MaxDelay
		// check if delay needs to be reset

		lastRescheduleEvent := a.RescheduleTracker.Events[len(a.RescheduleTracker.Events)-1]
		timeDiff := a.LastEventTime().UTC().UnixNano() - lastRescheduleEvent.RescheduleTime
		// If more time than the capped delay has already passed since the
		// last reschedule, restart the series from the base delay.
		if timeDiff > delayDur.Nanoseconds() {
			delayDur = policy.Delay
		}

	}

	return delayDur
}
 10845  
 10846  // Terminated returns if the allocation is in a terminal state on a client.
 10847  func (a *Allocation) Terminated() bool {
 10848  	if a.ClientStatus == AllocClientStatusFailed ||
 10849  		a.ClientStatus == AllocClientStatusComplete ||
 10850  		a.ClientStatus == AllocClientStatusLost {
 10851  		return true
 10852  	}
 10853  	return false
 10854  }
 10855  
// SetStop updates the allocation in place to a DesiredStatus stop, with the ClientStatus
func (a *Allocation) SetStop(clientStatus, clientDesc string) {
	a.DesiredStatus = AllocDesiredStatusStop
	a.ClientStatus = clientStatus
	a.ClientDescription = clientDesc
	// Record the client-status transition with a timestamp so later logic
	// (e.g. WaitClientStop) can find when it happened.
	a.AppendState(AllocStateFieldClientStatus, clientStatus)
}
 10863  
 10864  // AppendState creates and appends an AllocState entry recording the time of the state
 10865  // transition. Used to mark the transition to lost
 10866  func (a *Allocation) AppendState(field AllocStateField, value string) {
 10867  	a.AllocStates = append(a.AllocStates, &AllocState{
 10868  		Field: field,
 10869  		Value: value,
 10870  		Time:  time.Now().UTC(),
 10871  	})
 10872  }
 10873  
 10874  // RanSuccessfully returns whether the client has ran the allocation and all
 10875  // tasks finished successfully. Critically this function returns whether the
 10876  // allocation has ran to completion and not just that the alloc has converged to
 10877  // its desired state. That is to say that a batch allocation must have finished
 10878  // with exit code 0 on all task groups. This doesn't really have meaning on a
 10879  // non-batch allocation because a service and system allocation should not
 10880  // finish.
 10881  func (a *Allocation) RanSuccessfully() bool {
 10882  	// Handle the case the client hasn't started the allocation.
 10883  	if len(a.TaskStates) == 0 {
 10884  		return false
 10885  	}
 10886  
 10887  	// Check to see if all the tasks finished successfully in the allocation
 10888  	allSuccess := true
 10889  	for _, state := range a.TaskStates {
 10890  		allSuccess = allSuccess && state.Successful()
 10891  	}
 10892  
 10893  	return allSuccess
 10894  }
 10895  
 10896  // ShouldMigrate returns if the allocation needs data migration
 10897  func (a *Allocation) ShouldMigrate() bool {
 10898  	if a.PreviousAllocation == "" {
 10899  		return false
 10900  	}
 10901  
 10902  	if a.DesiredStatus == AllocDesiredStatusStop || a.DesiredStatus == AllocDesiredStatusEvict {
 10903  		return false
 10904  	}
 10905  
 10906  	tg := a.Job.LookupTaskGroup(a.TaskGroup)
 10907  
 10908  	// if the task group is nil or the ephemeral disk block isn't present then
 10909  	// we won't migrate
 10910  	if tg == nil || tg.EphemeralDisk == nil {
 10911  		return false
 10912  	}
 10913  
 10914  	// We won't migrate any data if the user hasn't enabled migration
 10915  	return tg.EphemeralDisk.Migrate
 10916  }
 10917  
// SetEventDisplayMessages populates the display message if its not already set,
// a temporary fix to handle old allocations that don't have it.
// This method will be removed in a future release.
func (a *Allocation) SetEventDisplayMessages() {
	// Delegates to the package-level helper shared with AllocListStub.
	setDisplayMsg(a.TaskStates)
}
 10924  
// ComparableResources returns the resources on the allocation
// handling upgrade paths. After 0.11 calls to this should be replaced with:
// alloc.AllocatedResources.Comparable()
//
// COMPAT(0.11): Remove in 0.11
func (a *Allocation) ComparableResources() *ComparableResources {
	// Alloc already has 0.9+ behavior
	if a.AllocatedResources != nil {
		return a.AllocatedResources.Comparable()
	}

	// Pre-0.9 allocations: fold the legacy shared + per-task resources into
	// a single Resources value.
	var resources *Resources
	if a.Resources != nil {
		resources = a.Resources
	} else if a.TaskResources != nil {
		resources = new(Resources)
		resources.Add(a.SharedResources)
		for _, taskResource := range a.TaskResources {
			resources.Add(taskResource)
		}
	}

	// NOTE(review): if Resources and TaskResources are both nil, `resources`
	// stays nil and the dereferences below panic — callers appear to
	// guarantee one is set on legacy allocations; confirm.

	// Upgrade path
	return &ComparableResources{
		Flattened: AllocatedTaskResources{
			Cpu: AllocatedCpuResources{
				CpuShares: int64(resources.CPU),
			},
			Memory: AllocatedMemoryResources{
				MemoryMB:    int64(resources.MemoryMB),
				MemoryMaxMB: int64(resources.MemoryMaxMB),
			},
			Networks: resources.Networks,
		},
		Shared: AllocatedSharedResources{
			DiskMB: int64(resources.DiskMB),
		},
	}
}
 10964  
 10965  // LookupTask by name from the Allocation. Returns nil if the Job is not set, the
 10966  // TaskGroup does not exist, or the task name cannot be found.
 10967  func (a *Allocation) LookupTask(name string) *Task {
 10968  	if a.Job == nil {
 10969  		return nil
 10970  	}
 10971  
 10972  	tg := a.Job.LookupTaskGroup(a.TaskGroup)
 10973  	if tg == nil {
 10974  		return nil
 10975  	}
 10976  
 10977  	return tg.LookupTask(name)
 10978  }
 10979  
// Stub returns a list stub for the allocation. The optional fields parameter
// controls whether AllocatedResources is included and whether TaskStates is
// stripped; a nil fields keeps the defaults (no resources, task states kept).
func (a *Allocation) Stub(fields *AllocStubFields) *AllocListStub {
	s := &AllocListStub{
		ID:                    a.ID,
		EvalID:                a.EvalID,
		Name:                  a.Name,
		Namespace:             a.Namespace,
		NodeID:                a.NodeID,
		NodeName:              a.NodeName,
		JobID:                 a.JobID,
		JobType:               a.Job.Type,
		JobVersion:            a.Job.Version,
		TaskGroup:             a.TaskGroup,
		DesiredStatus:         a.DesiredStatus,
		DesiredDescription:    a.DesiredDescription,
		ClientStatus:          a.ClientStatus,
		ClientDescription:     a.ClientDescription,
		DesiredTransition:     a.DesiredTransition,
		TaskStates:            a.TaskStates,
		DeploymentStatus:      a.DeploymentStatus,
		FollowupEvalID:        a.FollowupEvalID,
		NextAllocation:        a.NextAllocation,
		RescheduleTracker:     a.RescheduleTracker,
		PreemptedAllocations:  a.PreemptedAllocations,
		PreemptedByAllocation: a.PreemptedByAllocation,
		CreateIndex:           a.CreateIndex,
		ModifyIndex:           a.ModifyIndex,
		CreateTime:            a.CreateTime,
		ModifyTime:            a.ModifyTime,
	}

	if fields != nil {
		// Resources are opt-in; task states are opt-out.
		if fields.Resources {
			s.AllocatedResources = a.AllocatedResources
		}
		if !fields.TaskStates {
			s.TaskStates = nil
		}
	}

	return s
}
 11022  
// AllocationDiff converts an Allocation type to an AllocationDiff type
// If at any time, modifications are made to AllocationDiff so that an
// Allocation can no longer be safely converted to AllocationDiff,
// this method should be changed accordingly.
func (a *Allocation) AllocationDiff() *AllocationDiff {
	// Pointer conversion only — no copy is made, so the diff aliases the
	// allocation's memory.
	return (*AllocationDiff)(a)
}
 11030  
 11031  // Expired determines whether an allocation has exceeded its MaxClientDisonnect
 11032  // duration relative to the passed time stamp.
 11033  func (a *Allocation) Expired(now time.Time) bool {
 11034  	if a == nil || a.Job == nil {
 11035  		return false
 11036  	}
 11037  
 11038  	// If alloc is not Unknown it cannot be expired.
 11039  	if a.ClientStatus != AllocClientStatusUnknown {
 11040  		return false
 11041  	}
 11042  
 11043  	lastUnknown := a.LastUnknown()
 11044  	if lastUnknown.IsZero() {
 11045  		return false
 11046  	}
 11047  
 11048  	tg := a.Job.LookupTaskGroup(a.TaskGroup)
 11049  	if tg == nil {
 11050  		return false
 11051  	}
 11052  
 11053  	if tg.MaxClientDisconnect == nil {
 11054  		return false
 11055  	}
 11056  
 11057  	expiry := lastUnknown.Add(*tg.MaxClientDisconnect)
 11058  	return now.UTC().After(expiry) || now.UTC().Equal(expiry)
 11059  }
 11060  
 11061  // LastUnknown returns the timestamp for the last time the allocation
 11062  // transitioned into the unknown client status.
 11063  func (a *Allocation) LastUnknown() time.Time {
 11064  	var lastUnknown time.Time
 11065  
 11066  	for _, s := range a.AllocStates {
 11067  		if s.Field == AllocStateFieldClientStatus &&
 11068  			s.Value == AllocClientStatusUnknown {
 11069  			if lastUnknown.IsZero() || lastUnknown.Before(s.Time) {
 11070  				lastUnknown = s.Time
 11071  			}
 11072  		}
 11073  	}
 11074  
 11075  	return lastUnknown.UTC()
 11076  }
 11077  
 11078  // NeedsToReconnect returns true if the last known ClientStatus value is
 11079  // "unknown" and so the allocation did not reconnect yet.
 11080  func (a *Allocation) NeedsToReconnect() bool {
 11081  	disconnected := false
 11082  
 11083  	// AllocStates are appended to the list and we only need the latest
 11084  	// ClientStatus transition, so traverse from the end until we find one.
 11085  	for i := len(a.AllocStates) - 1; i >= 0; i-- {
 11086  		s := a.AllocStates[i]
 11087  		if s.Field != AllocStateFieldClientStatus {
 11088  			continue
 11089  		}
 11090  
 11091  		disconnected = s.Value == AllocClientStatusUnknown
 11092  		break
 11093  	}
 11094  
 11095  	return disconnected
 11096  }
 11097  
// ToIdentityClaims builds the JWT claims identifying this allocation's
// workload: namespace, job ID, and allocation ID, with NotBefore/IssuedAt set
// to the current time. When the job is a child (ParentID set), the parent's
// job ID is used instead so the identity matches the registered job.
func (a *Allocation) ToIdentityClaims(job *Job) *IdentityClaims {
	now := jwt.NewNumericDate(time.Now().UTC())
	claims := &IdentityClaims{
		Namespace:    a.Namespace,
		JobID:        a.JobID,
		AllocationID: a.ID,
		RegisteredClaims: jwt.RegisteredClaims{
			// TODO: implement a refresh loop to prevent allocation identities from
			// expiring before the allocation is terminal. Once that's implemented,
			// add an ExpiresAt here ExpiresAt: &jwt.NumericDate{}
			// https://github.com/hernad/nomad/issues/16258
			NotBefore: now,
			IssuedAt:  now,
		},
	}
	if job != nil && job.ParentID != "" {
		claims.JobID = job.ParentID
	}
	return claims
}
 11118  
// ToTaskIdentityClaims builds identity claims for a specific task by adding
// the task name to the allocation-level claims.
func (a *Allocation) ToTaskIdentityClaims(job *Job, taskName string) *IdentityClaims {
	claims := a.ToIdentityClaims(job)
	if claims != nil {
		claims.TaskName = taskName
	}
	return claims
}
 11126  
// IdentityClaims are the input to a JWT identifying a workload. It
// should never be serialized to msgpack unsigned.
type IdentityClaims struct {
	Namespace    string `json:"nomad_namespace"`
	JobID        string `json:"nomad_job_id"`
	AllocationID string `json:"nomad_allocation_id"`
	TaskName     string `json:"nomad_task"`

	// Embedded standard JWT claims (iat, nbf, etc.).
	jwt.RegisteredClaims
}
 11137  
// AllocationDiff is another named type for Allocation (to use the same fields),
// which is used to represent the delta for an Allocation. If you need a method
// defined on the allocation diff, consider whether it belongs on Allocation
// instead, since the two types share all fields and convert freely.
type AllocationDiff Allocation
 11142  
// AllocListStub is used to return a subset of alloc information
// (see Allocation.Stub, which builds one). AllocatedResources is only
// populated when explicitly requested via AllocStubFields.
type AllocListStub struct {
	ID                    string
	EvalID                string
	Name                  string
	Namespace             string
	NodeID                string
	NodeName              string
	JobID                 string
	JobType               string
	JobVersion            uint64
	TaskGroup             string
	AllocatedResources    *AllocatedResources `json:",omitempty"`
	DesiredStatus         string
	DesiredDescription    string
	ClientStatus          string
	ClientDescription     string
	DesiredTransition     DesiredTransition
	TaskStates            map[string]*TaskState
	DeploymentStatus      *AllocDeploymentStatus
	FollowupEvalID        string
	NextAllocation        string
	RescheduleTracker     *RescheduleTracker
	PreemptedAllocations  []string
	PreemptedByAllocation string
	CreateIndex           uint64
	ModifyIndex           uint64
	CreateTime            int64
	ModifyTime            int64
}
 11173  
// SetEventDisplayMessages populates the display message if its not already
// set, a temporary fix to handle old allocations that don't have it. This
// method will be removed in a future release.
func (a *AllocListStub) SetEventDisplayMessages() {
	// Delegates to the package-level helper shared with Allocation.
	setDisplayMsg(a.TaskStates)
}
 11180  
// RescheduleEligible returns if the allocation is eligible to be rescheduled according
// to its ReschedulePolicy and the current state of its reschedule trackers
func (a *AllocListStub) RescheduleEligible(reschedulePolicy *ReschedulePolicy, failTime time.Time) bool {
	// NOTE(review): a.RescheduleTracker may be nil here; this relies on
	// RescheduleTracker.RescheduleEligible being safe on a nil receiver —
	// confirm.
	return a.RescheduleTracker.RescheduleEligible(reschedulePolicy, failTime)
}
 11186  
// setDisplayMsg backfills the human-readable display message on every task
// event in the given task states. Shared by Allocation and AllocListStub.
func setDisplayMsg(taskStates map[string]*TaskState) {
	for _, taskState := range taskStates {
		for _, event := range taskState.Events {
			event.PopulateEventDisplayMessage()
		}
	}
}
 11194  
// AllocStubFields defines which fields are included in the AllocListStub.
// Use NewAllocStubFields for the backward-compatible defaults.
type AllocStubFields struct {
	// Resources includes resource-related fields if true.
	Resources bool

	// TaskStates removes the TaskStates field if false (default is to
	// include TaskStates).
	TaskStates bool
}
 11204  
 11205  func NewAllocStubFields() *AllocStubFields {
 11206  	return &AllocStubFields{
 11207  		// Maintain backward compatibility by retaining task states by
 11208  		// default.
 11209  		TaskStates: true,
 11210  	}
 11211  }
 11212  
// AllocMetric is used to track various metrics while attempting
// to make an allocation. These are used to debug a job, or to better
// understand the pressure within the system.
type AllocMetric struct {
	// NodesEvaluated is the number of nodes that were evaluated
	NodesEvaluated int

	// NodesFiltered is the number of nodes filtered due to a constraint
	NodesFiltered int

	// NodesInPool is the number of nodes in the node pool used by the job.
	NodesInPool int

	// NodesAvailable is the number of nodes available for evaluation per DC.
	NodesAvailable map[string]int

	// ClassFiltered is the number of nodes filtered by class
	ClassFiltered map[string]int

	// ConstraintFiltered is the number of failures caused by constraint
	ConstraintFiltered map[string]int

	// NodesExhausted is the number of nodes skipped due to being
	// exhausted of at least one resource
	NodesExhausted int

	// ClassExhausted is the number of nodes exhausted by class
	ClassExhausted map[string]int

	// DimensionExhausted provides the count by dimension or reason
	DimensionExhausted map[string]int

	// QuotaExhausted provides the exhausted dimensions
	QuotaExhausted []string

	// ResourcesExhausted provides the amount of resources exhausted by task
	// during the allocation placement
	ResourcesExhausted map[string]*Resources

	// Scores is the scores of the final few nodes remaining
	// for placement. The top score is typically selected.
	// Deprecated: Replaced by ScoreMetaData in Nomad 0.9
	Scores map[string]float64

	// ScoreMetaData is a slice of top scoring nodes displayed in the CLI
	ScoreMetaData []*NodeScoreMeta

	// nodeScoreMeta is used to keep scores for a single node id. It is cleared out after
	// we receive normalized score during the last step of the scoring stack.
	// (Unexported: scoring scratch state, not part of the API payload.)
	nodeScoreMeta *NodeScoreMeta

	// topScores is used to maintain a heap of the top K nodes with
	// the highest normalized score
	// (Unexported: scoring scratch state, not part of the API payload.)
	topScores *kheap.ScoreHeap

	// AllocationTime is a measure of how long the allocation
	// attempt took. This can affect performance and SLAs.
	AllocationTime time.Duration

	// CoalescedFailures indicates the number of other
	// allocations that were coalesced into this failed allocation.
	// This is to prevent creating many failed allocations for a
	// single task group.
	CoalescedFailures int
}
 11278  
 11279  func (a *AllocMetric) Copy() *AllocMetric {
 11280  	if a == nil {
 11281  		return nil
 11282  	}
 11283  	na := new(AllocMetric)
 11284  	*na = *a
 11285  	na.NodesAvailable = maps.Clone(na.NodesAvailable)
 11286  	na.ClassFiltered = maps.Clone(na.ClassFiltered)
 11287  	na.ConstraintFiltered = maps.Clone(na.ConstraintFiltered)
 11288  	na.ClassExhausted = maps.Clone(na.ClassExhausted)
 11289  	na.DimensionExhausted = maps.Clone(na.DimensionExhausted)
 11290  	na.QuotaExhausted = slices.Clone(na.QuotaExhausted)
 11291  	na.Scores = maps.Clone(na.Scores)
 11292  	na.ScoreMetaData = CopySliceNodeScoreMeta(na.ScoreMetaData)
 11293  	return na
 11294  }
 11295  
 11296  func (a *AllocMetric) EvaluateNode() {
 11297  	a.NodesEvaluated += 1
 11298  }
 11299  
 11300  func (a *AllocMetric) FilterNode(node *Node, constraint string) {
 11301  	a.NodesFiltered += 1
 11302  	if node != nil && node.NodeClass != "" {
 11303  		if a.ClassFiltered == nil {
 11304  			a.ClassFiltered = make(map[string]int)
 11305  		}
 11306  		a.ClassFiltered[node.NodeClass] += 1
 11307  	}
 11308  	if constraint != "" {
 11309  		if a.ConstraintFiltered == nil {
 11310  			a.ConstraintFiltered = make(map[string]int)
 11311  		}
 11312  		a.ConstraintFiltered[constraint] += 1
 11313  	}
 11314  }
 11315  
 11316  func (a *AllocMetric) ExhaustedNode(node *Node, dimension string) {
 11317  	a.NodesExhausted += 1
 11318  	if node != nil && node.NodeClass != "" {
 11319  		if a.ClassExhausted == nil {
 11320  			a.ClassExhausted = make(map[string]int)
 11321  		}
 11322  		a.ClassExhausted[node.NodeClass] += 1
 11323  	}
 11324  	if dimension != "" {
 11325  		if a.DimensionExhausted == nil {
 11326  			a.DimensionExhausted = make(map[string]int)
 11327  		}
 11328  		a.DimensionExhausted[dimension] += 1
 11329  	}
 11330  }
 11331  
// ExhaustQuota appends the given quota dimensions to the list of exhausted
// dimensions, pre-sizing the slice on first use.
func (a *AllocMetric) ExhaustQuota(dimensions []string) {
	if a.QuotaExhausted == nil {
		a.QuotaExhausted = make([]string, 0, len(dimensions))
	}

	a.QuotaExhausted = append(a.QuotaExhausted, dimensions...)
}
 11339  
 11340  // ExhaustResources updates the amount of resources exhausted for the
 11341  // allocation because of the given task group.
 11342  func (a *AllocMetric) ExhaustResources(tg *TaskGroup) {
 11343  	if a.DimensionExhausted == nil {
 11344  		return
 11345  	}
 11346  
 11347  	if a.ResourcesExhausted == nil {
 11348  		a.ResourcesExhausted = make(map[string]*Resources)
 11349  	}
 11350  
 11351  	for _, t := range tg.Tasks {
 11352  		exhaustedResources := a.ResourcesExhausted[t.Name]
 11353  		if exhaustedResources == nil {
 11354  			exhaustedResources = &Resources{}
 11355  		}
 11356  
 11357  		if a.DimensionExhausted["memory"] > 0 {
 11358  			exhaustedResources.MemoryMB += t.Resources.MemoryMB
 11359  		}
 11360  
 11361  		if a.DimensionExhausted["cpu"] > 0 {
 11362  			exhaustedResources.CPU += t.Resources.CPU
 11363  		}
 11364  
 11365  		a.ResourcesExhausted[t.Name] = exhaustedResources
 11366  	}
 11367  }
 11368  
// ScoreNode is used to gather top K scoring nodes in a heap
func (a *AllocMetric) ScoreNode(node *Node, name string, score float64) {
	// Create nodeScoreMeta lazily if its the first time or if its a new node
	if a.nodeScoreMeta == nil || a.nodeScoreMeta.NodeID != node.ID {
		a.nodeScoreMeta = &NodeScoreMeta{
			NodeID: node.ID,
			Scores: make(map[string]float64),
		}
	}
	if name == NormScorerName {
		a.nodeScoreMeta.NormScore = score
		// Once we have the normalized score we can push to the heap
		// that tracks top K by normalized score

		// Create the heap if its not there already
		if a.topScores == nil {
			a.topScores = kheap.NewScoreHeap(MaxRetainedNodeScores)
		}
		heap.Push(a.topScores, a.nodeScoreMeta)

		// Clear out this entry because its now in the heap
		a.nodeScoreMeta = nil
	} else {
		// Individual scorer result; accumulated until the normalized score
		// arrives as the final step of the scoring stack.
		a.nodeScoreMeta.Scores[name] = score
	}
}
 11395  
 11396  // PopulateScoreMetaData populates a map of scorer to scoring metadata
 11397  // The map is populated by popping elements from a heap of top K scores
 11398  // maintained per scorer
 11399  func (a *AllocMetric) PopulateScoreMetaData() {
 11400  	if a.topScores == nil {
 11401  		return
 11402  	}
 11403  
 11404  	if a.ScoreMetaData == nil {
 11405  		a.ScoreMetaData = make([]*NodeScoreMeta, a.topScores.Len())
 11406  	}
 11407  	heapItems := a.topScores.GetItemsReverse()
 11408  	for i, item := range heapItems {
 11409  		a.ScoreMetaData[i] = item.(*NodeScoreMeta)
 11410  	}
 11411  }
 11412  
 11413  // MaxNormScore returns the ScoreMetaData entry with the highest normalized
 11414  // score.
 11415  func (a *AllocMetric) MaxNormScore() *NodeScoreMeta {
 11416  	if a == nil || len(a.ScoreMetaData) == 0 {
 11417  		return nil
 11418  	}
 11419  	return a.ScoreMetaData[0]
 11420  }
 11421  
// NodeScoreMeta captures scoring meta data derived from
// different scoring factors.
type NodeScoreMeta struct {
	// NodeID identifies the scored node.
	NodeID string
	// Scores holds the per-scorer raw scores, keyed by scorer name.
	Scores map[string]float64
	// NormScore is the final normalized score used for ranking.
	NormScore float64
}
 11429  
 11430  func (s *NodeScoreMeta) Copy() *NodeScoreMeta {
 11431  	if s == nil {
 11432  		return nil
 11433  	}
 11434  	ns := new(NodeScoreMeta)
 11435  	*ns = *s
 11436  	return ns
 11437  }
 11438  
// String renders the node ID, normalized score, and per-scorer scores for
// debugging and log output.
func (s *NodeScoreMeta) String() string {
	return fmt.Sprintf("%s %f %v", s.NodeID, s.NormScore, s.Scores)
}
 11442  
// Score returns the normalized score, satisfying the heap's scoring interface.
func (s *NodeScoreMeta) Score() float64 {
	return s.NormScore
}
 11446  
// Data returns the metadata itself, satisfying the heap's item interface.
func (s *NodeScoreMeta) Data() interface{} {
	return s
}
 11450  
// AllocNetworkStatus captures the status of an allocation's network during runtime.
// Depending on the network mode, an allocation's address may need to be known to other
// systems in Nomad such as service registration.
type AllocNetworkStatus struct {
	// InterfaceName is the network interface the allocation is using.
	InterfaceName string
	// Address is the allocation's runtime network address.
	Address string
	// DNS holds the DNS configuration in effect, if any.
	DNS *DNSConfig
}
 11459  
 11460  func (a *AllocNetworkStatus) Copy() *AllocNetworkStatus {
 11461  	if a == nil {
 11462  		return nil
 11463  	}
 11464  	return &AllocNetworkStatus{
 11465  		InterfaceName: a.InterfaceName,
 11466  		Address:       a.Address,
 11467  		DNS:           a.DNS.Copy(),
 11468  	}
 11469  }
 11470  
 11471  func (a *AllocNetworkStatus) Equal(o *AllocNetworkStatus) bool {
 11472  	// note: this accounts for when DNSConfig is non-nil but empty
 11473  	switch {
 11474  	case a == nil && o.IsZero():
 11475  		return true
 11476  	case o == nil && a.IsZero():
 11477  		return true
 11478  	case a == nil || o == nil:
 11479  		return a == o
 11480  	}
 11481  
 11482  	switch {
 11483  	case a.InterfaceName != o.InterfaceName:
 11484  		return false
 11485  	case a.Address != o.Address:
 11486  		return false
 11487  	case !a.DNS.Equal(o.DNS):
 11488  		return false
 11489  	}
 11490  	return true
 11491  }
 11492  
 11493  func (a *AllocNetworkStatus) IsZero() bool {
 11494  	if a == nil {
 11495  		return true
 11496  	}
 11497  	if a.InterfaceName != "" || a.Address != "" {
 11498  		return false
 11499  	}
 11500  	if !a.DNS.IsZero() {
 11501  		return false
 11502  	}
 11503  	return true
 11504  }
 11505  
// NetworkStatus is an interface satisfied by alloc runner, for acquiring the
// network status of an allocation.
type NetworkStatus interface {
	// NetworkStatus returns the allocation's current network status.
	NetworkStatus() *AllocNetworkStatus
}
 11511  
// AllocDeploymentStatus captures the status of the allocation as part of the
// deployment. This can include things like if the allocation has been marked as
// healthy.
type AllocDeploymentStatus struct {
	// Healthy marks whether the allocation has been marked healthy or unhealthy
	// as part of a deployment. It can be unset if it has neither been marked
	// healthy or unhealthy.
	Healthy *bool

	// Timestamp is the time at which the health status was set.
	Timestamp time.Time

	// Canary marks whether the allocation is a canary or not. A canary that has
	// been promoted will have this field set to false.
	Canary bool

	// ModifyIndex is the raft index in which the deployment status was last
	// changed.
	ModifyIndex uint64
}
 11532  
 11533  // HasHealth returns true if the allocation has its health set.
 11534  func (a *AllocDeploymentStatus) HasHealth() bool {
 11535  	return a != nil && a.Healthy != nil
 11536  }
 11537  
 11538  // IsHealthy returns if the allocation is marked as healthy as part of a
 11539  // deployment
 11540  func (a *AllocDeploymentStatus) IsHealthy() bool {
 11541  	if a == nil {
 11542  		return false
 11543  	}
 11544  
 11545  	return a.Healthy != nil && *a.Healthy
 11546  }
 11547  
 11548  // IsUnhealthy returns if the allocation is marked as unhealthy as part of a
 11549  // deployment
 11550  func (a *AllocDeploymentStatus) IsUnhealthy() bool {
 11551  	if a == nil {
 11552  		return false
 11553  	}
 11554  
 11555  	return a.Healthy != nil && !*a.Healthy
 11556  }
 11557  
 11558  // IsCanary returns if the allocation is marked as a canary
 11559  func (a *AllocDeploymentStatus) IsCanary() bool {
 11560  	if a == nil {
 11561  		return false
 11562  	}
 11563  
 11564  	return a.Canary
 11565  }
 11566  
 11567  func (a *AllocDeploymentStatus) Copy() *AllocDeploymentStatus {
 11568  	if a == nil {
 11569  		return nil
 11570  	}
 11571  
 11572  	c := new(AllocDeploymentStatus)
 11573  	*c = *a
 11574  
 11575  	if a.Healthy != nil {
 11576  		c.Healthy = pointer.Of(*a.Healthy)
 11577  	}
 11578  
 11579  	return c
 11580  }
 11581  
 11582  func (a *AllocDeploymentStatus) Equal(o *AllocDeploymentStatus) bool {
 11583  	if a == nil || o == nil {
 11584  		return a == o
 11585  	}
 11586  
 11587  	switch {
 11588  	case !pointer.Eq(a.Healthy, o.Healthy):
 11589  		return false
 11590  	case a.Timestamp != o.Timestamp:
 11591  		return false
 11592  	case a.Canary != o.Canary:
 11593  		return false
 11594  	case a.ModifyIndex != o.ModifyIndex:
 11595  		return false
 11596  	}
 11597  	return true
 11598  }
 11599  
// Evaluation status values.
const (
	EvalStatusBlocked   = "blocked"
	EvalStatusPending   = "pending"
	EvalStatusComplete  = "complete"
	EvalStatusFailed    = "failed"
	// Note: the wire value uses the single-l "canceled" spelling even
	// though the constant name is double-l.
	EvalStatusCancelled = "canceled"
)
 11607  
// Evaluation trigger reasons, stored in Evaluation.TriggeredBy to record the
// event that caused the evaluation to be created (job change, node event,
// scheduler follow-up, etc.).
const (
	EvalTriggerJobRegister          = "job-register"
	EvalTriggerJobDeregister        = "job-deregister"
	EvalTriggerPeriodicJob          = "periodic-job"
	EvalTriggerNodeDrain            = "node-drain"
	EvalTriggerNodeUpdate           = "node-update"
	EvalTriggerAllocStop            = "alloc-stop"
	EvalTriggerScheduled            = "scheduled"
	EvalTriggerRollingUpdate        = "rolling-update"
	EvalTriggerDeploymentWatcher    = "deployment-watcher"
	EvalTriggerFailedFollowUp       = "failed-follow-up"
	EvalTriggerMaxPlans             = "max-plan-attempts"
	EvalTriggerRetryFailedAlloc     = "alloc-failure"
	EvalTriggerQueuedAllocs         = "queued-allocs"
	EvalTriggerPreemption           = "preemption"
	EvalTriggerScaling              = "job-scaling"
	EvalTriggerMaxDisconnectTimeout = "max-disconnect-timeout"
	EvalTriggerReconnect            = "reconnect"
)
 11627  
const (
	// CoreJobEvalGC is used for the garbage collection of evaluations
	// and allocations. We periodically scan evaluations in a terminal state,
	// in which all the corresponding allocations are also terminal. We
	// delete these out of the system to bound the state.
	CoreJobEvalGC = "eval-gc"

	// CoreJobNodeGC is used for the garbage collection of failed nodes.
	// We periodically scan nodes in a terminal state, and if they have no
	// corresponding allocations we delete these out of the system.
	CoreJobNodeGC = "node-gc"

	// CoreJobJobGC is used for the garbage collection of eligible jobs. We
	// periodically scan garbage collectible jobs and check if both their
	// evaluations and allocations are terminal. If so, we delete these out of
	// the system.
	CoreJobJobGC = "job-gc"

	// CoreJobDeploymentGC is used for the garbage collection of eligible
	// deployments. We periodically scan garbage collectible deployments and
	// check if they are terminal. If so, we delete these out of the system.
	CoreJobDeploymentGC = "deployment-gc"

	// CoreJobCSIVolumeClaimGC is used for the garbage collection of CSI
	// volume claims. We periodically scan volumes to see if no allocs are
	// claiming them. If so, we unclaim the volume.
	CoreJobCSIVolumeClaimGC = "csi-volume-claim-gc"

	// CoreJobCSIPluginGC is used for the garbage collection of CSI plugins.
	// We periodically scan plugins to see if they have no associated volumes
	// or allocs running them. If so, we delete the plugin.
	CoreJobCSIPluginGC = "csi-plugin-gc"

	// CoreJobOneTimeTokenGC is used for the garbage collection of one-time
	// tokens. We periodically scan for expired tokens and delete them.
	CoreJobOneTimeTokenGC = "one-time-token-gc"

	// CoreJobLocalTokenExpiredGC is used for the garbage collection of
	// expired local ACL tokens. We periodically scan for expired tokens and
	// delete them.
	CoreJobLocalTokenExpiredGC = "local-token-expired-gc"

	// CoreJobGlobalTokenExpiredGC is used for the garbage collection of
	// expired global ACL tokens. We periodically scan for expired tokens and
	// delete them.
	CoreJobGlobalTokenExpiredGC = "global-token-expired-gc"

	// CoreJobRootKeyRotateOrGC is used for periodic key rotation and
	// garbage collection of unused encryption keys.
	CoreJobRootKeyRotateOrGC = "root-key-rotate-gc"

	// CoreJobVariablesRekey is used to fully rotate the encryption keys for
	// variables by decrypting all variables and re-encrypting them with the
	// active key
	CoreJobVariablesRekey = "variables-rekey"

	// CoreJobForceGC is used to force garbage collection of all GCable objects.
	CoreJobForceGC = "force-gc"
)
 11687  
// Evaluation is used anytime we need to apply business logic as a result
// of a change to our desired state (job specification) or the emergent state
// (registered nodes). When the inputs change, we need to "evaluate" them,
// potentially taking action (allocation of work) or doing nothing if the state
// of the world does not require it.
type Evaluation struct {
	// msgpack omit empty fields during serialization
	_struct bool `codec:",omitempty"` // nolint: structcheck

	// ID is a randomly generated UUID used for this evaluation. This
	// is assigned upon the creation of the evaluation.
	ID string

	// Namespace is the namespace the evaluation is created in
	Namespace string

	// Priority is used to control scheduling importance and if this job
	// can preempt other jobs.
	Priority int

	// Type is used to control which schedulers are available to handle
	// this evaluation.
	Type string

	// TriggeredBy is used to give some insight into why this Eval
	// was created. (Job change, node failure, alloc failure, etc).
	// See the EvalTrigger* constants for the possible values.
	TriggeredBy string

	// JobID is the job this evaluation is scoped to. Evaluations cannot
	// be run in parallel for a given JobID, so we serialize on this.
	JobID string

	// JobModifyIndex is the modify index of the job at the time
	// the evaluation was created
	JobModifyIndex uint64

	// NodeID is the node that was affected triggering the evaluation.
	NodeID string

	// NodeModifyIndex is the modify index of the node at the time
	// the evaluation was created
	NodeModifyIndex uint64

	// DeploymentID is the ID of the deployment that triggered the evaluation.
	DeploymentID string

	// Status of the evaluation; see the EvalStatus* constants.
	Status string

	// StatusDescription is meant to provide more human-readable information
	// about the status.
	StatusDescription string

	// Wait is a minimum wait time for running the eval. This is used to
	// support a rolling upgrade in versions prior to 0.7.0
	// Deprecated
	Wait time.Duration

	// WaitUntil is the time when this eval should be run. This is used to
	// support delayed rescheduling of failed allocations, and delayed
	// stopping of allocations that are configured with max_client_disconnect.
	WaitUntil time.Time

	// NextEval is the evaluation ID for the eval created to do a followup.
	// This is used to support rolling upgrades and failed-follow-up evals, where
	// we need a chain of evaluations.
	NextEval string

	// PreviousEval is the evaluation ID for the eval creating this one to do a followup.
	// This is used to support rolling upgrades and failed-follow-up evals, where
	// we need a chain of evaluations.
	PreviousEval string

	// BlockedEval is the evaluation ID for a created blocked eval. A
	// blocked eval will be created if all allocations could not be placed due
	// to constraints or lacking resources.
	BlockedEval string

	// RelatedEvals is a list of all the evaluations that are related (next,
	// previous, or blocked) to this one. It may be nil if not requested.
	RelatedEvals []*EvaluationStub

	// FailedTGAllocs are task groups which have allocations that could not be
	// made, but the metrics are persisted so that the user can use the feedback
	// to determine the cause.
	FailedTGAllocs map[string]*AllocMetric

	// ClassEligibility tracks computed node classes that have been explicitly
	// marked as eligible or ineligible.
	ClassEligibility map[string]bool

	// QuotaLimitReached marks whether a quota limit was reached for the
	// evaluation.
	QuotaLimitReached string

	// EscapedComputedClass marks whether the job has constraints that are not
	// captured by computed node classes.
	EscapedComputedClass bool

	// AnnotatePlan triggers the scheduler to provide additional annotations
	// during the evaluation. This should not be set during normal operations.
	AnnotatePlan bool

	// QueuedAllocations is the number of unplaced allocations at the time the
	// evaluation was processed. The map is keyed by Task Group names.
	QueuedAllocations map[string]int

	// LeaderACL provides the ACL token to use when issuing RPCs back to the
	// leader. This will be a valid management token as long as the leader is
	// active. This should not ever be exposed via the API.
	LeaderACL string

	// SnapshotIndex is the Raft index of the snapshot used to process the
	// evaluation. The index will either be set when it has gone through the
	// scheduler or if a blocked evaluation is being created. The index is set
	// in this case so we can determine if an early unblocking is required since
	// capacity has changed since the evaluation was created. This can result in
	// the SnapshotIndex being less than the CreateIndex.
	SnapshotIndex uint64

	// Raft Indexes
	CreateIndex uint64
	ModifyIndex uint64

	// Wall-clock creation and modification times in UTC nanoseconds; see
	// UpdateModifyTime for how ModifyTime is kept after CreateTime.
	CreateTime int64
	ModifyTime int64
}
 11814  
// EvaluationStub is a lightweight projection of Evaluation carrying only the
// scalar identifying fields (see Evaluation.Stub); the metric, eligibility,
// and queued-allocation maps of the full Evaluation are omitted.
type EvaluationStub struct {
	ID                string
	Namespace         string
	Priority          int
	Type              string
	TriggeredBy       string
	JobID             string
	NodeID            string
	DeploymentID      string
	Status            string
	StatusDescription string
	WaitUntil         time.Time
	NextEval          string
	PreviousEval      string
	BlockedEval       string
	CreateIndex       uint64
	ModifyIndex       uint64
	CreateTime        int64
	ModifyTime        int64
}
 11835  
 11836  // GetID implements the IDGetter interface, required for pagination.
 11837  func (e *Evaluation) GetID() string {
 11838  	if e == nil {
 11839  		return ""
 11840  	}
 11841  	return e.ID
 11842  }
 11843  
 11844  // GetNamespace implements the NamespaceGetter interface, required for pagination.
 11845  func (e *Evaluation) GetNamespace() string {
 11846  	if e == nil {
 11847  		return ""
 11848  	}
 11849  	return e.Namespace
 11850  }
 11851  
 11852  // GetCreateIndex implements the CreateIndexGetter interface, required for
 11853  // pagination.
 11854  func (e *Evaluation) GetCreateIndex() uint64 {
 11855  	if e == nil {
 11856  		return 0
 11857  	}
 11858  	return e.CreateIndex
 11859  }
 11860  
 11861  // TerminalStatus returns if the current status is terminal and
 11862  // will no longer transition.
 11863  func (e *Evaluation) TerminalStatus() bool {
 11864  	switch e.Status {
 11865  	case EvalStatusComplete, EvalStatusFailed, EvalStatusCancelled:
 11866  		return true
 11867  	default:
 11868  		return false
 11869  	}
 11870  }
 11871  
// GoString returns a short identifying string for the evaluation (ID, job ID,
// and namespace), used when formatting with %#v in logs and debug output.
func (e *Evaluation) GoString() string {
	return fmt.Sprintf("<Eval %q JobID: %q Namespace: %q>", e.ID, e.JobID, e.Namespace)
}
 11875  
 11876  func (e *Evaluation) RelatedIDs() []string {
 11877  	if e == nil {
 11878  		return nil
 11879  	}
 11880  
 11881  	ids := []string{e.NextEval, e.PreviousEval, e.BlockedEval}
 11882  	related := make([]string, 0, len(ids))
 11883  
 11884  	for _, id := range ids {
 11885  		if id != "" {
 11886  			related = append(related, id)
 11887  		}
 11888  	}
 11889  
 11890  	return related
 11891  }
 11892  
// Stub returns an EvaluationStub populated from the evaluation's scalar
// fields, or nil for a nil receiver. The metric and eligibility maps of the
// full evaluation are intentionally dropped to keep list results small.
func (e *Evaluation) Stub() *EvaluationStub {
	if e == nil {
		return nil
	}

	return &EvaluationStub{
		ID:                e.ID,
		Namespace:         e.Namespace,
		Priority:          e.Priority,
		Type:              e.Type,
		TriggeredBy:       e.TriggeredBy,
		JobID:             e.JobID,
		NodeID:            e.NodeID,
		DeploymentID:      e.DeploymentID,
		Status:            e.Status,
		StatusDescription: e.StatusDescription,
		WaitUntil:         e.WaitUntil,
		NextEval:          e.NextEval,
		PreviousEval:      e.PreviousEval,
		BlockedEval:       e.BlockedEval,
		CreateIndex:       e.CreateIndex,
		ModifyIndex:       e.ModifyIndex,
		CreateTime:        e.CreateTime,
		ModifyTime:        e.ModifyTime,
	}
}
 11919  
 11920  func (e *Evaluation) Copy() *Evaluation {
 11921  	if e == nil {
 11922  		return nil
 11923  	}
 11924  	ne := new(Evaluation)
 11925  	*ne = *e
 11926  
 11927  	// Copy ClassEligibility
 11928  	if e.ClassEligibility != nil {
 11929  		classes := make(map[string]bool, len(e.ClassEligibility))
 11930  		for class, elig := range e.ClassEligibility {
 11931  			classes[class] = elig
 11932  		}
 11933  		ne.ClassEligibility = classes
 11934  	}
 11935  
 11936  	// Copy FailedTGAllocs
 11937  	if e.FailedTGAllocs != nil {
 11938  		failedTGs := make(map[string]*AllocMetric, len(e.FailedTGAllocs))
 11939  		for tg, metric := range e.FailedTGAllocs {
 11940  			failedTGs[tg] = metric.Copy()
 11941  		}
 11942  		ne.FailedTGAllocs = failedTGs
 11943  	}
 11944  
 11945  	// Copy queued allocations
 11946  	if e.QueuedAllocations != nil {
 11947  		queuedAllocations := make(map[string]int, len(e.QueuedAllocations))
 11948  		for tg, num := range e.QueuedAllocations {
 11949  			queuedAllocations[tg] = num
 11950  		}
 11951  		ne.QueuedAllocations = queuedAllocations
 11952  	}
 11953  
 11954  	return ne
 11955  }
 11956  
 11957  // ShouldEnqueue checks if a given evaluation should be enqueued into the
 11958  // eval_broker
 11959  func (e *Evaluation) ShouldEnqueue() bool {
 11960  	switch e.Status {
 11961  	case EvalStatusPending:
 11962  		return true
 11963  	case EvalStatusComplete, EvalStatusFailed, EvalStatusBlocked, EvalStatusCancelled:
 11964  		return false
 11965  	default:
 11966  		panic(fmt.Sprintf("unhandled evaluation (%s) status %s", e.ID, e.Status))
 11967  	}
 11968  }
 11969  
 11970  // ShouldBlock checks if a given evaluation should be entered into the blocked
 11971  // eval tracker.
 11972  func (e *Evaluation) ShouldBlock() bool {
 11973  	switch e.Status {
 11974  	case EvalStatusBlocked:
 11975  		return true
 11976  	case EvalStatusComplete, EvalStatusFailed, EvalStatusPending, EvalStatusCancelled:
 11977  		return false
 11978  	default:
 11979  		panic(fmt.Sprintf("unhandled evaluation (%s) status %s", e.ID, e.Status))
 11980  	}
 11981  }
 11982  
 11983  // MakePlan is used to make a plan from the given evaluation
 11984  // for a given Job
 11985  func (e *Evaluation) MakePlan(j *Job) *Plan {
 11986  	p := &Plan{
 11987  		EvalID:          e.ID,
 11988  		Priority:        e.Priority,
 11989  		Job:             j,
 11990  		NodeUpdate:      make(map[string][]*Allocation),
 11991  		NodeAllocation:  make(map[string][]*Allocation),
 11992  		NodePreemptions: make(map[string][]*Allocation),
 11993  	}
 11994  	if j != nil {
 11995  		p.AllAtOnce = j.AllAtOnce
 11996  	}
 11997  	return p
 11998  }
 11999  
// NextRollingEval creates an evaluation to followup this eval for rolling
// updates. The new eval is pending, chained to this one via PreviousEval, and
// carries the (deprecated) Wait delay before it should run.
func (e *Evaluation) NextRollingEval(wait time.Duration) *Evaluation {
	now := time.Now().UTC().UnixNano()
	return &Evaluation{
		ID:             uuid.Generate(),
		Namespace:      e.Namespace,
		Priority:       e.Priority,
		Type:           e.Type,
		TriggeredBy:    EvalTriggerRollingUpdate,
		JobID:          e.JobID,
		JobModifyIndex: e.JobModifyIndex,
		Status:         EvalStatusPending,
		Wait:           wait,
		PreviousEval:   e.ID,
		CreateTime:     now,
		ModifyTime:     now,
	}
}
 12018  
// CreateBlockedEval creates a blocked evaluation to followup this eval to place any
// failed allocations. It takes the classes marked explicitly eligible or
// ineligible, whether the job has escaped computed node classes and whether the
// quota limit was reached. The new eval starts in the blocked status and is
// chained to this one via PreviousEval.
func (e *Evaluation) CreateBlockedEval(classEligibility map[string]bool,
	escaped bool, quotaReached string, failedTGAllocs map[string]*AllocMetric) *Evaluation {
	now := time.Now().UTC().UnixNano()
	return &Evaluation{
		ID:                   uuid.Generate(),
		Namespace:            e.Namespace,
		Priority:             e.Priority,
		Type:                 e.Type,
		TriggeredBy:          EvalTriggerQueuedAllocs,
		JobID:                e.JobID,
		JobModifyIndex:       e.JobModifyIndex,
		Status:               EvalStatusBlocked,
		PreviousEval:         e.ID,
		FailedTGAllocs:       failedTGAllocs,
		ClassEligibility:     classEligibility,
		EscapedComputedClass: escaped,
		QuotaLimitReached:    quotaReached,
		CreateTime:           now,
		ModifyTime:           now,
	}
}
 12044  
// CreateFailedFollowUpEval creates a follow up evaluation when the current one
// has been marked as failed because it has hit the delivery limit and will not
// be retried by the eval_broker. Callers should copy the created eval's ID to
// into the old eval's NextEval field. The new eval is pending with the given
// wait delay and is chained to this one via PreviousEval.
func (e *Evaluation) CreateFailedFollowUpEval(wait time.Duration) *Evaluation {
	now := time.Now().UTC().UnixNano()
	return &Evaluation{
		ID:             uuid.Generate(),
		Namespace:      e.Namespace,
		Priority:       e.Priority,
		Type:           e.Type,
		TriggeredBy:    EvalTriggerFailedFollowUp,
		JobID:          e.JobID,
		JobModifyIndex: e.JobModifyIndex,
		Status:         EvalStatusPending,
		Wait:           wait,
		PreviousEval:   e.ID,
		CreateTime:     now,
		ModifyTime:     now,
	}
}
 12066  
 12067  // UpdateModifyTime takes into account that clocks on different servers may be
 12068  // slightly out of sync. Even in case of a leader change, this method will
 12069  // guarantee that ModifyTime will always be after CreateTime.
 12070  func (e *Evaluation) UpdateModifyTime() {
 12071  	now := time.Now().UTC().UnixNano()
 12072  	if now <= e.CreateTime {
 12073  		e.ModifyTime = e.CreateTime + 1
 12074  	} else {
 12075  		e.ModifyTime = now
 12076  	}
 12077  }
 12078  
// Plan is used to submit a commit plan for task allocations. These
// are submitted to the leader which verifies that resources have
// not been overcommitted before admitting the plan.
type Plan struct {
	// msgpack omit empty fields during serialization
	_struct bool `codec:",omitempty"` // nolint: structcheck

	// EvalID is the evaluation ID this plan is associated with
	EvalID string

	// EvalToken is used to prevent a split-brain processing of
	// an evaluation. There should only be a single scheduler running
	// an Eval at a time, but this could be violated after a leadership
	// transition. This unique token is used to reject plans that are
	// being submitted from a different leader.
	EvalToken string

	// Priority is the priority of the upstream job
	Priority int

	// AllAtOnce is used to control if incremental scheduling of task groups
	// is allowed or if we must do a gang scheduling of the entire job.
	// If this is false, a plan may be partially applied. Otherwise, the
	// entire plan must be able to make progress.
	AllAtOnce bool

	// Job is the parent job of all the allocations in the Plan.
	// Since a Plan only involves a single Job, we can reduce the size
	// of the plan by only including it once.
	Job *Job

	// NodeUpdate contains all the allocations to be stopped or evicted for
	// each node, keyed by node ID.
	NodeUpdate map[string][]*Allocation

	// NodeAllocation contains all the allocations for each node, keyed by
	// node ID. The evicts must be considered prior to the allocations.
	NodeAllocation map[string][]*Allocation

	// Annotations contains annotations by the scheduler to be used by operators
	// to understand the decisions made by the scheduler.
	Annotations *PlanAnnotations

	// Deployment is the deployment created or updated by the scheduler that
	// should be applied by the planner.
	Deployment *Deployment

	// DeploymentUpdates is a set of status updates to apply to the given
	// deployments. This allows the scheduler to cancel any unneeded deployment
	// because the job is stopped or the update block is removed.
	DeploymentUpdates []*DeploymentStatusUpdate

	// NodePreemptions is a map from node id to a set of allocations from other
	// lower priority jobs that are preempted. Preempted allocations are marked
	// as evicted.
	NodePreemptions map[string][]*Allocation

	// SnapshotIndex is the Raft index of the snapshot used to create the
	// Plan. The leader will wait to evaluate the plan until its StateStore
	// has reached at least this index.
	SnapshotIndex uint64
}
 12141  
 12142  func (p *Plan) GoString() string {
 12143  	out := fmt.Sprintf("(eval %s", p.EvalID[:8])
 12144  	if p.Job != nil {
 12145  		out += fmt.Sprintf(", job %s", p.Job.ID)
 12146  	}
 12147  	if p.Deployment != nil {
 12148  		out += fmt.Sprintf(", deploy %s", p.Deployment.ID[:8])
 12149  	}
 12150  	if len(p.NodeUpdate) > 0 {
 12151  		out += ", NodeUpdates: "
 12152  		for node, allocs := range p.NodeUpdate {
 12153  			out += fmt.Sprintf("(node[%s]", node[:8])
 12154  			for _, alloc := range allocs {
 12155  				out += fmt.Sprintf(" (%s stop/evict)", alloc.ID[:8])
 12156  			}
 12157  			out += ")"
 12158  		}
 12159  	}
 12160  	if len(p.NodeAllocation) > 0 {
 12161  		out += ", NodeAllocations: "
 12162  		for node, allocs := range p.NodeAllocation {
 12163  			out += fmt.Sprintf("(node[%s]", node[:8])
 12164  			for _, alloc := range allocs {
 12165  				out += fmt.Sprintf(" (%s %s %s)",
 12166  					alloc.ID[:8], alloc.Name, alloc.DesiredStatus,
 12167  				)
 12168  			}
 12169  			out += ")"
 12170  		}
 12171  	}
 12172  	if len(p.NodePreemptions) > 0 {
 12173  		out += ", NodePreemptions: "
 12174  		for node, allocs := range p.NodePreemptions {
 12175  			out += fmt.Sprintf("(node[%s]", node[:8])
 12176  			for _, alloc := range allocs {
 12177  				out += fmt.Sprintf(" (%s %s %s)",
 12178  					alloc.ID[:8], alloc.Name, alloc.DesiredStatus,
 12179  				)
 12180  			}
 12181  			out += ")"
 12182  		}
 12183  	}
 12184  	if len(p.DeploymentUpdates) > 0 {
 12185  		out += ", DeploymentUpdates: "
 12186  		for _, dupdate := range p.DeploymentUpdates {
 12187  			out += fmt.Sprintf("(%s %s)",
 12188  				dupdate.DeploymentID[:8], dupdate.Status)
 12189  		}
 12190  	}
 12191  	if p.Annotations != nil {
 12192  		out += ", Annotations: "
 12193  		for tg, updates := range p.Annotations.DesiredTGUpdates {
 12194  			out += fmt.Sprintf("(update[%s] %v)", tg, updates)
 12195  		}
 12196  		for _, preempted := range p.Annotations.PreemptedAllocs {
 12197  			out += fmt.Sprintf("(preempt %s)", preempted.ID[:8])
 12198  		}
 12199  	}
 12200  
 12201  	out += ")"
 12202  	return out
 12203  }
 12204  
// AppendStoppedAlloc marks an allocation to be stopped. The clientStatus of the
// allocation may be optionally set by passing in a non-empty value. The alloc
// is shallow-copied, stripped of its Job and Resources (both recoverable by
// the receiver), and appended to the plan's NodeUpdate for its node.
func (p *Plan) AppendStoppedAlloc(alloc *Allocation, desiredDesc, clientStatus, followupEvalID string) {
	newAlloc := new(Allocation)
	*newAlloc = *alloc

	// If the job is not set in the plan we are deregistering a job so we
	// extract the job from the allocation.
	if p.Job == nil && newAlloc.Job != nil {
		p.Job = newAlloc.Job
	}

	// Normalize the job
	newAlloc.Job = nil

	// Strip the resources as it can be rebuilt.
	newAlloc.Resources = nil

	newAlloc.DesiredStatus = AllocDesiredStatusStop
	newAlloc.DesiredDescription = desiredDesc

	if clientStatus != "" {
		newAlloc.ClientStatus = clientStatus
	}

	// NOTE(review): the state entry is appended even when clientStatus is
	// empty, unlike the guarded assignment above — presumably intentional so
	// the transition is always recorded; confirm before changing.
	newAlloc.AppendState(AllocStateFieldClientStatus, clientStatus)

	if followupEvalID != "" {
		newAlloc.FollowupEvalID = followupEvalID
	}

	node := alloc.NodeID
	existing := p.NodeUpdate[node]
	p.NodeUpdate[node] = append(existing, newAlloc)
}
 12240  
 12241  // AppendPreemptedAlloc is used to append an allocation that's being preempted to the plan.
 12242  // To minimize the size of the plan, this only sets a minimal set of fields in the allocation
 12243  func (p *Plan) AppendPreemptedAlloc(alloc *Allocation, preemptingAllocID string) {
 12244  	newAlloc := &Allocation{}
 12245  	newAlloc.ID = alloc.ID
 12246  	newAlloc.JobID = alloc.JobID
 12247  	newAlloc.Namespace = alloc.Namespace
 12248  	newAlloc.DesiredStatus = AllocDesiredStatusEvict
 12249  	newAlloc.PreemptedByAllocation = preemptingAllocID
 12250  
 12251  	desiredDesc := fmt.Sprintf("Preempted by alloc ID %v", preemptingAllocID)
 12252  	newAlloc.DesiredDescription = desiredDesc
 12253  
 12254  	// TaskResources are needed by the plan applier to check if allocations fit
 12255  	// after removing preempted allocations
 12256  	if alloc.AllocatedResources != nil {
 12257  		newAlloc.AllocatedResources = alloc.AllocatedResources
 12258  	} else {
 12259  		// COMPAT Remove in version 0.11
 12260  		newAlloc.TaskResources = alloc.TaskResources
 12261  		newAlloc.SharedResources = alloc.SharedResources
 12262  	}
 12263  
 12264  	// Append this alloc to slice for this node
 12265  	node := alloc.NodeID
 12266  	existing := p.NodePreemptions[node]
 12267  	p.NodePreemptions[node] = append(existing, newAlloc)
 12268  }
 12269  
 12270  // AppendUnknownAlloc marks an allocation as unknown.
 12271  func (p *Plan) AppendUnknownAlloc(alloc *Allocation) {
 12272  	// Strip the resources as they can be rebuilt.
 12273  	alloc.Resources = nil
 12274  
 12275  	existing := p.NodeAllocation[alloc.NodeID]
 12276  	p.NodeAllocation[alloc.NodeID] = append(existing, alloc)
 12277  }
 12278  
 12279  func (p *Plan) PopUpdate(alloc *Allocation) {
 12280  	existing := p.NodeUpdate[alloc.NodeID]
 12281  	n := len(existing)
 12282  	if n > 0 && existing[n-1].ID == alloc.ID {
 12283  		existing = existing[:n-1]
 12284  		if len(existing) > 0 {
 12285  			p.NodeUpdate[alloc.NodeID] = existing
 12286  		} else {
 12287  			delete(p.NodeUpdate, alloc.NodeID)
 12288  		}
 12289  	}
 12290  }
 12291  
 12292  // AppendAlloc appends the alloc to the plan allocations.
 12293  // Uses the passed job if explicitly passed, otherwise
 12294  // it is assumed the alloc will use the plan Job version.
 12295  func (p *Plan) AppendAlloc(alloc *Allocation, job *Job) {
 12296  	node := alloc.NodeID
 12297  	existing := p.NodeAllocation[node]
 12298  
 12299  	alloc.Job = job
 12300  
 12301  	p.NodeAllocation[node] = append(existing, alloc)
 12302  }
 12303  
 12304  // IsNoOp checks if this plan would do nothing
 12305  func (p *Plan) IsNoOp() bool {
 12306  	return len(p.NodeUpdate) == 0 &&
 12307  		len(p.NodeAllocation) == 0 &&
 12308  		p.Deployment == nil &&
 12309  		len(p.DeploymentUpdates) == 0
 12310  }
 12311  
 12312  // NormalizeAllocations normalizes allocations to remove fields that can
 12313  // be fetched from the MemDB instead of sending over the wire
 12314  func (p *Plan) NormalizeAllocations() {
 12315  	for _, allocs := range p.NodeUpdate {
 12316  		for i, alloc := range allocs {
 12317  			allocs[i] = &Allocation{
 12318  				ID:                 alloc.ID,
 12319  				DesiredDescription: alloc.DesiredDescription,
 12320  				ClientStatus:       alloc.ClientStatus,
 12321  				FollowupEvalID:     alloc.FollowupEvalID,
 12322  			}
 12323  		}
 12324  	}
 12325  
 12326  	for _, allocs := range p.NodePreemptions {
 12327  		for i, alloc := range allocs {
 12328  			allocs[i] = &Allocation{
 12329  				ID:                    alloc.ID,
 12330  				PreemptedByAllocation: alloc.PreemptedByAllocation,
 12331  			}
 12332  		}
 12333  	}
 12334  }
 12335  
// PlanResult is the result of a plan submitted to the leader. It reports the
// subset of the plan that was actually committed to Raft.
type PlanResult struct {
	// NodeUpdate contains all the evictions and stops that were committed,
	// keyed by node ID.
	NodeUpdate map[string][]*Allocation

	// NodeAllocation contains all the allocations that were committed,
	// keyed by node ID.
	NodeAllocation map[string][]*Allocation

	// Deployment is the deployment that was committed.
	Deployment *Deployment

	// DeploymentUpdates is the set of deployment updates that were committed.
	DeploymentUpdates []*DeploymentStatusUpdate

	// NodePreemptions is a map from node id to a set of allocations from other
	// lower priority jobs that are preempted. Preempted allocations are marked
	// as stopped.
	NodePreemptions map[string][]*Allocation

	// RejectedNodes are nodes the scheduler worker has rejected placements for
	// and should be considered for ineligibility by the plan applier to avoid
	// retrying them repeatedly.
	RejectedNodes []string

	// IneligibleNodes are nodes the plan applier has repeatedly rejected
	// placements for and should therefore be considered ineligible by workers
	// to avoid retrying them repeatedly.
	IneligibleNodes []string

	// RefreshIndex is the index the worker should refresh state up to.
	// This allows all evictions and allocations to be materialized.
	// If any allocations were rejected due to stale data (node state,
	// over committed) this can be used to force a worker refresh.
	RefreshIndex uint64

	// AllocIndex is the Raft index in which the evictions and
	// allocations took place. This is used for the write index.
	AllocIndex uint64
}
 12375  
 12376  // IsNoOp checks if this plan result would do nothing
 12377  func (p *PlanResult) IsNoOp() bool {
 12378  	return len(p.IneligibleNodes) == 0 && len(p.NodeUpdate) == 0 &&
 12379  		len(p.NodeAllocation) == 0 && len(p.DeploymentUpdates) == 0 &&
 12380  		p.Deployment == nil
 12381  }
 12382  
 12383  // FullCommit is used to check if all the allocations in a plan
 12384  // were committed as part of the result. Returns if there was
 12385  // a match, and the number of expected and actual allocations.
 12386  func (p *PlanResult) FullCommit(plan *Plan) (bool, int, int) {
 12387  	expected := 0
 12388  	actual := 0
 12389  	for name, allocList := range plan.NodeAllocation {
 12390  		didAlloc := p.NodeAllocation[name]
 12391  		expected += len(allocList)
 12392  		actual += len(didAlloc)
 12393  	}
 12394  	return actual == expected, expected, actual
 12395  }
 12396  
// PlanAnnotations holds annotations made by the scheduler to give further debug
// information to operators.
type PlanAnnotations struct {
	// DesiredTGUpdates is the set of desired updates per task group, keyed by
	// task group name.
	DesiredTGUpdates map[string]*DesiredUpdates

	// PreemptedAllocs is the set of allocations to be preempted to make the placement successful.
	PreemptedAllocs []*AllocListStub
}
 12406  
 12407  // DesiredUpdates is the set of changes the scheduler would like to make given
 12408  // sufficient resources and cluster capacity.
 12409  type DesiredUpdates struct {
 12410  	Ignore            uint64
 12411  	Place             uint64
 12412  	Migrate           uint64
 12413  	Stop              uint64
 12414  	InPlaceUpdate     uint64
 12415  	DestructiveUpdate uint64
 12416  	Canary            uint64
 12417  	Preemptions       uint64
 12418  }
 12419  
 12420  func (d *DesiredUpdates) GoString() string {
 12421  	return fmt.Sprintf("(place %d) (inplace %d) (destructive %d) (stop %d) (migrate %d) (ignore %d) (canary %d)",
 12422  		d.Place, d.InPlaceUpdate, d.DestructiveUpdate, d.Stop, d.Migrate, d.Ignore, d.Canary)
 12423  }
 12424  
// MsgpackHandle is the shared codec handle used for all msgpack
// encoding/decoding of structs. It is configured exactly once at package
// initialization; do not mutate it afterwards.
var MsgpackHandle = func() *codec.MsgpackHandle {
	h := &codec.MsgpackHandle{}
	// Decode msgpack raw/binary payloads into Go strings.
	h.RawToString = true

	// maintain binary format from time prior to upgrading latest ugorji
	h.BasicHandle.TimeNotBuiltin = true

	// Sets the default type for decoding a map into a nil interface{}.
	// This is necessary in particular because we store the driver configs as a
	// nil interface{}.
	h.MapType = reflect.TypeOf(map[string]interface{}(nil))

	// only review struct codec tags
	h.TypeInfos = codec.NewTypeInfos([]string{"codec"})

	return h
}()
 12443  
 12444  // Decode is used to decode a MsgPack encoded object
 12445  func Decode(buf []byte, out interface{}) error {
 12446  	return codec.NewDecoder(bytes.NewReader(buf), MsgpackHandle).Decode(out)
 12447  }
 12448  
 12449  // Encode is used to encode a MsgPack object with type prefix
 12450  func Encode(t MessageType, msg interface{}) ([]byte, error) {
 12451  	var buf bytes.Buffer
 12452  	buf.WriteByte(uint8(t))
 12453  	err := codec.NewEncoder(&buf, MsgpackHandle).Encode(msg)
 12454  	return buf.Bytes(), err
 12455  }
 12456  
// KeyringResponse is a unified key response and can be used for install,
// remove, use, as well as listing key queries.
type KeyringResponse struct {
	// Messages carries per-member status messages.
	// NOTE(review): the map key semantics are not visible here — confirm
	// against the RPC handlers that populate this.
	Messages map[string]string

	// Keys lists keys with an integer count per key.
	// NOTE(review): presumably the number of members holding each key —
	// confirm with callers.
	Keys map[string]int

	NumNodes int
}
 12464  
// KeyringRequest is the request object for serf key operations; Key holds the
// serf encryption key being installed, used, or removed.
type KeyringRequest struct {
	Key string
}
 12469  
 12470  // RecoverableError wraps an error and marks whether it is recoverable and could
 12471  // be retried or it is fatal.
 12472  type RecoverableError struct {
 12473  	Err         string
 12474  	Recoverable bool
 12475  	wrapped     error
 12476  }
 12477  
 12478  // NewRecoverableError is used to wrap an error and mark it as recoverable or
 12479  // not.
 12480  func NewRecoverableError(e error, recoverable bool) error {
 12481  	if e == nil {
 12482  		return nil
 12483  	}
 12484  
 12485  	return &RecoverableError{
 12486  		Err:         e.Error(),
 12487  		Recoverable: recoverable,
 12488  		wrapped:     e,
 12489  	}
 12490  }
 12491  
 12492  // WrapRecoverable wraps an existing error in a new RecoverableError with a new
 12493  // message. If the error was recoverable before the returned error is as well;
 12494  // otherwise it is unrecoverable.
 12495  func WrapRecoverable(msg string, err error) error {
 12496  	return &RecoverableError{Err: msg, Recoverable: IsRecoverable(err)}
 12497  }
 12498  
 12499  func (r *RecoverableError) Error() string {
 12500  	return r.Err
 12501  }
 12502  
 12503  func (r *RecoverableError) IsRecoverable() bool {
 12504  	return r.Recoverable
 12505  }
 12506  
 12507  func (r *RecoverableError) IsUnrecoverable() bool {
 12508  	return !r.Recoverable
 12509  }
 12510  
 12511  func (r *RecoverableError) Unwrap() error {
 12512  	return r.wrapped
 12513  }
 12514  
 12515  // Recoverable is an interface for errors to implement to indicate whether or
 12516  // not they are fatal or recoverable.
 12517  type Recoverable interface {
 12518  	error
 12519  	IsRecoverable() bool
 12520  }
 12521  
 12522  // IsRecoverable returns true if error is a RecoverableError with
 12523  // Recoverable=true. Otherwise false is returned.
 12524  func IsRecoverable(e error) bool {
 12525  	if re, ok := e.(Recoverable); ok {
 12526  		return re.IsRecoverable()
 12527  	}
 12528  	return false
 12529  }
 12530  
 12531  // WrappedServerError wraps an error and satisfies
 12532  // both the Recoverable and the ServerSideError interfaces
 12533  type WrappedServerError struct {
 12534  	Err error
 12535  }
 12536  
 12537  // NewWrappedServerError is used to create a wrapped server side error
 12538  func NewWrappedServerError(e error) error {
 12539  	return &WrappedServerError{
 12540  		Err: e,
 12541  	}
 12542  }
 12543  
 12544  func (r *WrappedServerError) IsRecoverable() bool {
 12545  	return IsRecoverable(r.Err)
 12546  }
 12547  
 12548  func (r *WrappedServerError) Error() string {
 12549  	return r.Err.Error()
 12550  }
 12551  
 12552  func (r *WrappedServerError) IsServerSide() bool {
 12553  	return true
 12554  }
 12555  
 12556  // ServerSideError is an interface for errors to implement to indicate
 12557  // errors occurring after the request makes it to a server
 12558  type ServerSideError interface {
 12559  	error
 12560  	IsServerSide() bool
 12561  }
 12562  
 12563  // IsServerSide returns true if error is a wrapped
 12564  // server side error
 12565  func IsServerSide(e error) bool {
 12566  	if se, ok := e.(ServerSideError); ok {
 12567  		return se.IsServerSide()
 12568  	}
 12569  	return false
 12570  }
 12571  
// ACLPolicy is used to represent an ACL policy
type ACLPolicy struct {
	Name        string      // Unique name
	Description string      // Human readable
	Rules       string      // HCL or JSON format
	RulesJSON   *acl.Policy // Generated from Rules on read
	JobACL      *JobACL     // Optional attachment to a job, group, or task
	Hash        []byte      // Hash of the user-settable fields; see SetHash

	CreateIndex uint64 // Raft index at creation
	ModifyIndex uint64 // Raft index of last modification
}
 12584  
// JobACL represents an ACL policy's attachment to a job, group, or task. The
// fields form a hierarchy: Namespace scopes JobID, which scopes Group, which
// scopes Task (enforced by ACLPolicy.Validate).
type JobACL struct {
	Namespace string // namespace of the job
	JobID     string // ID of the job
	Group     string // ID of the group
	Task      string // ID of the task
}
 12592  
 12593  // SetHash is used to compute and set the hash of the ACL policy
 12594  func (a *ACLPolicy) SetHash() []byte {
 12595  	// Initialize a 256bit Blake2 hash (32 bytes)
 12596  	hash, err := blake2b.New256(nil)
 12597  	if err != nil {
 12598  		panic(err)
 12599  	}
 12600  
 12601  	// Write all the user set fields
 12602  	_, _ = hash.Write([]byte(a.Name))
 12603  	_, _ = hash.Write([]byte(a.Description))
 12604  	_, _ = hash.Write([]byte(a.Rules))
 12605  
 12606  	if a.JobACL != nil {
 12607  		_, _ = hash.Write([]byte(a.JobACL.Namespace))
 12608  		_, _ = hash.Write([]byte(a.JobACL.JobID))
 12609  		_, _ = hash.Write([]byte(a.JobACL.Group))
 12610  		_, _ = hash.Write([]byte(a.JobACL.Task))
 12611  	}
 12612  
 12613  	// Finalize the hash
 12614  	hashVal := hash.Sum(nil)
 12615  
 12616  	// Set and return the hash
 12617  	a.Hash = hashVal
 12618  	return hashVal
 12619  }
 12620  
 12621  func (a *ACLPolicy) Stub() *ACLPolicyListStub {
 12622  	return &ACLPolicyListStub{
 12623  		Name:        a.Name,
 12624  		Description: a.Description,
 12625  		Hash:        a.Hash,
 12626  		CreateIndex: a.CreateIndex,
 12627  		ModifyIndex: a.ModifyIndex,
 12628  	}
 12629  }
 12630  
 12631  func (a *ACLPolicy) Validate() error {
 12632  	var mErr multierror.Error
 12633  	if !ValidPolicyName.MatchString(a.Name) {
 12634  		err := fmt.Errorf("invalid name '%s'", a.Name)
 12635  		mErr.Errors = append(mErr.Errors, err)
 12636  	}
 12637  	if _, err := acl.Parse(a.Rules); err != nil {
 12638  		err = fmt.Errorf("failed to parse rules: %v", err)
 12639  		mErr.Errors = append(mErr.Errors, err)
 12640  	}
 12641  	if len(a.Description) > maxPolicyDescriptionLength {
 12642  		err := fmt.Errorf("description longer than %d", maxPolicyDescriptionLength)
 12643  		mErr.Errors = append(mErr.Errors, err)
 12644  	}
 12645  	if a.JobACL != nil {
 12646  		if a.JobACL.JobID != "" && a.JobACL.Namespace == "" {
 12647  			err := fmt.Errorf("namespace must be set to set job ID")
 12648  			mErr.Errors = append(mErr.Errors, err)
 12649  		}
 12650  		if a.JobACL.Group != "" && a.JobACL.JobID == "" {
 12651  			err := fmt.Errorf("job ID must be set to set group")
 12652  			mErr.Errors = append(mErr.Errors, err)
 12653  		}
 12654  		if a.JobACL.Task != "" && a.JobACL.Group == "" {
 12655  			err := fmt.Errorf("group must be set to set task")
 12656  			mErr.Errors = append(mErr.Errors, err)
 12657  		}
 12658  	}
 12659  
 12660  	return mErr.ErrorOrNil()
 12661  }
 12662  
// ACLPolicyListStub is used to for listing ACL policies; it carries the
// policy metadata but omits the Rules payload (see ACLPolicy.Stub).
type ACLPolicyListStub struct {
	Name        string
	Description string
	Hash        []byte
	CreateIndex uint64
	ModifyIndex uint64
}
 12671  
// ACLPolicyListRequest is used to request a list of policies
type ACLPolicyListRequest struct {
	QueryOptions
}

// ACLPolicySpecificRequest is used to query a specific policy by name
type ACLPolicySpecificRequest struct {
	Name string
	QueryOptions
}

// ACLPolicySetRequest is used to query a set of policies by name
type ACLPolicySetRequest struct {
	Names []string
	QueryOptions
}

// ACLPolicyListResponse is used for a list request
type ACLPolicyListResponse struct {
	Policies []*ACLPolicyListStub
	QueryMeta
}

// SingleACLPolicyResponse is used to return a single policy
type SingleACLPolicyResponse struct {
	Policy *ACLPolicy
	QueryMeta
}

// ACLPolicySetResponse is used to return a set of policies
type ACLPolicySetResponse struct {
	Policies map[string]*ACLPolicy // NOTE(review): presumably keyed by policy name — confirm
	QueryMeta
}

// ACLPolicyDeleteRequest is used to delete a set of policies by name
type ACLPolicyDeleteRequest struct {
	Names []string
	WriteRequest
}

// ACLPolicyUpsertRequest is used to create or update a set of policies
type ACLPolicyUpsertRequest struct {
	Policies []*ACLPolicy
	WriteRequest
}
 12718  
// ACLToken represents a client token which is used to Authenticate
type ACLToken struct {
	AccessorID string   // Public Accessor ID (UUID)
	SecretID   string   // Secret ID, private (UUID)
	Name       string   // Human friendly name
	Type       string   // Client or Management
	Policies   []string // Policies this token ties to

	// Roles represents the ACL roles that this token is tied to. The token
	// will inherit the permissions of all policies detailed within the role.
	Roles []*ACLTokenRoleLink

	Global     bool   // Global or Region local
	Hash       []byte // Hash of the mutable fields; see SetHash
	CreateTime time.Time // Time of creation

	// ExpirationTime represents the point after which a token should be
	// considered revoked and is eligible for destruction. This time should
	// always use UTC to account for multi-region global tokens. It is a
	// pointer, so we can store nil, rather than the zero value of time.Time.
	ExpirationTime *time.Time

	// ExpirationTTL is a convenience field for helping set ExpirationTime to a
	// value of CreateTime+ExpirationTTL. This can only be set during token
	// creation. This is a string version of a time.Duration like "2m".
	ExpirationTTL time.Duration

	CreateIndex uint64 // Raft index at creation
	ModifyIndex uint64 // Raft index of last modification
}
 12749  
 12750  // GetID implements the IDGetter interface, required for pagination.
 12751  func (a *ACLToken) GetID() string {
 12752  	if a == nil {
 12753  		return ""
 12754  	}
 12755  	return a.AccessorID
 12756  }
 12757  
 12758  // GetCreateIndex implements the CreateIndexGetter interface, required for
 12759  // pagination.
 12760  func (a *ACLToken) GetCreateIndex() uint64 {
 12761  	if a == nil {
 12762  		return 0
 12763  	}
 12764  	return a.CreateIndex
 12765  }
 12766  
// Copy returns a new ACLToken with its own Policies, Hash, and Roles slices.
// NOTE(review): the Roles slice itself is duplicated but its
// *ACLTokenRoleLink elements are shared with the original, as is the
// ExpirationTime pointer — confirm no caller mutates these through a copy.
func (a *ACLToken) Copy() *ACLToken {
	c := new(ACLToken)
	*c = *a

	c.Policies = make([]string, len(a.Policies))
	copy(c.Policies, a.Policies)

	c.Hash = make([]byte, len(a.Hash))
	copy(c.Hash, a.Hash)

	c.Roles = make([]*ACLTokenRoleLink, len(a.Roles))
	copy(c.Roles, a.Roles)

	return c
}
 12782  
var (
	// AnonymousACLToken is used when no SecretID is provided, and the
	// request is made anonymously.
	AnonymousACLToken = &ACLToken{
		AccessorID: "anonymous",
		Name:       "Anonymous Token",
		Type:       ACLClientToken,
		Policies:   []string{"anonymous"},
		Global:     false,
	}

	// LeaderACLToken is used to represent a leader's own token; this object
	// never gets used except on the leader
	LeaderACLToken = &ACLToken{
		AccessorID: "leader",
		Name:       "Leader Token",
		Type:       ACLManagementToken,
	}
)
 12802  
// ACLTokenListStub is the list-friendly form of an ACLToken; it omits the
// private SecretID (see ACLToken.Stub).
type ACLTokenListStub struct {
	AccessorID     string
	Name           string
	Type           string
	Policies       []string
	Roles          []*ACLTokenRoleLink
	Global         bool
	Hash           []byte
	CreateTime     time.Time
	ExpirationTime *time.Time
	CreateIndex    uint64
	ModifyIndex    uint64
}
 12816  
 12817  // SetHash is used to compute and set the hash of the ACL token. It only hashes
 12818  // fields which can be updated, and as such, does not hash fields such as
 12819  // ExpirationTime.
 12820  func (a *ACLToken) SetHash() []byte {
 12821  	// Initialize a 256bit Blake2 hash (32 bytes)
 12822  	hash, err := blake2b.New256(nil)
 12823  	if err != nil {
 12824  		panic(err)
 12825  	}
 12826  
 12827  	// Write all the user set fields
 12828  	_, _ = hash.Write([]byte(a.Name))
 12829  	_, _ = hash.Write([]byte(a.Type))
 12830  	for _, policyName := range a.Policies {
 12831  		_, _ = hash.Write([]byte(policyName))
 12832  	}
 12833  	if a.Global {
 12834  		_, _ = hash.Write([]byte("global"))
 12835  	} else {
 12836  		_, _ = hash.Write([]byte("local"))
 12837  	}
 12838  
 12839  	// Iterate the ACL role links and hash the ID. The ID is immutable and the
 12840  	// canonical way to reference a role. The name can be modified by
 12841  	// operators, but won't impact the ACL token resolution.
 12842  	for _, roleLink := range a.Roles {
 12843  		_, _ = hash.Write([]byte(roleLink.ID))
 12844  	}
 12845  
 12846  	// Finalize the hash
 12847  	hashVal := hash.Sum(nil)
 12848  
 12849  	// Set and return the hash
 12850  	a.Hash = hashVal
 12851  	return hashVal
 12852  }
 12853  
 12854  func (a *ACLToken) Stub() *ACLTokenListStub {
 12855  	return &ACLTokenListStub{
 12856  		AccessorID:     a.AccessorID,
 12857  		Name:           a.Name,
 12858  		Type:           a.Type,
 12859  		Policies:       a.Policies,
 12860  		Roles:          a.Roles,
 12861  		Global:         a.Global,
 12862  		Hash:           a.Hash,
 12863  		CreateTime:     a.CreateTime,
 12864  		ExpirationTime: a.ExpirationTime,
 12865  		CreateIndex:    a.CreateIndex,
 12866  		ModifyIndex:    a.ModifyIndex,
 12867  	}
 12868  }
 12869  
// ACLTokenListRequest is used to request a list of tokens
type ACLTokenListRequest struct {
	GlobalOnly bool // restrict the listing to global tokens
	QueryOptions
}

// ACLTokenSpecificRequest is used to query a specific token by accessor ID
type ACLTokenSpecificRequest struct {
	AccessorID string
	QueryOptions
}

// ACLTokenSetRequest is used to query a set of tokens by accessor ID.
// (The field name keeps its legacy "IDS" casing for wire compatibility.)
type ACLTokenSetRequest struct {
	AccessorIDS []string
	QueryOptions
}

// ACLTokenListResponse is used for a list request
type ACLTokenListResponse struct {
	Tokens []*ACLTokenListStub
	QueryMeta
}

// SingleACLTokenResponse is used to return a single token
type SingleACLTokenResponse struct {
	Token *ACLToken
	QueryMeta
}

// ACLTokenSetResponse is used to return a set of token
type ACLTokenSetResponse struct {
	Tokens map[string]*ACLToken // Keyed by Accessor ID
	QueryMeta
}

// ResolveACLTokenRequest is used to resolve a specific token from its secret
type ResolveACLTokenRequest struct {
	SecretID string
	QueryOptions
}

// ResolveACLTokenResponse is used to resolve a single token
type ResolveACLTokenResponse struct {
	Token *ACLToken
	QueryMeta
}

// ACLTokenDeleteRequest is used to delete a set of tokens by accessor ID
type ACLTokenDeleteRequest struct {
	AccessorIDs []string
	WriteRequest
}

// ACLTokenBootstrapRequest is used to bootstrap ACLs
type ACLTokenBootstrapRequest struct {
	Token           *ACLToken // Not client specifiable
	ResetIndex      uint64    // Reset index is used to clear the bootstrap token
	BootstrapSecret string
	WriteRequest
}

// ACLTokenUpsertRequest is used to upsert a set of tokens
type ACLTokenUpsertRequest struct {
	Tokens []*ACLToken
	WriteRequest
}

// ACLTokenUpsertResponse is used to return from an ACLTokenUpsertRequest
type ACLTokenUpsertResponse struct {
	Tokens []*ACLToken
	WriteMeta
}
 12943  
// OneTimeToken is used to log into the web UI using a token provided by the
// command line.
type OneTimeToken struct {
	OneTimeSecretID string    // single-use secret exchanged for the ACL token
	AccessorID      string    // accessor ID of the backing ACL token
	ExpiresAt       time.Time // point after which the one-time token is invalid
	CreateIndex     uint64
	ModifyIndex     uint64
}

// OneTimeTokenUpsertRequest is the request for a UpsertOneTimeToken RPC
type OneTimeTokenUpsertRequest struct {
	WriteRequest
}

// OneTimeTokenUpsertResponse is the response to a UpsertOneTimeToken RPC.
type OneTimeTokenUpsertResponse struct {
	OneTimeToken *OneTimeToken
	WriteMeta
}

// OneTimeTokenExchangeRequest is a request to swap the one-time token with
// the backing ACL token
type OneTimeTokenExchangeRequest struct {
	OneTimeSecretID string
	WriteRequest
}

// OneTimeTokenExchangeResponse is the response to swapping the one-time token
// with the backing ACL token
type OneTimeTokenExchangeResponse struct {
	Token *ACLToken
	WriteMeta
}

// OneTimeTokenDeleteRequest is a request to delete a group of one-time tokens
type OneTimeTokenDeleteRequest struct {
	AccessorIDs []string
	WriteRequest
}

// OneTimeTokenExpireRequest is a request to delete all expired one-time tokens
type OneTimeTokenExpireRequest struct {
	Timestamp time.Time // tokens expiring before this instant are removed
	WriteRequest
}
 12990  
 12991  // RpcError is used for serializing errors with a potential error code
 12992  type RpcError struct {
 12993  	Message string
 12994  	Code    *int64
 12995  }
 12996  
 12997  func NewRpcError(err error, code *int64) *RpcError {
 12998  	return &RpcError{
 12999  		Message: err.Error(),
 13000  		Code:    code,
 13001  	}
 13002  }
 13003  
 13004  func (r *RpcError) Error() string {
 13005  	return r.Message
 13006  }