github.com/ThomasObenaus/nomad@v0.11.1/nomad/structs/structs.go

package structs

import (
	"bytes"
	"container/heap"
	"crypto/md5"
	"crypto/sha1"
	"crypto/sha256"
	"crypto/sha512"
	"encoding/base32"
	"encoding/base64"
	"encoding/hex"
	"errors"
	"fmt"
	"math"
	"net"
	"os"
	"path/filepath"
	"reflect"
	"regexp"
	"sort"
	"strconv"
	"strings"
	"time"

	"github.com/gorhill/cronexpr"
	"github.com/hashicorp/go-msgpack/codec"
	hcodec "github.com/hashicorp/go-msgpack/codec"
	"github.com/hashicorp/go-multierror"
	"github.com/hashicorp/go-version"
	"github.com/mitchellh/copystructure"
	"golang.org/x/crypto/blake2b"

	"github.com/hashicorp/nomad/acl"
	"github.com/hashicorp/nomad/command/agent/pprof"
	"github.com/hashicorp/nomad/helper"
	"github.com/hashicorp/nomad/helper/args"
	"github.com/hashicorp/nomad/helper/constraints/semver"
	"github.com/hashicorp/nomad/helper/uuid"
	"github.com/hashicorp/nomad/lib/kheap"
	psstructs "github.com/hashicorp/nomad/plugins/shared/structs"
)

var (
	// validPolicyName is used to validate a policy name
	validPolicyName = regexp.MustCompile("^[a-zA-Z0-9-]{1,128}$")

	// b32 is a lowercase base32 encoding for use in URL friendly service hashes
	b32 = base32.NewEncoding(strings.ToLower("abcdefghijklmnopqrstuvwxyz234567"))
)
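
// exampleServiceHash is a minimal sketch of how the b32 encoding above can
// produce a URL-friendly hash; the choice of sha1 and the input string are
// illustrative assumptions, not Nomad's exact service hashing scheme.
func exampleServiceHash(name string) string {
	sum := sha1.Sum([]byte(name)) // 20-byte digest
	return b32.EncodeToString(sum[:])
}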

type MessageType uint8

const (
	NodeRegisterRequestType MessageType = iota
	NodeDeregisterRequestType
	NodeUpdateStatusRequestType
	NodeUpdateDrainRequestType
	JobRegisterRequestType
	JobDeregisterRequestType
	EvalUpdateRequestType
	EvalDeleteRequestType
	AllocUpdateRequestType
	AllocClientUpdateRequestType
	ReconcileJobSummariesRequestType
	VaultAccessorRegisterRequestType
	VaultAccessorDeregisterRequestType
	ApplyPlanResultsRequestType
	DeploymentStatusUpdateRequestType
	DeploymentPromoteRequestType
	DeploymentAllocHealthRequestType
	DeploymentDeleteRequestType
	JobStabilityRequestType
	ACLPolicyUpsertRequestType
	ACLPolicyDeleteRequestType
	ACLTokenUpsertRequestType
	ACLTokenDeleteRequestType
	ACLTokenBootstrapRequestType
	AutopilotRequestType
	UpsertNodeEventsType
	JobBatchDeregisterRequestType
	AllocUpdateDesiredTransitionRequestType
	NodeUpdateEligibilityRequestType
	BatchNodeUpdateDrainRequestType
	SchedulerConfigRequestType
	NodeBatchDeregisterRequestType
	ClusterMetadataRequestType
	ServiceIdentityAccessorRegisterRequestType
	ServiceIdentityAccessorDeregisterRequestType
	CSIVolumeRegisterRequestType
	CSIVolumeDeregisterRequestType
	CSIVolumeClaimRequestType
	ScalingEventRegisterRequestType
)

const (
	// IgnoreUnknownTypeFlag is set along with a MessageType
	// to indicate that the message type can be safely ignored
	// if it is not recognized. This is for future proofing, so
	// that new commands can be added in a way that won't cause
	// old servers to crash when the FSM attempts to process them.
	IgnoreUnknownTypeFlag MessageType = 128

	// ApiMajorVersion is returned as part of the Status.Version request.
	// It should be incremented anytime the APIs are changed in a way
	// that would break clients for sane client versioning.
	ApiMajorVersion = 1

	// ApiMinorVersion is returned as part of the Status.Version request.
	// It should be incremented anytime the APIs are changed to allow
	// for sane client versioning. Minor changes should be compatible
	// within the major version.
	ApiMinorVersion = 1

	ProtocolVersion = "protocol"
	APIMajorVersion = "api.major"
	APIMinorVersion = "api.minor"

	GetterModeAny  = "any"
	GetterModeFile = "file"
	GetterModeDir  = "dir"

	// maxPolicyDescriptionLength limits a policy description length
	maxPolicyDescriptionLength = 256

	// maxTokenNameLength limits an ACL token name length
	maxTokenNameLength = 256

	// ACLClientToken and ACLManagementToken are the only types of tokens
	ACLClientToken     = "client"
	ACLManagementToken = "management"

	// DefaultNamespace is the default namespace.
	DefaultNamespace            = "default"
	DefaultNamespaceDescription = "Default shared namespace"

	// JitterFraction is the limit to the amount of jitter we apply
	// to a user specified MaxQueryTime. We divide the specified time by
	// the fraction, so 16 == a 6.25% jitter limit. This jitter is also
	// applied to RPCHoldTimeout.
	JitterFraction = 16

	// MaxRetainedNodeEvents is the maximum number of node events that will be
	// retained for a single node
	MaxRetainedNodeEvents = 10

	// MaxRetainedNodeScores is the number of top scoring nodes for which we
	// retain scoring metadata
	MaxRetainedNodeScores = 5

	// Normalized scorer name
	NormScorerName = "normalized-score"
)
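
// fsmApplyExample is a hedged sketch of how an FSM consumer might honor the
// IgnoreUnknownTypeFlag defined above when decoding a Raft log entry; the
// buffer layout (type byte followed by the payload) is an assumption made
// for illustration, not a definitive implementation.
func fsmApplyExample(buf []byte) error {
	msgType := MessageType(buf[0])
	ignoreUnknown := msgType&IgnoreUnknownTypeFlag == IgnoreUnknownTypeFlag
	msgType &= ^IgnoreUnknownTypeFlag // clear the flag to recover the real type

	switch msgType {
	case NodeRegisterRequestType:
		// decode buf[1:] and apply the node registration ...
		return nil
	default:
		if ignoreUnknown {
			return nil // a newer command type; safe to skip
		}
		return fmt.Errorf("unrecognized message type %v", msgType)
	}
}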

// Context defines the scope in which a search for a Nomad object operates, and
// is also used to query the matching index value for this context
type Context string

const (
	Allocs      Context = "allocs"
	Deployments Context = "deployment"
	Evals       Context = "evals"
	Jobs        Context = "jobs"
	Nodes       Context = "nodes"
	Namespaces  Context = "namespaces"
	Quotas      Context = "quotas"
	All         Context = "all"
	Plugins     Context = "plugins"
	Volumes     Context = "volumes"
)

// NamespacedID is a tuple of an ID and a namespace
type NamespacedID struct {
	ID        string
	Namespace string
}

// NewNamespacedID returns a new namespaced ID given the ID and namespace
func NewNamespacedID(id, ns string) NamespacedID {
	return NamespacedID{
		ID:        id,
		Namespace: ns,
	}
}

func (n NamespacedID) String() string {
	return fmt.Sprintf("<ns: %q, id: %q>", n.Namespace, n.ID)
}

// RPCInfo is used to describe common information about a query
type RPCInfo interface {
	RequestRegion() string
	IsRead() bool
	AllowStaleRead() bool
	IsForwarded() bool
	SetForwarded()
}

// InternalRpcInfo allows adding internal RPC metadata to an RPC. This struct
// should NOT be replicated in the API package as it is internal only.
type InternalRpcInfo struct {
	// Forwarded marks whether the RPC has been forwarded.
	Forwarded bool
}

// IsForwarded returns whether the RPC is forwarded from another server.
func (i *InternalRpcInfo) IsForwarded() bool {
	return i.Forwarded
}

// SetForwarded marks that the RPC is being forwarded from another server.
func (i *InternalRpcInfo) SetForwarded() {
	i.Forwarded = true
}

// QueryOptions is used to specify various flags for read queries
type QueryOptions struct {
	// The target region for this query
	Region string

	// Namespace is the target namespace for the query.
	//
	// Since handlers do not have a default value set, they should access
	// the Namespace via the RequestNamespace method.
	//
	// Requests accessing specific namespaced objects must check ACLs
	// against the namespace of the object, not the namespace in the
	// request.
	Namespace string

	// If set, wait until the query exceeds the given index. Must be provided
	// with MaxQueryTime.
	MinQueryIndex uint64

	// Provided with MinQueryIndex to wait for change.
	MaxQueryTime time.Duration

	// If set, any follower can service the request. Results
	// may be arbitrarily stale.
	AllowStale bool

	// If set, used as prefix for resource list searches
	Prefix string

	// AuthToken is the secret portion of the ACL token used for the request
	AuthToken string

	InternalRpcInfo
}
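
// exampleBlockingQuery is a usage sketch (illustrative only, not part of the
// original file): a list request that blocks until the state store passes
// index 1234 or 30 seconds elapse, tolerating stale reads from a follower.
func exampleBlockingQuery() *JobListRequest {
	return &JobListRequest{
		QueryOptions: QueryOptions{
			Region:        "global",
			Namespace:     "default",
			MinQueryIndex: 1234,
			MaxQueryTime:  30 * time.Second,
			AllowStale:    true,
		},
	}
}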

func (q QueryOptions) RequestRegion() string {
	return q.Region
}

// RequestNamespace returns the request's namespace or the default namespace if
// no explicit namespace was sent.
//
// Requests accessing specific namespaced objects must check ACLs against the
// namespace of the object, not the namespace in the request.
func (q QueryOptions) RequestNamespace() string {
	if q.Namespace == "" {
		return DefaultNamespace
	}
	return q.Namespace
}
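
// A quick illustration of the defaulting behavior above: a request that
// sends no namespace resolves to DefaultNamespace.
func exampleRequestNamespace() string {
	q := QueryOptions{} // Namespace left empty
	return q.RequestNamespace() // "default"
}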

// QueryOptions only applies to reads, so IsRead is always true
func (q QueryOptions) IsRead() bool {
	return true
}

func (q QueryOptions) AllowStaleRead() bool {
	return q.AllowStale
}

// AgentPprofRequest is used to request a pprof report for a given node.
type AgentPprofRequest struct {
	// ReqType specifies the profile to use
	ReqType pprof.ReqType

	// Profile specifies the runtime/pprof profile to lookup and generate.
	Profile string

	// Seconds is the number of seconds to capture a profile
	Seconds int

	// Debug specifies if the pprof profile should include debug output
	Debug int

	// GC specifies if the profile should call runtime.GC() before
	// running its profile. This is only used for "heap" profiles
	GC int

	// NodeID is the node we want to track the logs of
	NodeID string

	// ServerID is the server we want to track the logs of
	ServerID string

	QueryOptions
}

// AgentPprofResponse is used to return a generated pprof profile
type AgentPprofResponse struct {
	// ID of the agent that fulfilled the request
	AgentID string

	// Payload is the generated pprof profile
	Payload []byte

	// HTTPHeaders are a set of key value pairs to be applied as
	// HTTP headers for a specific runtime profile
	HTTPHeaders map[string]string
}

type WriteRequest struct {
	// The target region for this write
	Region string

	// Namespace is the target namespace for the write.
	//
	// Since RPC handlers do not have a default value set, they should
	// access the Namespace via the RequestNamespace method.
	//
	// Requests accessing specific namespaced objects must check ACLs
	// against the namespace of the object, not the namespace in the
	// request.
	Namespace string

	// AuthToken is the secret portion of the ACL token used for the request
	AuthToken string

	InternalRpcInfo
}

func (w WriteRequest) RequestRegion() string {
	// The target region for this request
	return w.Region
}

// RequestNamespace returns the request's namespace or the default namespace if
// no explicit namespace was sent.
//
// Requests accessing specific namespaced objects must check ACLs against the
// namespace of the object, not the namespace in the request.
func (w WriteRequest) RequestNamespace() string {
	if w.Namespace == "" {
		return DefaultNamespace
	}
	return w.Namespace
}

// WriteRequest only applies to writes, so IsRead is always false
func (w WriteRequest) IsRead() bool {
	return false
}

func (w WriteRequest) AllowStaleRead() bool {
	return false
}

// QueryMeta allows a query response to include potentially
// useful metadata about a query
type QueryMeta struct {
	// This is the index associated with the read
	Index uint64

	// If AllowStale is used, this is the time elapsed since the
	// last contact between the follower and leader. This
	// can be used to gauge staleness.
	LastContact time.Duration

	// Used to indicate if there is a known leader node
	KnownLeader bool
}

// WriteMeta allows a write response to include potentially
// useful metadata about the write
type WriteMeta struct {
	// This is the index associated with the write
	Index uint64
}

// NodeRegisterRequest is used for the Node.Register endpoint
// to register a node as being a schedulable entity.
type NodeRegisterRequest struct {
	Node      *Node
	NodeEvent *NodeEvent
	WriteRequest
}

// NodeDeregisterRequest is used for the Node.Deregister endpoint
// to deregister a node as being a schedulable entity.
type NodeDeregisterRequest struct {
	NodeID string
	WriteRequest
}

// NodeBatchDeregisterRequest is used for the Node.BatchDeregister endpoint
// to deregister a batch of nodes from being schedulable entities.
type NodeBatchDeregisterRequest struct {
	NodeIDs []string
	WriteRequest
}

// NodeServerInfo is used in NodeUpdateResponse to return Nomad server
// information used in RPC server lists.
type NodeServerInfo struct {
	// RPCAdvertiseAddr is the IP endpoint that a Nomad Server wishes to
	// be contacted at for RPCs.
	RPCAdvertiseAddr string

	// RPCMajorVersion is the major version number the Nomad Server
	// supports
	RPCMajorVersion int32

	// RPCMinorVersion is the minor version number the Nomad Server
	// supports
	RPCMinorVersion int32

	// Datacenter is the datacenter that a Nomad server belongs to
	Datacenter string
}

// NodeUpdateStatusRequest is used for the Node.UpdateStatus endpoint
// to update the status of a node.
type NodeUpdateStatusRequest struct {
	NodeID    string
	Status    string
	NodeEvent *NodeEvent
	UpdatedAt int64
	WriteRequest
}

// NodeUpdateDrainRequest is used for updating the drain strategy
type NodeUpdateDrainRequest struct {
	NodeID        string
	DrainStrategy *DrainStrategy

	// COMPAT Remove in version 0.10
	// As part of Nomad 0.8 we have deprecated the drain boolean in favor of a
	// drain strategy but we need to handle the upgrade path where the Raft log
	// contains drain updates with just the drain boolean being manipulated.
	Drain bool

	// MarkEligible marks the node as eligible if removing the drain strategy.
	MarkEligible bool

	// NodeEvent is the event added to the node
	NodeEvent *NodeEvent

	// UpdatedAt represents the server time of receiving the request
	UpdatedAt int64

	WriteRequest
}

// BatchNodeUpdateDrainRequest is used for updating the drain strategy for a
// batch of nodes
type BatchNodeUpdateDrainRequest struct {
	// Updates is a mapping of nodes to their updated drain strategy
	Updates map[string]*DrainUpdate

	// NodeEvents is a mapping of the node to the event to add to the node
	NodeEvents map[string]*NodeEvent

	// UpdatedAt represents the server time of receiving the request
	UpdatedAt int64

	WriteRequest
}

// DrainUpdate is used to update the drain of a node
type DrainUpdate struct {
	// DrainStrategy is the new strategy for the node
	DrainStrategy *DrainStrategy

	// MarkEligible marks the node as eligible if removing the drain strategy.
	MarkEligible bool
}

// NodeUpdateEligibilityRequest is used for updating the scheduling eligibility
type NodeUpdateEligibilityRequest struct {
	NodeID      string
	Eligibility string

	// NodeEvent is the event added to the node
	NodeEvent *NodeEvent

	// UpdatedAt represents the server time of receiving the request
	UpdatedAt int64

	WriteRequest
}

// NodeEvaluateRequest is used to re-evaluate the node
type NodeEvaluateRequest struct {
	NodeID string
	WriteRequest
}

// NodeSpecificRequest is used when we just need to specify a target node
type NodeSpecificRequest struct {
	NodeID   string
	SecretID string
	QueryOptions
}

// SearchResponse is used to return matches and information about whether
// the match list is truncated specific to each type of context.
type SearchResponse struct {
	// Map of context types to ids which match a specified prefix
	Matches map[Context][]string

	// Truncations indicates whether the matches for a particular context have
	// been truncated
	Truncations map[Context]bool

	QueryMeta
}

// SearchRequest is used to parameterize a request, and returns a
// list of matches made up of jobs, allocations, evaluations, and/or nodes,
// along with whether or not the information returned is truncated.
type SearchRequest struct {
	// Prefix is what ids are matched to. E.g., if the given prefix were
	// "a", potential matches might be "abcd" or "aabb"
	Prefix string

	// Context is the type that can be matched against. A context can be a job,
	// node, evaluation, allocation, or empty (indicating every context should
	// be matched)
	Context Context

	QueryOptions
}

// JobRegisterRequest is used for the Job.Register endpoint
// to register a job as being a schedulable entity.
type JobRegisterRequest struct {
	Job *Job

	// If EnforceIndex is set then the job will only be registered if the passed
	// JobModifyIndex matches the current job's index. If the index is zero, the
	// register only occurs if the job is new.
	EnforceIndex   bool
	JobModifyIndex uint64

	// PolicyOverride is set when the user is attempting to override any policies
	PolicyOverride bool

	WriteRequest
}
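
// exampleRegisterIfNew is a hedged sketch of the check-and-set semantics
// described above: with EnforceIndex set and JobModifyIndex zero, the
// register succeeds only if the job does not already exist.
func exampleRegisterIfNew(job *Job) *JobRegisterRequest {
	return &JobRegisterRequest{
		Job:            job,
		EnforceIndex:   true,
		JobModifyIndex: 0, // zero: only register if the job is new
	}
}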

// JobDeregisterRequest is used for the Job.Deregister endpoint
// to deregister a job as being a schedulable entity.
type JobDeregisterRequest struct {
	JobID string

	// Purge controls whether the deregister purges the job from the system or
	// whether the job is just marked as stopped and will be removed by the
	// garbage collector
	Purge bool

	WriteRequest
}

// JobBatchDeregisterRequest is used to batch deregister jobs and upsert
// evaluations.
type JobBatchDeregisterRequest struct {
	// Jobs is the set of jobs to deregister
	Jobs map[NamespacedID]*JobDeregisterOptions

	// Evals is the set of evaluations to create.
	Evals []*Evaluation

	WriteRequest
}

// JobDeregisterOptions configures how a job is deregistered.
type JobDeregisterOptions struct {
	// Purge controls whether the deregister purges the job from the system or
	// whether the job is just marked as stopped and will be removed by the
	// garbage collector
	Purge bool
}

// JobEvaluateRequest is used when we just need to re-evaluate a target job
type JobEvaluateRequest struct {
	JobID       string
	EvalOptions EvalOptions
	WriteRequest
}

// EvalOptions is used to encapsulate options when forcing a job evaluation
type EvalOptions struct {
	ForceReschedule bool
}

// JobSpecificRequest is used when we just need to specify a target job
type JobSpecificRequest struct {
	JobID string
	All   bool
	QueryOptions
}

// JobListRequest is used to parameterize a list request
type JobListRequest struct {
	QueryOptions
}

// JobPlanRequest is used for the Job.Plan endpoint to trigger a dry-run
// evaluation of the Job.
type JobPlanRequest struct {
	Job  *Job
	Diff bool // Toggles an annotated diff
	// PolicyOverride is set when the user is attempting to override any policies
	PolicyOverride bool
	WriteRequest
}

// JobScaleRequest is used for the Job.Scale endpoint to scale one of the
// scaling targets in a job
type JobScaleRequest struct {
	Namespace string
	JobID     string
	Target    map[string]string
	Count     *int64
	Message   string
	Error     bool
	Meta      map[string]interface{}
	// PolicyOverride is set when the user is attempting to override any policies
	PolicyOverride bool
	WriteRequest
}

// JobSummaryRequest is used when we just need to get a specific job summary
type JobSummaryRequest struct {
	JobID string
	QueryOptions
}

// JobScaleStatusRequest is used to get the scale status for a job
type JobScaleStatusRequest struct {
	JobID string
	QueryOptions
}

// JobDispatchRequest is used to dispatch a job based on a parameterized job
type JobDispatchRequest struct {
	JobID   string
	Payload []byte
	Meta    map[string]string
	WriteRequest
}

// JobValidateRequest is used to validate a job
type JobValidateRequest struct {
	Job *Job
	WriteRequest
}

// JobRevertRequest is used to revert a job to a prior version.
type JobRevertRequest struct {
	// JobID is the ID of the job being reverted
	JobID string

	// JobVersion is the version to revert to.
	JobVersion uint64

	// EnforcePriorVersion if set will enforce that the job is at the given
	// version before reverting.
	EnforcePriorVersion *uint64

	// ConsulToken is the Consul token that proves the submitter of the job revert
	// has access to the Service Identity policies associated with the job's
	// Consul Connect enabled services. This field is only used to transfer the
	// token and is not stored after the Job revert.
	ConsulToken string

	// VaultToken is the Vault token that proves the submitter of the job revert
	// has access to any Vault policies specified in the targeted job version. This
	// field is only used to transfer the token and is not stored after the Job
	// revert.
	VaultToken string

	WriteRequest
}

// JobStabilityRequest is used to mark a job as stable.
type JobStabilityRequest struct {
	// Job to set the stability on
	JobID      string
	JobVersion uint64

	// Set the stability
	Stable bool
	WriteRequest
}

// JobStabilityResponse is the response when marking a job as stable.
type JobStabilityResponse struct {
	WriteMeta
}

// NodeListRequest is used to parameterize a list request
type NodeListRequest struct {
	QueryOptions
}

// EvalUpdateRequest is used for upserting evaluations.
type EvalUpdateRequest struct {
	Evals     []*Evaluation
	EvalToken string
	WriteRequest
}

// EvalDeleteRequest is used for deleting an evaluation.
type EvalDeleteRequest struct {
	Evals  []string
	Allocs []string
	WriteRequest
}

// EvalSpecificRequest is used when we just need to specify a target evaluation
type EvalSpecificRequest struct {
	EvalID string
	QueryOptions
}

// EvalAckRequest is used to Ack/Nack a specific evaluation
type EvalAckRequest struct {
	EvalID string
	Token  string
	WriteRequest
}

// EvalDequeueRequest is used when we want to dequeue an evaluation
type EvalDequeueRequest struct {
	Schedulers       []string
	Timeout          time.Duration
	SchedulerVersion uint16
	WriteRequest
}

// EvalListRequest is used to list the evaluations
type EvalListRequest struct {
	QueryOptions
}

// PlanRequest is used to submit an allocation plan to the leader
type PlanRequest struct {
	Plan *Plan
	WriteRequest
}

// ApplyPlanResultsRequest is used by the planner to apply a Raft transaction
// committing the result of a plan.
type ApplyPlanResultsRequest struct {
	// AllocUpdateRequest holds the allocation updates to be made by the
	// scheduler.
	AllocUpdateRequest

	// Deployment is the deployment created or updated as a result of a
	// scheduling event.
	Deployment *Deployment

	// DeploymentUpdates is a set of status updates to apply to the given
	// deployments. This allows the scheduler to cancel any unneeded deployment
	// because the job is stopped or the update block is removed.
	DeploymentUpdates []*DeploymentStatusUpdate

	// EvalID is the eval ID of the plan being applied. The modify index of the
	// evaluation is updated as part of applying the plan to ensure that subsequent
	// scheduling events for the same job will wait for the index that last produced
	// state changes. This is necessary for blocked evaluations since they can be
	// processed many times, potentially making state updates, without the state of
	// the evaluation itself being updated.
	EvalID string

	// COMPAT 0.11
	// NodePreemptions is a slice of allocations from other lower priority jobs
	// that are preempted. Preempted allocations are marked as evicted.
	// Deprecated: Replaced with AllocsPreempted which contains only the diff
	NodePreemptions []*Allocation

	// AllocsPreempted is a slice of allocation diffs from other lower priority jobs
	// that are preempted. Preempted allocations are marked as evicted.
	AllocsPreempted []*AllocationDiff

	// PreemptionEvals is a slice of follow up evals for jobs whose allocations
	// have been preempted to place allocs in this plan
	PreemptionEvals []*Evaluation
}

// AllocUpdateRequest is used to submit changes to allocations, either
// to cause evictions or to assign new allocations. Both can be done
// within a single transaction
type AllocUpdateRequest struct {
	// COMPAT 0.11
	// Alloc is the list of new allocations to assign
	// Deprecated: Replaced with two separate slices, one containing stopped allocations
	// and another containing updated allocations
	Alloc []*Allocation

	// Allocations to stop. Contains only the diff, not the entire allocation
	AllocsStopped []*AllocationDiff

	// New or updated allocations
	AllocsUpdated []*Allocation

	// Evals is the list of new evaluations to create
	// Evals are valid only when used in the Raft RPC
	Evals []*Evaluation

	// Job is the shared parent job of the allocations.
	// It is pulled out since it is common to reduce payload size.
	Job *Job

	WriteRequest
}

// AllocUpdateDesiredTransitionRequest is used to submit changes to allocations
// desired transition state.
type AllocUpdateDesiredTransitionRequest struct {
	// Allocs is the mapping of allocation ids to their desired state
	// transition
	Allocs map[string]*DesiredTransition

	// Evals is the set of evaluations to create
	Evals []*Evaluation

	WriteRequest
}

// AllocStopRequest is used to stop and reschedule a running Allocation.
type AllocStopRequest struct {
	AllocID string

	WriteRequest
}

// AllocStopResponse is the response to an `AllocStopRequest`
type AllocStopResponse struct {
	// EvalID is the id of the follow-up evaluation for the rescheduled alloc.
	EvalID string

	WriteMeta
}

// AllocListRequest is used to request a list of allocations
type AllocListRequest struct {
	QueryOptions
}

// AllocSpecificRequest is used to query a specific allocation
type AllocSpecificRequest struct {
	AllocID string
	QueryOptions
}

// AllocSignalRequest is used to signal a specific allocation
type AllocSignalRequest struct {
	AllocID string
	Task    string
	Signal  string
	QueryOptions
}

// AllocsGetRequest is used to query a set of allocations
type AllocsGetRequest struct {
	AllocIDs []string
	QueryOptions
}

// AllocRestartRequest is used to restart a specific allocation's tasks.
type AllocRestartRequest struct {
	AllocID  string
	TaskName string

	QueryOptions
}

// PeriodicForceRequest is used to force a specific periodic job.
type PeriodicForceRequest struct {
	JobID string
	WriteRequest
}

// ServerMembersResponse has the list of servers in a cluster
type ServerMembersResponse struct {
	ServerName   string
	ServerRegion string
	ServerDC     string
	Members      []*ServerMember
}

// ServerMember holds information about a Nomad server agent in a cluster
type ServerMember struct {
	Name        string
	Addr        net.IP
	Port        uint16
	Tags        map[string]string
	Status      string
	ProtocolMin uint8
	ProtocolMax uint8
	ProtocolCur uint8
	DelegateMin uint8
	DelegateMax uint8
	DelegateCur uint8
}

// ClusterMetadata is used to store per-cluster metadata.
type ClusterMetadata struct {
	ClusterID  string
	CreateTime int64
}

// DeriveVaultTokenRequest is used to request wrapped Vault tokens for the
// listed tasks in the given allocation
type DeriveVaultTokenRequest struct {
	NodeID   string
	SecretID string
	AllocID  string
	Tasks    []string
	QueryOptions
}

// VaultAccessorsRequest is used to operate on a set of Vault accessors
type VaultAccessorsRequest struct {
	Accessors []*VaultAccessor
}

// VaultAccessor is a reference to a created Vault token on behalf of
// an allocation's task.
type VaultAccessor struct {
	AllocID     string
	Task        string
	NodeID      string
	Accessor    string
	CreationTTL int

	// Raft Indexes
	CreateIndex uint64
}

// DeriveVaultTokenResponse returns the wrapped tokens for each requested task
type DeriveVaultTokenResponse struct {
	// Tasks is a mapping between the task name and the wrapped token
	Tasks map[string]string

	// Error stores any error that occurred. Errors are stored here so we can
	// communicate whether it is retryable
	Error *RecoverableError

	QueryMeta
}

// GenericRequest is used for requests where no
// specific information is needed.
type GenericRequest struct {
	QueryOptions
}

// DeploymentListRequest is used to list the deployments
type DeploymentListRequest struct {
	QueryOptions
}

// DeploymentDeleteRequest is used for deleting deployments.
type DeploymentDeleteRequest struct {
	Deployments []string
	WriteRequest
}

// DeploymentStatusUpdateRequest is used to update the status of a deployment as
// well as optionally creating an evaluation atomically.
type DeploymentStatusUpdateRequest struct {
	// Eval, if set, is used to create an evaluation at the same time as
	// updating the status of a deployment.
	Eval *Evaluation

	// DeploymentUpdate is a status update to apply to the given
	// deployment.
	DeploymentUpdate *DeploymentStatusUpdate

	// Job is used to optionally upsert a job. This is used when setting the
	// allocation health results in a deployment failure and the deployment
	// auto-reverts to the latest stable job.
	Job *Job
}

// DeploymentAllocHealthRequest is used to set the health of a set of
// allocations as part of a deployment.
type DeploymentAllocHealthRequest struct {
	DeploymentID string

	// Marks these allocations as healthy, allowing further allocations
	// to be rolled.
	HealthyAllocationIDs []string

	// Any unhealthy allocations fail the deployment
	UnhealthyAllocationIDs []string

	WriteRequest
}

// ApplyDeploymentAllocHealthRequest is used to apply an alloc health request via Raft
type ApplyDeploymentAllocHealthRequest struct {
	DeploymentAllocHealthRequest

	// Timestamp is the timestamp to use when setting the allocations health.
	Timestamp time.Time

	// An optional field to update the status of a deployment
	DeploymentUpdate *DeploymentStatusUpdate

	// Job is used to optionally upsert a job. This is used when setting the
	// allocation health results in a deployment failure and the deployment
	// auto-reverts to the latest stable job.
	Job *Job

	// An optional evaluation to create after promoting the canaries
	Eval *Evaluation
}

// DeploymentPromoteRequest is used to promote task groups in a deployment
type DeploymentPromoteRequest struct {
	DeploymentID string

	// All is to promote all task groups
	All bool

	// Groups is used to set the promotion status per task group
	Groups []string

	WriteRequest
}

// ApplyDeploymentPromoteRequest is used to apply a promotion request via Raft
type ApplyDeploymentPromoteRequest struct {
	DeploymentPromoteRequest

	// An optional evaluation to create after promoting the canaries
	Eval *Evaluation
}

// DeploymentPauseRequest is used to pause a deployment
type DeploymentPauseRequest struct {
	DeploymentID string

	// Pause sets the pause status
	Pause bool

	WriteRequest
}

// DeploymentSpecificRequest is used to make a request specific to a particular
// deployment
type DeploymentSpecificRequest struct {
	DeploymentID string
	QueryOptions
}

// DeploymentFailRequest is used to fail a particular deployment
type DeploymentFailRequest struct {
	DeploymentID string
	WriteRequest
}

// ScalingPolicySpecificRequest is used when we just need to specify a target scaling policy
type ScalingPolicySpecificRequest struct {
	ID string
	QueryOptions
}

// SingleScalingPolicyResponse is used to return a single scaling policy
type SingleScalingPolicyResponse struct {
	Policy *ScalingPolicy
	QueryMeta
}

// ScalingPolicyListRequest is used to parameterize a scaling policy list request
type ScalingPolicyListRequest struct {
	QueryOptions
}

// ScalingPolicyListResponse is used for a list request
type ScalingPolicyListResponse struct {
	Policies []*ScalingPolicyListStub
	QueryMeta
}

// SingleDeploymentResponse is used to respond with a single deployment
type SingleDeploymentResponse struct {
	Deployment *Deployment
	QueryMeta
}

// GenericResponse is used to respond to a request where no
// specific response information is needed.
type GenericResponse struct {
	WriteMeta
}

// VersionResponse is used for the Status.Version response
type VersionResponse struct {
	Build    string
	Versions map[string]int
	QueryMeta
}

// JobRegisterResponse is used to respond to a job registration
type JobRegisterResponse struct {
	EvalID          string
	EvalCreateIndex uint64
	JobModifyIndex  uint64

	// Warnings contains any warnings about the given job. These may include
	// deprecation warnings.
	Warnings string

	QueryMeta
}

// JobDeregisterResponse is used to respond to a job deregistration
type JobDeregisterResponse struct {
	EvalID          string
	EvalCreateIndex uint64
	JobModifyIndex  uint64
	VolumeEvalID    string
	VolumeEvalIndex uint64
	QueryMeta
}

// JobBatchDeregisterResponse is used to respond to a batch job deregistration
type JobBatchDeregisterResponse struct {
	// JobEvals maps the job to its created evaluation
	JobEvals map[NamespacedID]string
	QueryMeta
}

// JobValidateResponse is the response from a validate request
type JobValidateResponse struct {
	// DriverConfigValidated indicates whether the agent validated the driver
	// config
	DriverConfigValidated bool

	// ValidationErrors is a list of validation errors
	ValidationErrors []string

	// Error is a string version of any error that may have occurred
	Error string

	// Warnings contains any warnings about the given job. These may include
	// deprecation warnings.
	Warnings string
}

// NodeUpdateResponse is used to respond to a node update
type NodeUpdateResponse struct {
	HeartbeatTTL    time.Duration
	EvalIDs         []string
	EvalCreateIndex uint64
	NodeModifyIndex uint64

	// LeaderRPCAddr is the RPC address of the current Raft Leader. If
	// empty, the current Nomad Server is in the minority of a partition.
	LeaderRPCAddr string

	// NumNodes is the number of Nomad nodes attached to this quorum of
	// Nomad Servers at the time of the response. This value can
	// fluctuate based on the health of the cluster between heartbeats.
	NumNodes int32

	// Servers is the full list of known Nomad servers in the local
	// region.
	Servers []*NodeServerInfo

	QueryMeta
}

// NodeDrainUpdateResponse is used to respond to a node drain update
type NodeDrainUpdateResponse struct {
	NodeModifyIndex uint64
	EvalIDs         []string
	EvalCreateIndex uint64
	WriteMeta
}

// NodeEligibilityUpdateResponse is used to respond to a node eligibility update
type NodeEligibilityUpdateResponse struct {
	NodeModifyIndex uint64
	EvalIDs         []string
	EvalCreateIndex uint64
	WriteMeta
}

// NodeAllocsResponse is used to return allocs for a single node
type NodeAllocsResponse struct {
	Allocs []*Allocation
	QueryMeta
}

// NodeClientAllocsResponse is used to return allocation metadata for a single node
type NodeClientAllocsResponse struct {
	Allocs map[string]uint64

	// MigrateTokens are used when ACLs are enabled to allow cross-node,
	// authenticated access to sticky volumes
	MigrateTokens map[string]string

	QueryMeta
}

// SingleNodeResponse is used to return a single node
type SingleNodeResponse struct {
	Node *Node
	QueryMeta
}

// NodeListResponse is used for a list request
type NodeListResponse struct {
	Nodes []*NodeListStub
	QueryMeta
}

// SingleJobResponse is used to return a single job
type SingleJobResponse struct {
	Job *Job
	QueryMeta
}

// JobSummaryResponse is used to return a single job summary
type JobSummaryResponse struct {
	JobSummary *JobSummary
	QueryMeta
}

// JobScaleStatusResponse is used to return the scale status for a job
type JobScaleStatusResponse struct {
	JobScaleStatus *JobScaleStatus
	QueryMeta
}

type JobScaleStatus struct {
	JobID          string
	JobCreateIndex uint64
	JobModifyIndex uint64
	JobStopped     bool
	TaskGroups     map[string]*TaskGroupScaleStatus
}

// TaskGroupScaleStatus is used to return the scale status for a given task group
type TaskGroupScaleStatus struct {
	Desired   int
	Placed    int
	Running   int
	Healthy   int
	Unhealthy int
	Events    []*ScalingEvent
}

type JobDispatchResponse struct {
	DispatchedJobID string
	EvalID          string
	EvalCreateIndex uint64
	JobCreateIndex  uint64
	WriteMeta
}

// JobListResponse is used for a list request
type JobListResponse struct {
	Jobs []*JobListStub
	QueryMeta
}

// JobVersionsRequest is used to get a job's versions
type JobVersionsRequest struct {
	JobID string
	Diffs bool
	QueryOptions
}

// JobVersionsResponse is used for a job get versions request
type JobVersionsResponse struct {
	Versions []*Job
	Diffs    []*JobDiff
	QueryMeta
}

// JobPlanResponse is used to respond to a job plan request
type JobPlanResponse struct {
	// Annotations stores annotations explaining decisions the scheduler made.
	Annotations *PlanAnnotations

	// FailedTGAllocs is the placement failures per task group.
	FailedTGAllocs map[string]*AllocMetric

	// JobModifyIndex is the modification index of the job. The value can be
	// used when running `nomad run` to ensure that the Job wasn't modified
	// since the last plan. If the job is being created, the value is zero.
	JobModifyIndex uint64

	// CreatedEvals is the set of evaluations created by the scheduler. The
	// reasons for this can be rolling-updates or blocked evals.
	CreatedEvals []*Evaluation

	// Diff contains the diff of the job and annotations on whether the change
	// causes an in-place update or create/destroy
	Diff *JobDiff

	// NextPeriodicLaunch is the time the job would next be launched if
	// submitted.
	NextPeriodicLaunch time.Time

	// Warnings contains any warnings about the given job. These may include
	// deprecation warnings.
	Warnings string

	WriteMeta
}

// SingleAllocResponse is used to return a single allocation
type SingleAllocResponse struct {
	Alloc *Allocation
	QueryMeta
}

// AllocsGetResponse is used to return a set of allocations
type AllocsGetResponse struct {
	Allocs []*Allocation
	QueryMeta
}

// JobAllocationsResponse is used to return the allocations for a job
type JobAllocationsResponse struct {
	Allocations []*AllocListStub
	QueryMeta
}

// JobEvaluationsResponse is used to return the evaluations for a job
type JobEvaluationsResponse struct {
	Evaluations []*Evaluation
	QueryMeta
}

// SingleEvalResponse is used to return a single evaluation
type SingleEvalResponse struct {
	Eval *Evaluation
	QueryMeta
}

// EvalDequeueResponse is used to return from a dequeue
type EvalDequeueResponse struct {
	Eval  *Evaluation
	Token string

	// WaitIndex is the Raft index the worker should wait until invoking the
	// scheduler.
	WaitIndex uint64

	QueryMeta
}

// GetWaitIndex is used to retrieve the Raft index that state should be at or
// beyond before invoking the scheduler.
func (e *EvalDequeueResponse) GetWaitIndex() uint64 {
	// Prefer the wait index sent. This will be populated on all responses from
	// 0.7.0 and above
	if e.WaitIndex != 0 {
		return e.WaitIndex
	} else if e.Eval != nil {
		return e.Eval.ModifyIndex
	}

	// This should never happen
	return 1
}
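
// A small illustration of the fallback above (values are hypothetical):
// responses from pre-0.7.0 servers leave WaitIndex unset, so the eval's
// ModifyIndex is used instead.
func exampleWaitIndex() uint64 {
	resp := &EvalDequeueResponse{Eval: &Evaluation{ModifyIndex: 42}}
	return resp.GetWaitIndex() // 42: WaitIndex is zero, so fall back to the eval
}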

// PlanResponse is used to return from a PlanRequest
type PlanResponse struct {
	Result *PlanResult
	WriteMeta
}

// AllocListResponse is used for a list request
type AllocListResponse struct {
	Allocations []*AllocListStub
	QueryMeta
}

// DeploymentListResponse is used for a list request
type DeploymentListResponse struct {
	Deployments []*Deployment
	QueryMeta
}

// EvalListResponse is used for a list request
type EvalListResponse struct {
	Evaluations []*Evaluation
	QueryMeta
}

// EvalAllocationsResponse is used to return the allocations for an evaluation
type EvalAllocationsResponse struct {
	Allocations []*AllocListStub
	QueryMeta
}

// PeriodicForceResponse is used to respond to a periodic job force launch
type PeriodicForceResponse struct {
	EvalID          string
	EvalCreateIndex uint64
	WriteMeta
}

// DeploymentUpdateResponse is used to respond to a deployment change. The
// response will include the modify index of the deployment as well as details
// of any triggered evaluation.
type DeploymentUpdateResponse struct {
	EvalID                string
	EvalCreateIndex       uint64
	DeploymentModifyIndex uint64

	// RevertedJobVersion is the version the job was reverted to. If unset, the
	// job wasn't reverted
	RevertedJobVersion *uint64

	WriteMeta
}

// NodeConnQueryResponse is used to respond to a query of whether a server has
// a connection to a specific Node
type NodeConnQueryResponse struct {
	// Connected indicates whether a connection to the Client exists
	Connected bool

	// Established marks the time at which the connection was established
	Established time.Time

	QueryMeta
}

// EmitNodeEventsRequest is a request to update the node events source
// with a new client-side event
type EmitNodeEventsRequest struct {
	// NodeEvents is a map where the key is a node id and the value is a list
	// of events for that node
	NodeEvents map[string][]*NodeEvent

	WriteRequest
}

// EmitNodeEventsResponse is a response to the client about the status of
// the node event source update.
type EmitNodeEventsResponse struct {
	WriteMeta
}

const (
	NodeEventSubsystemDrain     = "Drain"
	NodeEventSubsystemDriver    = "Driver"
	NodeEventSubsystemHeartbeat = "Heartbeat"
	NodeEventSubsystemCluster   = "Cluster"
	NodeEventSubsystemStorage   = "Storage"
)

// NodeEvent is a single unit representing a node's state change
type NodeEvent struct {
	Message     string
	Subsystem   string
	Details     map[string]string
	Timestamp   time.Time
	CreateIndex uint64
}

func (ne *NodeEvent) String() string {
	var details []string
	for k, v := range ne.Details {
		details = append(details, fmt.Sprintf("%s: %s", k, v))
	}

	return fmt.Sprintf("Message: %s, Subsystem: %s, Details: %s, Timestamp: %s", ne.Message, ne.Subsystem, strings.Join(details, ","), ne.Timestamp.String())
}

func (ne *NodeEvent) Copy() *NodeEvent {
	c := new(NodeEvent)
	*c = *ne
	c.Details = helper.CopyMapStringString(ne.Details)
	return c
}

// NewNodeEvent generates a new node event storing the current time as the
// timestamp
func NewNodeEvent() *NodeEvent {
	return &NodeEvent{Timestamp: time.Now()}
}

// SetMessage is used to set the message on the node event
func (ne *NodeEvent) SetMessage(msg string) *NodeEvent {
	ne.Message = msg
	return ne
}

// SetSubsystem is used to set the subsystem on the node event
func (ne *NodeEvent) SetSubsystem(sys string) *NodeEvent {
	ne.Subsystem = sys
	return ne
}

// SetTimestamp is used to set the timestamp on the node event
func (ne *NodeEvent) SetTimestamp(ts time.Time) *NodeEvent {
	ne.Timestamp = ts
	return ne
}

// AddDetail is used to add a detail to the node event
func (ne *NodeEvent) AddDetail(k, v string) *NodeEvent {
	if ne.Details == nil {
		ne.Details = make(map[string]string, 1)
	}
	ne.Details[k] = v
	return ne
}
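
// exampleNodeEvent shows the builder-style API above; the subsystem,
// message, and detail values are illustrative only.
func exampleNodeEvent() *NodeEvent {
	return NewNodeEvent().
		SetSubsystem(NodeEventSubsystemDrain).
		SetMessage("Node drain started").
		AddDetail("deadline", "1h")
}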

const (
	NodeStatusInit  = "initializing"
	NodeStatusReady = "ready"
	NodeStatusDown  = "down"
)

// ShouldDrainNode checks if a given node status should trigger an
// evaluation. Some states don't require any further action.
func ShouldDrainNode(status string) bool {
	switch status {
	case NodeStatusInit, NodeStatusReady:
		return false
	case NodeStatusDown:
		return true
	default:
		panic(fmt.Sprintf("unhandled node status %s", status))
	}
}

// ValidNodeStatus is used to check if a node status is valid
func ValidNodeStatus(status string) bool {
	switch status {
	case NodeStatusInit, NodeStatusReady, NodeStatusDown:
		return true
	default:
		return false
	}
}
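
// Because ShouldDrainNode panics on unhandled statuses, callers are expected
// to validate the status first; a minimal sketch of that guard:
func exampleNodeStatusCheck(status string) bool {
	return ValidNodeStatus(status) && ShouldDrainNode(status)
}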

const (
	// NodeSchedulingEligible and Ineligible marks the node as eligible or not,
	// respectively, for receiving allocations. This is orthogonal to the node
	// status being ready.
	NodeSchedulingEligible   = "eligible"
	NodeSchedulingIneligible = "ineligible"
)

// DrainSpec describes a Node's desired drain behavior.
type DrainSpec struct {
	// Deadline is the duration after StartTime when the remaining
	// allocations on a draining Node should be told to stop.
	Deadline time.Duration

	// IgnoreSystemJobs allows system jobs to remain on the node even though it
	// has been marked for draining.
	IgnoreSystemJobs bool
}

// DrainStrategy describes a Node's drain behavior.
type DrainStrategy struct {
	// DrainSpec is the user declared drain specification
	DrainSpec

	// ForceDeadline is the deadline time for the drain after which drains will
	// be forced
	ForceDeadline time.Time

	// StartedAt is the time the drain process started
	StartedAt time.Time
}

func (d *DrainStrategy) Copy() *DrainStrategy {
	if d == nil {
		return nil
	}

	nd := new(DrainStrategy)
	*nd = *d
	return nd
}

// DeadlineTime returns whether the drain strategy allows an infinite duration,
// or otherwise the deadline time. A force drain is captured by the deadline
// time being in the past.
func (d *DrainStrategy) DeadlineTime() (infinite bool, deadline time.Time) {
	// Treat the nil case as a force drain so that during an upgrade, a node
	// that has no drain strategy but has Drain set to true is treated as a
	// force to mimic old behavior.
	if d == nil {
		return false, time.Time{}
	}

	ns := d.Deadline.Nanoseconds()
	switch {
	case ns < 0: // Force
		return false, time.Time{}
	case ns == 0: // Infinite
		return true, time.Time{}
	default:
		return false, d.ForceDeadline
	}
}
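
// exampleDeadlines illustrates the three Deadline cases handled above:
// negative forces an immediate drain, zero allows an infinite drain, and a
// positive duration yields ForceDeadline. Values are illustrative.
func exampleDeadlines() {
	force := &DrainStrategy{DrainSpec: DrainSpec{Deadline: -1}}
	inf, _ := force.DeadlineTime() // inf == false, deadline is the zero time: force

	infinite := &DrainStrategy{DrainSpec: DrainSpec{Deadline: 0}}
	inf, _ = infinite.DeadlineTime() // inf == true: no deadline

	timed := &DrainStrategy{
		DrainSpec:     DrainSpec{Deadline: time.Hour},
		ForceDeadline: time.Now().Add(time.Hour),
	}
	inf, deadline := timed.DeadlineTime() // inf == false, deadline == ForceDeadline
	_, _ = inf, deadline
}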

func (d *DrainStrategy) Equal(o *DrainStrategy) bool {
	if d == nil && o == nil {
		return true
	} else if o != nil && d == nil {
		return false
	} else if d != nil && o == nil {
		return false
	}

	// Compare values
	if d.ForceDeadline != o.ForceDeadline {
		return false
	} else if d.Deadline != o.Deadline {
		return false
	} else if d.IgnoreSystemJobs != o.IgnoreSystemJobs {
		return false
	}

	return true
}
  1640  
  1641  // Node is a representation of a schedulable client node
  1642  type Node struct {
  1643  	// ID is a unique identifier for the node. As a simple approach it can
  1644  	// be constructed by concatenating the Name and Datacenter;
  1645  	// alternatively a UUID may be used.
  1646  	ID string
  1647  
  1648  	// SecretID is an ID that is only known by the Node and the set of Servers.
  1649  	// It is not accessible via the API and is used to authenticate nodes
  1650  	// conducting privileged activities.
  1651  	SecretID string
  1652  
  1653  	// Datacenter for this node
  1654  	Datacenter string
  1655  
  1656  	// Node name
  1657  	Name string
  1658  
  1659  	// HTTPAddr is the address on which the Nomad client is listening for http
  1660  	// requests
  1661  	HTTPAddr string
  1662  
  1663  	// TLSEnabled indicates if the Agent has TLS enabled for the HTTP API
  1664  	TLSEnabled bool
  1665  
  1666  	// Attributes is an arbitrary set of key/value
  1667  	// data that can be used for constraints. Examples
  1668  	// include "kernel.name=linux", "arch=386", "driver.docker=1",
  1669  	// "docker.runtime=1.8.3"
  1670  	Attributes map[string]string
  1671  
  1672  	// NodeResources captures the available resources on the client.
  1673  	NodeResources *NodeResources
  1674  
  1675  	// ReservedResources captures the set resources on the client that are
  1676  	// reserved from scheduling.
  1677  	ReservedResources *NodeReservedResources
  1678  
  1679  	// Resources is the available resources on the client.
  1680  	// For example 'cpu=2' 'memory=2048'
  1681  	// COMPAT(0.10): Remove in 0.10
  1682  	Resources *Resources
  1683  
  1684  	// Reserved is the set of resources that are reserved,
  1685  	// and should be subtracted from the total resources for
  1686  	// the purposes of scheduling. This may be used to provide certain
  1687  	// high-watermark tolerances or to account for external schedulers
  1688  	// consuming resources.
  1689  	Reserved *Resources
  1690  
  1691  	// Links are used to 'link' this client to external
  1692  	// systems. For example 'consul=foo.dc1' 'aws=i-83212'
  1693  	// 'ami=ami-123'
  1694  	Links map[string]string
  1695  
  1696  	// Meta is used to associate arbitrary metadata with this
  1697  	// client. This is opaque to Nomad.
  1698  	Meta map[string]string
  1699  
  1700  	// NodeClass is an opaque identifier used to group nodes
  1701  	// together for the purpose of determining scheduling pressure.
  1702  	NodeClass string
  1703  
  1704  	// ComputedClass is a unique id that identifies nodes with a common set of
  1705  	// attributes and capabilities.
  1706  	ComputedClass string
  1707  
  1708  	// COMPAT: Remove in Nomad 0.9
  1709  	// Drain is controlled by the servers, and not the client.
  1710  	// If true, no jobs will be scheduled to this node, and existing
  1711  	// allocations will be drained. Superseded by DrainStrategy in Nomad
  1712  	// 0.8 but kept for backward compat.
  1713  	Drain bool
  1714  
  1715  	// DrainStrategy determines the node's draining behavior. Will be nil
  1716  	// when Drain=false.
  1717  	DrainStrategy *DrainStrategy
  1718  
  1719  	// SchedulingEligibility determines whether this node will receive new
  1720  	// placements.
  1721  	SchedulingEligibility string
  1722  
  1723  	// Status of this node
  1724  	Status string
  1725  
  1726  	// StatusDescription is meant to provide more human-friendly information
  1727  	StatusDescription string
  1728  
  1729  	// StatusUpdatedAt is the time stamp at which the state of the node was
  1730  	// updated
  1731  	StatusUpdatedAt int64
  1732  
  1733  	// Events is the most recent set of events generated for the node,
  1734  	// retaining only MaxRetainedNodeEvents number at a time
  1735  	Events []*NodeEvent
  1736  
  1737  	// Drivers is a map of driver names to current driver information
  1738  	Drivers map[string]*DriverInfo
  1739  
  1740  	// CSIControllerPlugins is a map of plugin names to current CSI Plugin info
  1741  	CSIControllerPlugins map[string]*CSIInfo
  1742  	// CSINodePlugins is a map of plugin names to current CSI Plugin info
  1743  	CSINodePlugins map[string]*CSIInfo
  1744  
  1745  	// HostVolumes is a map of host volume names to their configuration
  1746  	HostVolumes map[string]*ClientHostVolumeConfig
  1747  
  1748  	// Raft Indexes
  1749  	CreateIndex uint64
  1750  	ModifyIndex uint64
  1751  }
  1752  
  1753  // Ready returns true if the node is ready for running allocations
  1754  func (n *Node) Ready() bool {
  1755  	// Drain is checked directly to support pre-0.8 Node data
  1756  	return n.Status == NodeStatusReady && !n.Drain && n.SchedulingEligibility == NodeSchedulingEligible
  1757  }
  1758  
  1759  func (n *Node) Canonicalize() {
  1760  	if n == nil {
  1761  		return
  1762  	}
  1763  
  1764  	// COMPAT Remove in 0.10
  1765  	// In v0.8.0 we introduced scheduling eligibility, so we need to set it for
  1766  	// upgrading nodes
  1767  	if n.SchedulingEligibility == "" {
  1768  		if n.Drain {
  1769  			n.SchedulingEligibility = NodeSchedulingIneligible
  1770  		} else {
  1771  			n.SchedulingEligibility = NodeSchedulingEligible
  1772  		}
  1773  	}
  1774  }
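
        // Editor's illustrative sketch (not part of the original source): a pre-0.8
        // node that only carries the legacy Drain flag is made ineligible here:
        //
        //	n := &Node{Drain: true}
        //	n.Canonicalize() // n.SchedulingEligibility == NodeSchedulingIneligible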
  1775  
  1776  func (n *Node) Copy() *Node {
  1777  	if n == nil {
  1778  		return nil
  1779  	}
  1780  	nn := new(Node)
  1781  	*nn = *n
  1782  	nn.Attributes = helper.CopyMapStringString(nn.Attributes)
  1783  	nn.Resources = nn.Resources.Copy()
  1784  	nn.Reserved = nn.Reserved.Copy()
  1785  	nn.NodeResources = nn.NodeResources.Copy()
  1786  	nn.ReservedResources = nn.ReservedResources.Copy()
  1787  	nn.Links = helper.CopyMapStringString(nn.Links)
  1788  	nn.Meta = helper.CopyMapStringString(nn.Meta)
  1789  	nn.Events = copyNodeEvents(n.Events)
  1790  	nn.DrainStrategy = nn.DrainStrategy.Copy()
  1791  	nn.CSIControllerPlugins = copyNodeCSI(nn.CSIControllerPlugins)
  1792  	nn.CSINodePlugins = copyNodeCSI(nn.CSINodePlugins)
  1793  	nn.Drivers = copyNodeDrivers(n.Drivers)
  1794  	nn.HostVolumes = copyNodeHostVolumes(n.HostVolumes)
  1795  	return nn
  1796  }
  1797  
  1798  // copyNodeEvents is a helper to copy a list of NodeEvents
  1799  func copyNodeEvents(events []*NodeEvent) []*NodeEvent {
  1800  	l := len(events)
  1801  	if l == 0 {
  1802  		return nil
  1803  	}
  1804  
  1805  	c := make([]*NodeEvent, l)
  1806  	for i, event := range events {
  1807  		c[i] = event.Copy()
  1808  	}
  1809  	return c
  1810  }
  1811  
  1812  // copyNodeCSI is a helper to copy a map of CSIInfo
  1813  func copyNodeCSI(plugins map[string]*CSIInfo) map[string]*CSIInfo {
  1814  	l := len(plugins)
  1815  	if l == 0 {
  1816  		return nil
  1817  	}
  1818  
  1819  	c := make(map[string]*CSIInfo, l)
  1820  	for plugin, info := range plugins {
  1821  		c[plugin] = info.Copy()
  1822  	}
  1823  
  1824  	return c
  1825  }
  1826  
  1827  // copyNodeDrivers is a helper to copy a map of DriverInfo
  1828  func copyNodeDrivers(drivers map[string]*DriverInfo) map[string]*DriverInfo {
  1829  	l := len(drivers)
  1830  	if l == 0 {
  1831  		return nil
  1832  	}
  1833  
  1834  	c := make(map[string]*DriverInfo, l)
  1835  	for driver, info := range drivers {
  1836  		c[driver] = info.Copy()
  1837  	}
  1838  	return c
  1839  }
  1840  
  1841  // copyNodeHostVolumes is a helper to copy a map of volume names to ClientHostVolumeConfig
  1842  func copyNodeHostVolumes(volumes map[string]*ClientHostVolumeConfig) map[string]*ClientHostVolumeConfig {
  1843  	l := len(volumes)
  1844  	if l == 0 {
  1845  		return nil
  1846  	}
  1847  
  1848  	c := make(map[string]*ClientHostVolumeConfig, l)
  1849  	for volume, v := range volumes {
  1850  		c[volume] = v.Copy()
  1851  	}
  1852  
  1853  	return c
  1854  }
  1855  
  1856  // TerminalStatus returns whether the current status is terminal and
  1857  // will no longer transition.
  1858  func (n *Node) TerminalStatus() bool {
  1859  	switch n.Status {
  1860  	case NodeStatusDown:
  1861  		return true
  1862  	default:
  1863  		return false
  1864  	}
  1865  }
  1866  
  1867  // COMPAT(0.11): Remove in 0.11
  1868  // ComparableReservedResources returns the reserved resources on the node
  1869  // handling upgrade paths. Reserved networks must be handled separately. After
  1870  // 0.11 calls to this should be replaced with:
  1871  // node.ReservedResources.Comparable()
  1872  func (n *Node) ComparableReservedResources() *ComparableResources {
  1873  	// See if we can no-op
  1874  	if n.Reserved == nil && n.ReservedResources == nil {
  1875  		return nil
  1876  	}
  1877  
  1878  	// Node already has 0.9+ behavior
  1879  	if n.ReservedResources != nil {
  1880  		return n.ReservedResources.Comparable()
  1881  	}
  1882  
  1883  	// Upgrade path
  1884  	return &ComparableResources{
  1885  		Flattened: AllocatedTaskResources{
  1886  			Cpu: AllocatedCpuResources{
  1887  				CpuShares: int64(n.Reserved.CPU),
  1888  			},
  1889  			Memory: AllocatedMemoryResources{
  1890  				MemoryMB: int64(n.Reserved.MemoryMB),
  1891  			},
  1892  		},
  1893  		Shared: AllocatedSharedResources{
  1894  			DiskMB: int64(n.Reserved.DiskMB),
  1895  		},
  1896  	}
  1897  }
  1898  
  1899  // COMPAT(0.11): Remove in 0.11
  1900  // ComparableResources returns the resources on the node
  1901  // handling upgrade paths. Networking must be handled separately. After 0.11
  1902  // calls to this should be replaced with: node.NodeResources.Comparable()
  1903  func (n *Node) ComparableResources() *ComparableResources {
  1904  	// Node already has 0.9+ behavior
  1905  	if n.NodeResources != nil {
  1906  		return n.NodeResources.Comparable()
  1907  	}
  1908  
  1909  	// Upgrade path
  1910  	return &ComparableResources{
  1911  		Flattened: AllocatedTaskResources{
  1912  			Cpu: AllocatedCpuResources{
  1913  				CpuShares: int64(n.Resources.CPU),
  1914  			},
  1915  			Memory: AllocatedMemoryResources{
  1916  				MemoryMB: int64(n.Resources.MemoryMB),
  1917  			},
  1918  		},
  1919  		Shared: AllocatedSharedResources{
  1920  			DiskMB: int64(n.Resources.DiskMB),
  1921  		},
  1922  	}
  1923  }
  1924  
  1925  // Stub returns a summarized version of the node
  1926  func (n *Node) Stub() *NodeListStub {
  1927  
  1928  	addr, _, _ := net.SplitHostPort(n.HTTPAddr)
  1929  
  1930  	return &NodeListStub{
  1931  		Address:               addr,
  1932  		ID:                    n.ID,
  1933  		Datacenter:            n.Datacenter,
  1934  		Name:                  n.Name,
  1935  		NodeClass:             n.NodeClass,
  1936  		Version:               n.Attributes["nomad.version"],
  1937  		Drain:                 n.Drain,
  1938  		SchedulingEligibility: n.SchedulingEligibility,
  1939  		Status:                n.Status,
  1940  		StatusDescription:     n.StatusDescription,
  1941  		Drivers:               n.Drivers,
  1942  		HostVolumes:           n.HostVolumes,
  1943  		CreateIndex:           n.CreateIndex,
  1944  		ModifyIndex:           n.ModifyIndex,
  1945  	}
  1946  }
  1947  
  1948  // NodeListStub is used to return a subset of node information
  1949  // for the node list
  1950  type NodeListStub struct {
  1951  	Address               string
  1952  	ID                    string
  1953  	Datacenter            string
  1954  	Name                  string
  1955  	NodeClass             string
  1956  	Version               string
  1957  	Drain                 bool
  1958  	SchedulingEligibility string
  1959  	Status                string
  1960  	StatusDescription     string
  1961  	Drivers               map[string]*DriverInfo
  1962  	HostVolumes           map[string]*ClientHostVolumeConfig
  1963  	CreateIndex           uint64
  1964  	ModifyIndex           uint64
  1965  }
  1966  
  1967  // Resources is used to define the resources available
  1968  // on a client
  1969  type Resources struct {
  1970  	CPU      int
  1971  	MemoryMB int
  1972  	DiskMB   int
  1973  	IOPS     int // COMPAT(0.10): Only being used to issue warnings
  1974  	Networks Networks
  1975  	Devices  ResourceDevices
  1976  }
  1977  
  1978  const (
  1979  	BytesInMegabyte = 1024 * 1024
  1980  )
  1981  
  1982  // DefaultResources is a small resources object that contains the
  1983  // default resource requests that we will provide to an object.
  1984  // ---  THIS FUNCTION IS REPLICATED IN api/resources.go and should
  1985  // be kept in sync.
  1986  func DefaultResources() *Resources {
  1987  	return &Resources{
  1988  		CPU:      100,
  1989  		MemoryMB: 300,
  1990  	}
  1991  }
  1992  
  1993  // MinResources is a small resources object that contains the
  1994  // absolute minimum resources that we will provide to an object.
  1995  // This should not be confused with the defaults which are
  1996  // provided in Canonicalize() ---  THIS FUNCTION IS REPLICATED IN
  1997  // api/resources.go and should be kept in sync.
  1998  func MinResources() *Resources {
  1999  	return &Resources{
  2000  		CPU:      20,
  2001  		MemoryMB: 10,
  2002  	}
  2003  }
  2004  
  2005  // DiskInBytes returns the amount of disk resources in bytes.
  2006  func (r *Resources) DiskInBytes() int64 {
  2007  	return int64(r.DiskMB) * BytesInMegabyte
  2008  }
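
        // Editor's illustrative sketch (not part of the original source):
        //
        //	r := &Resources{DiskMB: 300}
        //	_ = r.DiskInBytes() // 300 * 1024 * 1024 = 314572800 bytes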
  2009  
  2010  func (r *Resources) Validate() error {
  2011  	var mErr multierror.Error
  2012  	if err := r.MeetsMinResources(); err != nil {
  2013  		mErr.Errors = append(mErr.Errors, err)
  2014  	}
  2015  
  2016  	// Ensure the task isn't asking for disk resources
  2017  	if r.DiskMB > 0 {
  2018  		mErr.Errors = append(mErr.Errors, errors.New("Task can't ask for disk resources, they have to be specified at the task group level."))
  2019  	}
  2020  
  2021  	for i, d := range r.Devices {
  2022  		if err := d.Validate(); err != nil {
  2023  			mErr.Errors = append(mErr.Errors, fmt.Errorf("device %d failed validation: %v", i+1, err))
  2024  		}
  2025  	}
  2026  
  2027  	return mErr.ErrorOrNil()
  2028  }
  2029  
  2030  // Merge merges this resource with another resource.
  2031  // COMPAT(0.10): Remove in 0.10
  2032  func (r *Resources) Merge(other *Resources) {
  2033  	if other.CPU != 0 {
  2034  		r.CPU = other.CPU
  2035  	}
  2036  	if other.MemoryMB != 0 {
  2037  		r.MemoryMB = other.MemoryMB
  2038  	}
  2039  	if other.DiskMB != 0 {
  2040  		r.DiskMB = other.DiskMB
  2041  	}
  2042  	if len(other.Networks) != 0 {
  2043  		r.Networks = other.Networks
  2044  	}
  2045  	if len(other.Devices) != 0 {
  2046  		r.Devices = other.Devices
  2047  	}
  2048  }
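
        // Editor's illustrative sketch (not part of the original source): Merge
        // overrides only the fields that are set (non-zero) on the other resource:
        //
        //	r := &Resources{CPU: 100, MemoryMB: 300}
        //	r.Merge(&Resources{MemoryMB: 512})
        //	// r.CPU == 100 (kept), r.MemoryMB == 512 (overridden)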
  2049  
  2050  // COMPAT(0.10): Remove in 0.10
  2051  func (r *Resources) Equals(o *Resources) bool {
  2052  	if r == o {
  2053  		return true
  2054  	}
  2055  	if r == nil || o == nil {
  2056  		return false
  2057  	}
  2058  	return r.CPU == o.CPU &&
  2059  		r.MemoryMB == o.MemoryMB &&
  2060  		r.DiskMB == o.DiskMB &&
  2061  		r.IOPS == o.IOPS &&
  2062  		r.Networks.Equals(&o.Networks) &&
  2063  		r.Devices.Equals(&o.Devices)
  2064  }
  2065  
  2066  // COMPAT(0.10): Remove in 0.10
  2067  // ResourceDevices are part of Resources
  2068  type ResourceDevices []*RequestedDevice
  2069  
  2070  // COMPAT(0.10): Remove in 0.10
  2071  // Equals compares ResourceDevices as a set keyed by Name
  2072  func (d *ResourceDevices) Equals(o *ResourceDevices) bool {
  2073  	if d == o {
  2074  		return true
  2075  	}
  2076  	if d == nil || o == nil {
  2077  		return false
  2078  	}
  2079  	if len(*d) != len(*o) {
  2080  		return false
  2081  	}
  2082  	m := make(map[string]*RequestedDevice, len(*d))
  2083  	for _, e := range *d {
  2084  		m[e.Name] = e
  2085  	}
  2086  	for _, oe := range *o {
  2087  		de, ok := m[oe.Name]
  2088  		if !ok || !de.Equals(oe) {
  2089  			return false
  2090  		}
  2091  	}
  2092  	return true
  2093  }
  2094  
  2095  // COMPAT(0.10): Remove in 0.10
  2096  func (r *Resources) Canonicalize() {
  2097  	// Ensure that empty and nil slices are treated the same to avoid scheduling
  2098  	// problems since we use reflect.DeepEqual.
  2099  	if len(r.Networks) == 0 {
  2100  		r.Networks = nil
  2101  	}
  2102  	if len(r.Devices) == 0 {
  2103  		r.Devices = nil
  2104  	}
  2105  
  2106  	for _, n := range r.Networks {
  2107  		n.Canonicalize()
  2108  	}
  2109  }
  2110  
  2111  // MeetsMinResources returns an error if the resources specified are less than
  2112  // the minimum allowed.
  2113  // This is based on the minimums defined in the Resources type
  2114  // COMPAT(0.10): Remove in 0.10
  2115  func (r *Resources) MeetsMinResources() error {
  2116  	var mErr multierror.Error
  2117  	minResources := MinResources()
  2118  	if r.CPU < minResources.CPU {
  2119  		mErr.Errors = append(mErr.Errors, fmt.Errorf("minimum CPU value is %d; got %d", minResources.CPU, r.CPU))
  2120  	}
  2121  	if r.MemoryMB < minResources.MemoryMB {
  2122  		mErr.Errors = append(mErr.Errors, fmt.Errorf("minimum MemoryMB value is %d; got %d", minResources.MemoryMB, r.MemoryMB))
  2123  	}
  2124  	for i, n := range r.Networks {
  2125  		if err := n.MeetsMinResources(); err != nil {
  2126  			mErr.Errors = append(mErr.Errors, fmt.Errorf("network resource at index %d failed: %v", i, err))
  2127  		}
  2128  	}
  2129  
  2130  	return mErr.ErrorOrNil()
  2131  }
  2132  
  2133  // Copy returns a deep copy of the resources
  2134  func (r *Resources) Copy() *Resources {
  2135  	if r == nil {
  2136  		return nil
  2137  	}
  2138  	newR := new(Resources)
  2139  	*newR = *r
  2140  
  2141  	// Copy the network objects
  2142  	newR.Networks = r.Networks.Copy()
  2143  
  2144  	// Copy the devices
  2145  	if r.Devices != nil {
  2146  		n := len(r.Devices)
  2147  		newR.Devices = make([]*RequestedDevice, n)
  2148  		for i := 0; i < n; i++ {
  2149  			newR.Devices[i] = r.Devices[i].Copy()
  2150  		}
  2151  	}
  2152  
  2153  	return newR
  2154  }
  2155  
  2156  // NetIndex finds the matching net index using device name
  2157  // COMPAT(0.10): Remove in 0.10
  2158  func (r *Resources) NetIndex(n *NetworkResource) int {
  2159  	return r.Networks.NetIndex(n)
  2160  }
  2161  
  2162  // Superset checks if one set of resources is a superset
  2163  // of another. This ignores network resources, and the NetworkIndex
  2164  // should be used for that.
  2165  // COMPAT(0.10): Remove in 0.10
  2166  func (r *Resources) Superset(other *Resources) (bool, string) {
  2167  	if r.CPU < other.CPU {
  2168  		return false, "cpu"
  2169  	}
  2170  	if r.MemoryMB < other.MemoryMB {
  2171  		return false, "memory"
  2172  	}
  2173  	if r.DiskMB < other.DiskMB {
  2174  		return false, "disk"
  2175  	}
  2176  	return true, ""
  2177  }
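
        // Editor's illustrative sketch (not part of the original source): the second
        // return value names the first dimension found to be exceeded, if any:
        //
        //	node := &Resources{CPU: 4000, MemoryMB: 8192, DiskMB: 100000}
        //	ask := &Resources{CPU: 500, MemoryMB: 16384}
        //	ok, dim := node.Superset(ask) // ok == false, dim == "memory"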
  2178  
  2179  // Add adds the resources of the delta to this, potentially
  2180  // returning an error if not possible.
  2181  // COMPAT(0.10): Remove in 0.10
  2182  func (r *Resources) Add(delta *Resources) error {
  2183  	if delta == nil {
  2184  		return nil
  2185  	}
  2186  	r.CPU += delta.CPU
  2187  	r.MemoryMB += delta.MemoryMB
  2188  	r.DiskMB += delta.DiskMB
  2189  
  2190  	for _, n := range delta.Networks {
  2191  		// Find the matching interface by IP or CIDR
  2192  		idx := r.NetIndex(n)
  2193  		if idx == -1 {
  2194  			r.Networks = append(r.Networks, n.Copy())
  2195  		} else {
  2196  			r.Networks[idx].Add(n)
  2197  		}
  2198  	}
  2199  	return nil
  2200  }
  2201  
  2202  // COMPAT(0.10): Remove in 0.10
  2203  func (r *Resources) GoString() string {
  2204  	return fmt.Sprintf("*%#v", *r)
  2205  }
  2206  
  2207  type Port struct {
  2208  	Label string
  2209  	Value int
  2210  	To    int
  2211  }
  2212  
  2213  // NetworkResource is used to represent available network
  2214  // resources
  2215  type NetworkResource struct {
  2216  	Mode          string // Mode of the network
  2217  	Device        string // Name of the device
  2218  	CIDR          string // CIDR block of addresses
  2219  	IP            string // Host IP address
  2220  	MBits         int    // Throughput
  2221  	ReservedPorts []Port // Host Reserved ports
  2222  	DynamicPorts  []Port // Host Dynamically assigned ports
  2223  }
  2224  
  2225  func (nr *NetworkResource) Equals(other *NetworkResource) bool {
  2226  	if nr.Mode != other.Mode {
  2227  		return false
  2228  	}
  2229  
  2230  	if nr.Device != other.Device {
  2231  		return false
  2232  	}
  2233  
  2234  	if nr.CIDR != other.CIDR {
  2235  		return false
  2236  	}
  2237  
  2238  	if nr.IP != other.IP {
  2239  		return false
  2240  	}
  2241  
  2242  	if nr.MBits != other.MBits {
  2243  		return false
  2244  	}
  2245  
  2246  	if len(nr.ReservedPorts) != len(other.ReservedPorts) {
  2247  		return false
  2248  	}
  2249  
  2250  	for i, port := range nr.ReservedPorts {
  2251  		if len(other.ReservedPorts) <= i {
  2252  			return false
  2253  		}
  2254  		if port != other.ReservedPorts[i] {
  2255  			return false
  2256  		}
  2257  	}
  2258  
  2259  	if len(nr.DynamicPorts) != len(other.DynamicPorts) {
  2260  		return false
  2261  	}
  2262  	for i, port := range nr.DynamicPorts {
  2263  		if len(other.DynamicPorts) <= i {
  2264  			return false
  2265  		}
  2266  		if port != other.DynamicPorts[i] {
  2267  			return false
  2268  		}
  2269  	}
  2270  
  2271  	return true
  2272  }
  2273  
  2274  func (n *NetworkResource) Canonicalize() {
  2275  	// Ensure that empty and nil slices are treated the same to avoid scheduling
  2276  	// problems since we use reflect.DeepEqual.
  2277  	if len(n.ReservedPorts) == 0 {
  2278  		n.ReservedPorts = nil
  2279  	}
  2280  	if len(n.DynamicPorts) == 0 {
  2281  		n.DynamicPorts = nil
  2282  	}
  2283  }
  2284  
  2285  // MeetsMinResources returns an error if the resources specified are less than
  2286  // the minimum allowed.
  2287  func (n *NetworkResource) MeetsMinResources() error {
  2288  	var mErr multierror.Error
  2289  	if n.MBits < 1 {
  2290  		mErr.Errors = append(mErr.Errors, fmt.Errorf("minimum MBits value is 1; got %d", n.MBits))
  2291  	}
  2292  	return mErr.ErrorOrNil()
  2293  }
  2294  
  2295  // Copy returns a deep copy of the network resource
  2296  func (n *NetworkResource) Copy() *NetworkResource {
  2297  	if n == nil {
  2298  		return nil
  2299  	}
  2300  	newR := new(NetworkResource)
  2301  	*newR = *n
  2302  	if n.ReservedPorts != nil {
  2303  		newR.ReservedPorts = make([]Port, len(n.ReservedPorts))
  2304  		copy(newR.ReservedPorts, n.ReservedPorts)
  2305  	}
  2306  	if n.DynamicPorts != nil {
  2307  		newR.DynamicPorts = make([]Port, len(n.DynamicPorts))
  2308  		copy(newR.DynamicPorts, n.DynamicPorts)
  2309  	}
  2310  	return newR
  2311  }
  2312  
  2313  // Add adds the resources of the delta to this, potentially
  2314  // returning an error if not possible.
  2315  func (n *NetworkResource) Add(delta *NetworkResource) {
  2316  	if len(delta.ReservedPorts) > 0 {
  2317  		n.ReservedPorts = append(n.ReservedPorts, delta.ReservedPorts...)
  2318  	}
  2319  	n.MBits += delta.MBits
  2320  	n.DynamicPorts = append(n.DynamicPorts, delta.DynamicPorts...)
  2321  }
  2322  
  2323  func (n *NetworkResource) GoString() string {
  2324  	return fmt.Sprintf("*%#v", *n)
  2325  }
  2326  
  2327  // PortLabels returns a map of port labels to their assigned host ports.
  2328  func (n *NetworkResource) PortLabels() map[string]int {
  2329  	num := len(n.ReservedPorts) + len(n.DynamicPorts)
  2330  	labelValues := make(map[string]int, num)
  2331  	for _, port := range n.ReservedPorts {
  2332  		labelValues[port.Label] = port.Value
  2333  	}
  2334  	for _, port := range n.DynamicPorts {
  2335  		labelValues[port.Label] = port.Value
  2336  	}
  2337  	return labelValues
  2338  }
  2339  
  2340  // PortForService returns the Connect port for the given service. Returns false
  2341  // if no port was found for a service with that name.
  2342  func (n *NetworkResource) PortForService(serviceName string) (Port, bool) {
  2343  	label := fmt.Sprintf("%s-%s", ConnectProxyPrefix, serviceName)
  2344  	for _, port := range n.ReservedPorts {
  2345  		if port.Label == label {
  2346  			return port, true
  2347  		}
  2348  	}
  2349  	for _, port := range n.DynamicPorts {
  2350  		if port.Label == label {
  2351  			return port, true
  2352  		}
  2353  	}
  2354  
  2355  	return Port{}, false
  2356  }
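
        // Editor's illustrative sketch (not part of the original source): the lookup
        // matches on the "<ConnectProxyPrefix>-<service>" label convention used above:
        //
        //	n := &NetworkResource{DynamicPorts: []Port{
        //		{Label: fmt.Sprintf("%s-%s", ConnectProxyPrefix, "web"), Value: 25000},
        //	}}
        //	p, ok := n.PortForService("web") // ok == true, p.Value == 25000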
  2357  
  2358  // Networks defined for a task on the Resources struct.
  2359  type Networks []*NetworkResource
  2360  
  2361  func (ns Networks) Copy() Networks {
  2362  	if len(ns) == 0 {
  2363  		return nil
  2364  	}
  2365  
  2366  	out := make([]*NetworkResource, len(ns))
  2367  	for i := range ns {
  2368  		out[i] = ns[i].Copy()
  2369  	}
  2370  	return out
  2371  }
  2372  
  2373  // Port returns the IP and port for the given label, or empty values if not found.
  2374  func (ns Networks) Port(label string) (string, int) {
  2375  	for _, n := range ns {
  2376  		for _, p := range n.ReservedPorts {
  2377  			if p.Label == label {
  2378  				return n.IP, p.Value
  2379  			}
  2380  		}
  2381  		for _, p := range n.DynamicPorts {
  2382  			if p.Label == label {
  2383  				return n.IP, p.Value
  2384  			}
  2385  		}
  2386  	}
  2387  	return "", 0
  2388  }
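
        // Editor's illustrative sketch (not part of the original source): resolving
        // the host IP and port for a labeled port across all networks:
        //
        //	ns := Networks{{IP: "10.0.0.1", DynamicPorts: []Port{{Label: "http", Value: 23456}}}}
        //	ip, port := ns.Port("http") // ip == "10.0.0.1", port == 23456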
  2389  
  2390  func (ns Networks) NetIndex(n *NetworkResource) int {
  2391  	for idx, net := range ns {
  2392  		if net.Device == n.Device {
  2393  			return idx
  2394  		}
  2395  	}
  2396  	return -1
  2397  }
  2398  
  2399  // RequestedDevice is used to request a device for a task.
  2400  type RequestedDevice struct {
  2401  	// Name is the request name. The possible values are as follows:
  2402  	// * <type>: A single value only specifies the type of request.
  2403  	// * <vendor>/<type>: A single slash delimiter assumes the vendor and type of device is specified.
  2404  	// * <vendor>/<type>/<name>: Two slash delimiters assume vendor, type and specific model are specified.
  2405  	//
  2406  	// Examples are as follows:
  2407  	// * "gpu"
  2408  	// * "nvidia/gpu"
  2409  	// * "nvidia/gpu/GTX2080Ti"
  2410  	Name string
  2411  
  2412  	// Count is the number of requested devices
  2413  	Count uint64
  2414  
  2415  	// Constraints are a set of constraints to apply when selecting the device
  2416  	// to use.
  2417  	Constraints Constraints
  2418  
  2419  	// Affinities are a set of affinities to apply when selecting the device
  2420  	// to use.
  2421  	Affinities Affinities
  2422  }
  2423  
  2424  func (r *RequestedDevice) Equals(o *RequestedDevice) bool {
  2425  	if r == o {
  2426  		return true
  2427  	}
  2428  	if r == nil || o == nil {
  2429  		return false
  2430  	}
  2431  	return r.Name == o.Name &&
  2432  		r.Count == o.Count &&
  2433  		r.Constraints.Equals(&o.Constraints) &&
  2434  		r.Affinities.Equals(&o.Affinities)
  2435  }
  2436  
  2437  func (r *RequestedDevice) Copy() *RequestedDevice {
  2438  	if r == nil {
  2439  		return nil
  2440  	}
  2441  
  2442  	nr := *r
  2443  	nr.Constraints = CopySliceConstraints(nr.Constraints)
  2444  	nr.Affinities = CopySliceAffinities(nr.Affinities)
  2445  
  2446  	return &nr
  2447  }
  2448  
  2449  func (r *RequestedDevice) ID() *DeviceIdTuple {
  2450  	if r == nil || r.Name == "" {
  2451  		return nil
  2452  	}
  2453  
  2454  	parts := strings.SplitN(r.Name, "/", 3)
  2455  	switch len(parts) {
  2456  	case 1:
  2457  		return &DeviceIdTuple{
  2458  			Type: parts[0],
  2459  		}
  2460  	case 2:
  2461  		return &DeviceIdTuple{
  2462  			Vendor: parts[0],
  2463  			Type:   parts[1],
  2464  		}
  2465  	default:
  2466  		return &DeviceIdTuple{
  2467  			Vendor: parts[0],
  2468  			Type:   parts[1],
  2469  			Name:   parts[2],
  2470  		}
  2471  	}
  2472  }
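
        // Editor's illustrative sketch (not part of the original source): how ID
        // splits the three Name forms documented on RequestedDevice:
        //
        //	(&RequestedDevice{Name: "gpu"}).ID()                  // {Type: "gpu"}
        //	(&RequestedDevice{Name: "nvidia/gpu"}).ID()           // {Vendor: "nvidia", Type: "gpu"}
        //	(&RequestedDevice{Name: "nvidia/gpu/GTX2080Ti"}).ID() // {Vendor: "nvidia", Type: "gpu", Name: "GTX2080Ti"}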
  2473  
  2474  func (r *RequestedDevice) Validate() error {
  2475  	if r == nil {
  2476  		return nil
  2477  	}
  2478  
  2479  	var mErr multierror.Error
  2480  	if r.Name == "" {
  2481  		multierror.Append(&mErr, errors.New("device name must be given as one of the following: type, vendor/type, or vendor/type/name"))
  2482  	}
  2483  
  2484  	for idx, constr := range r.Constraints {
  2485  		// Ensure that the constraint doesn't use an operand we do not allow
  2486  		switch constr.Operand {
  2487  		case ConstraintDistinctHosts, ConstraintDistinctProperty:
  2488  			outer := fmt.Errorf("Constraint %d validation failed: using unsupported operand %q", idx+1, constr.Operand)
  2489  			multierror.Append(&mErr, outer)
  2490  		default:
  2491  			if err := constr.Validate(); err != nil {
  2492  				outer := fmt.Errorf("Constraint %d validation failed: %s", idx+1, err)
  2493  				multierror.Append(&mErr, outer)
  2494  			}
  2495  		}
  2496  	}
  2497  	for idx, affinity := range r.Affinities {
  2498  		if err := affinity.Validate(); err != nil {
  2499  			outer := fmt.Errorf("Affinity %d validation failed: %s", idx+1, err)
  2500  			multierror.Append(&mErr, outer)
  2501  		}
  2502  	}
  2503  
  2504  	return mErr.ErrorOrNil()
  2505  }
  2506  
  2507  // NodeResources is used to define the resources available on a client node.
  2508  type NodeResources struct {
  2509  	Cpu      NodeCpuResources
  2510  	Memory   NodeMemoryResources
  2511  	Disk     NodeDiskResources
  2512  	Networks Networks
  2513  	Devices  []*NodeDeviceResource
  2514  }
  2515  
  2516  func (n *NodeResources) Copy() *NodeResources {
  2517  	if n == nil {
  2518  		return nil
  2519  	}
  2520  
  2521  	newN := new(NodeResources)
  2522  	*newN = *n
  2523  
  2524  	// Copy the networks
  2525  	newN.Networks = n.Networks.Copy()
  2526  
  2527  	// Copy the devices
  2528  	if n.Devices != nil {
  2529  		devices := len(n.Devices)
  2530  		newN.Devices = make([]*NodeDeviceResource, devices)
  2531  		for i := 0; i < devices; i++ {
  2532  			newN.Devices[i] = n.Devices[i].Copy()
  2533  		}
  2534  	}
  2535  
  2536  	return newN
  2537  }
  2538  
  2539  // Comparable returns a comparable version of the node's resources. This
  2540  // conversion can be lossy so care must be taken when using it.
  2541  func (n *NodeResources) Comparable() *ComparableResources {
  2542  	if n == nil {
  2543  		return nil
  2544  	}
  2545  
  2546  	c := &ComparableResources{
  2547  		Flattened: AllocatedTaskResources{
  2548  			Cpu: AllocatedCpuResources{
  2549  				CpuShares: n.Cpu.CpuShares,
  2550  			},
  2551  			Memory: AllocatedMemoryResources{
  2552  				MemoryMB: n.Memory.MemoryMB,
  2553  			},
  2554  			Networks: n.Networks,
  2555  		},
  2556  		Shared: AllocatedSharedResources{
  2557  			DiskMB: n.Disk.DiskMB,
  2558  		},
  2559  	}
  2560  	return c
  2561  }
  2562  
  2563  func (n *NodeResources) Merge(o *NodeResources) {
  2564  	if o == nil {
  2565  		return
  2566  	}
  2567  
  2568  	n.Cpu.Merge(&o.Cpu)
  2569  	n.Memory.Merge(&o.Memory)
  2570  	n.Disk.Merge(&o.Disk)
  2571  
  2572  	if len(o.Networks) != 0 {
  2573  		n.Networks = o.Networks
  2574  	}
  2575  
  2576  	if len(o.Devices) != 0 {
  2577  		n.Devices = o.Devices
  2578  	}
  2579  }
  2580  
  2581  func (n *NodeResources) Equals(o *NodeResources) bool {
  2582  	if o == nil && n == nil {
  2583  		return true
  2584  	} else if o == nil {
  2585  		return false
  2586  	} else if n == nil {
  2587  		return false
  2588  	}
  2589  
  2590  	if !n.Cpu.Equals(&o.Cpu) {
  2591  		return false
  2592  	}
  2593  	if !n.Memory.Equals(&o.Memory) {
  2594  		return false
  2595  	}
  2596  	if !n.Disk.Equals(&o.Disk) {
  2597  		return false
  2598  	}
  2599  	if !n.Networks.Equals(&o.Networks) {
  2600  		return false
  2601  	}
  2602  
  2603  	// Check the devices
  2604  	if !DevicesEquals(n.Devices, o.Devices) {
  2605  		return false
  2606  	}
  2607  
  2608  	return true
  2609  }
  2610  
  2611  // Equals compares Networks as a set
  2612  func (ns *Networks) Equals(o *Networks) bool {
  2613  	if ns == o {
  2614  		return true
  2615  	}
  2616  	if ns == nil || o == nil {
  2617  		return false
  2618  	}
  2619  	if len(*ns) != len(*o) {
  2620  		return false
  2621  	}
  2622  SETEQUALS:
  2623  	for _, ne := range *ns {
  2624  		for _, oe := range *o {
  2625  			if ne.Equals(oe) {
  2626  				continue SETEQUALS
  2627  			}
  2628  		}
  2629  		return false
  2630  	}
  2631  	return true
  2632  }
  2633  
  2634  // DevicesEquals returns true if the two device arrays are set equal
  2635  func DevicesEquals(d1, d2 []*NodeDeviceResource) bool {
  2636  	if len(d1) != len(d2) {
  2637  		return false
  2638  	}
  2639  	idMap := make(map[DeviceIdTuple]*NodeDeviceResource, len(d1))
  2640  	for _, d := range d1 {
  2641  		idMap[*d.ID()] = d
  2642  	}
  2643  	for _, otherD := range d2 {
  2644  		if d, ok := idMap[*otherD.ID()]; !ok || !d.Equals(otherD) {
  2645  			return false
  2646  		}
  2647  	}
  2648  
  2649  	return true
  2650  }
  2651  
  2652  // NodeCpuResources captures the CPU resources of the node.
  2653  type NodeCpuResources struct {
  2654  	// CpuShares is the CPU shares available. This is calculated as the number
  2655  	// of cores multiplied by the core frequency.
  2656  	CpuShares int64
  2657  }
  2658  
  2659  func (n *NodeCpuResources) Merge(o *NodeCpuResources) {
  2660  	if o == nil {
  2661  		return
  2662  	}
  2663  
  2664  	if o.CpuShares != 0 {
  2665  		n.CpuShares = o.CpuShares
  2666  	}
  2667  }
  2668  
  2669  func (n *NodeCpuResources) Equals(o *NodeCpuResources) bool {
  2670  	if o == nil && n == nil {
  2671  		return true
  2672  	} else if o == nil {
  2673  		return false
  2674  	} else if n == nil {
  2675  		return false
  2676  	}
  2677  
  2678  	if n.CpuShares != o.CpuShares {
  2679  		return false
  2680  	}
  2681  
  2682  	return true
  2683  }
  2684  
  2685  // NodeMemoryResources captures the memory resources of the node
  2686  type NodeMemoryResources struct {
  2687  	// MemoryMB is the total available memory on the node
  2688  	MemoryMB int64
  2689  }
  2690  
  2691  func (n *NodeMemoryResources) Merge(o *NodeMemoryResources) {
  2692  	if o == nil {
  2693  		return
  2694  	}
  2695  
  2696  	if o.MemoryMB != 0 {
  2697  		n.MemoryMB = o.MemoryMB
  2698  	}
  2699  }
  2700  
  2701  func (n *NodeMemoryResources) Equals(o *NodeMemoryResources) bool {
  2702  	if o == nil && n == nil {
  2703  		return true
  2704  	} else if o == nil {
  2705  		return false
  2706  	} else if n == nil {
  2707  		return false
  2708  	}
  2709  
  2710  	if n.MemoryMB != o.MemoryMB {
  2711  		return false
  2712  	}
  2713  
  2714  	return true
  2715  }
  2716  
  2717  // NodeDiskResources captures the disk resources of the node
  2718  type NodeDiskResources struct {
  2719  	// DiskMB is the total available disk space on the node
  2720  	DiskMB int64
  2721  }
  2722  
  2723  func (n *NodeDiskResources) Merge(o *NodeDiskResources) {
  2724  	if o == nil {
  2725  		return
  2726  	}
  2727  	if o.DiskMB != 0 {
  2728  		n.DiskMB = o.DiskMB
  2729  	}
  2730  }
  2731  
  2732  func (n *NodeDiskResources) Equals(o *NodeDiskResources) bool {
  2733  	if o == nil && n == nil {
  2734  		return true
  2735  	} else if o == nil {
  2736  		return false
  2737  	} else if n == nil {
  2738  		return false
  2739  	}
  2740  
  2741  	if n.DiskMB != o.DiskMB {
  2742  		return false
  2743  	}
  2744  
  2745  	return true
  2746  }
  2747  
  2748  // DeviceIdTuple is the tuple that identifies a device
  2749  type DeviceIdTuple struct {
  2750  	Vendor string
  2751  	Type   string
  2752  	Name   string
  2753  }
  2754  
  2755  func (d *DeviceIdTuple) String() string {
  2756  	if d == nil {
  2757  		return ""
  2758  	}
  2759  
  2760  	return fmt.Sprintf("%s/%s/%s", d.Vendor, d.Type, d.Name)
  2761  }
  2762  
  2763  // Matches returns whether this Device ID is a superset of the passed ID.
  2764  func (id *DeviceIdTuple) Matches(other *DeviceIdTuple) bool {
  2765  	if other == nil {
  2766  		return false
  2767  	}
  2768  
  2769  	if other.Name != "" && other.Name != id.Name {
  2770  		return false
  2771  	}
  2772  
  2773  	if other.Vendor != "" && other.Vendor != id.Vendor {
  2774  		return false
  2775  	}
  2776  
  2777  	if other.Type != "" && other.Type != id.Type {
  2778  		return false
  2779  	}
  2780  
  2781  	return true
  2782  }
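
        // Editor's illustrative sketch (not part of the original source): unset
        // fields on the passed ID act as wildcards:
        //
        //	id := &DeviceIdTuple{Vendor: "nvidia", Type: "gpu", Name: "GTX2080Ti"}
        //	id.Matches(&DeviceIdTuple{Type: "gpu"})                // true
        //	id.Matches(&DeviceIdTuple{Vendor: "amd", Type: "gpu"}) // false: vendor differs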
  2783  
  2784  // Equals returns whether this Device ID is the same as the passed ID.
  2785  func (id *DeviceIdTuple) Equals(o *DeviceIdTuple) bool {
  2786  	if id == nil && o == nil {
  2787  		return true
  2788  	} else if id == nil || o == nil {
  2789  		return false
  2790  	}
  2791  
  2792  	return o.Vendor == id.Vendor && o.Type == id.Type && o.Name == id.Name
  2793  }
  2794  
  2795  // NodeDeviceResource captures a set of devices sharing a common
  2796  // vendor/type/device_name tuple.
  2797  type NodeDeviceResource struct {
  2798  	Vendor     string
  2799  	Type       string
  2800  	Name       string
  2801  	Instances  []*NodeDevice
  2802  	Attributes map[string]*psstructs.Attribute
  2803  }
  2804  
  2805  func (n *NodeDeviceResource) ID() *DeviceIdTuple {
  2806  	if n == nil {
  2807  		return nil
  2808  	}
  2809  
  2810  	return &DeviceIdTuple{
  2811  		Vendor: n.Vendor,
  2812  		Type:   n.Type,
  2813  		Name:   n.Name,
  2814  	}
  2815  }
  2816  
  2817  func (n *NodeDeviceResource) Copy() *NodeDeviceResource {
  2818  	if n == nil {
  2819  		return nil
  2820  	}
  2821  
  2822  	// Copy the primitives
  2823  	nn := *n
  2824  
  2825  	// Copy the device instances
  2826  	if l := len(nn.Instances); l != 0 {
  2827  		nn.Instances = make([]*NodeDevice, 0, l)
  2828  		for _, d := range n.Instances {
  2829  			nn.Instances = append(nn.Instances, d.Copy())
  2830  		}
  2831  	}
  2832  
  2833  	// Copy the Attributes
  2834  	nn.Attributes = psstructs.CopyMapStringAttribute(nn.Attributes)
  2835  
  2836  	return &nn
  2837  }
  2838  
  2839  func (n *NodeDeviceResource) Equals(o *NodeDeviceResource) bool {
  2840  	if o == nil && n == nil {
  2841  		return true
  2842  	} else if o == nil {
  2843  		return false
  2844  	} else if n == nil {
  2845  		return false
  2846  	}
  2847  
  2848  	if n.Vendor != o.Vendor {
  2849  		return false
  2850  	} else if n.Type != o.Type {
  2851  		return false
  2852  	} else if n.Name != o.Name {
  2853  		return false
  2854  	}
  2855  
  2856  	// Check the attributes
  2857  	if len(n.Attributes) != len(o.Attributes) {
  2858  		return false
  2859  	}
  2860  	for k, v := range n.Attributes {
  2861  		if otherV, ok := o.Attributes[k]; !ok || v != otherV {
  2862  			return false
  2863  		}
  2864  	}
  2865  
  2866  	// Check the instances
  2867  	if len(n.Instances) != len(o.Instances) {
  2868  		return false
  2869  	}
  2870  	idMap := make(map[string]*NodeDevice, len(n.Instances))
  2871  	for _, d := range n.Instances {
  2872  		idMap[d.ID] = d
  2873  	}
  2874  	for _, otherD := range o.Instances {
  2875  		if d, ok := idMap[otherD.ID]; !ok || !d.Equals(otherD) {
  2876  			return false
  2877  		}
  2878  	}
  2879  
  2880  	return true
  2881  }
  2882  
  2883  // NodeDevice is an instance of a particular device.
  2884  type NodeDevice struct {
  2885  	// ID is the ID of the device.
  2886  	ID string
  2887  
  2888  	// Healthy captures whether the device is healthy.
  2889  	Healthy bool
  2890  
  2891  	// HealthDescription is used to provide a human readable description of why
  2892  	// the device may be unhealthy.
  2893  	HealthDescription string
  2894  
  2895  	// Locality stores HW locality information for the node to optionally be
  2896  	// used when making placement decisions.
  2897  	Locality *NodeDeviceLocality
  2898  }
  2899  
  2900  func (n *NodeDevice) Equals(o *NodeDevice) bool {
  2901  	if o == nil && n == nil {
  2902  		return true
  2903  	} else if o == nil {
  2904  		return false
  2905  	} else if n == nil {
  2906  		return false
  2907  	}
  2908  
  2909  	if n.ID != o.ID {
  2910  		return false
  2911  	} else if n.Healthy != o.Healthy {
  2912  		return false
  2913  	} else if n.HealthDescription != o.HealthDescription {
  2914  		return false
  2915  	} else if !n.Locality.Equals(o.Locality) {
  2916  		return false
  2917  	}
  2918  
  2919  	return true
  2920  }
  2921  
  2922  func (n *NodeDevice) Copy() *NodeDevice {
  2923  	if n == nil {
  2924  		return nil
  2925  	}
  2926  
  2927  	// Copy the primitives
  2928  	nn := *n
  2929  
  2930  	// Copy the locality
  2931  	nn.Locality = nn.Locality.Copy()
  2932  
  2933  	return &nn
  2934  }
  2935  
  2936  // NodeDeviceLocality stores information about the devices hardware locality on
  2937  // the node.
  2938  type NodeDeviceLocality struct {
  2939  	// PciBusID is the PCI Bus ID for the device.
  2940  	PciBusID string
  2941  }
  2942  
  2943  func (n *NodeDeviceLocality) Equals(o *NodeDeviceLocality) bool {
  2944  	if o == nil && n == nil {
  2945  		return true
  2946  	} else if o == nil {
  2947  		return false
  2948  	} else if n == nil {
  2949  		return false
  2950  	}
  2951  
  2952  	if n.PciBusID != o.PciBusID {
  2953  		return false
  2954  	}
  2955  
  2956  	return true
  2957  }
  2958  
  2959  func (n *NodeDeviceLocality) Copy() *NodeDeviceLocality {
  2960  	if n == nil {
  2961  		return nil
  2962  	}
  2963  
  2964  	// Copy the primitives
  2965  	nn := *n
  2966  	return &nn
  2967  }
  2968  
  2969  // NodeReservedResources is used to capture the resources on a client node that
  2970  // should be reserved and not made available to jobs.
  2971  type NodeReservedResources struct {
  2972  	Cpu      NodeReservedCpuResources
  2973  	Memory   NodeReservedMemoryResources
  2974  	Disk     NodeReservedDiskResources
  2975  	Networks NodeReservedNetworkResources
  2976  }
  2977  
  2978  func (n *NodeReservedResources) Copy() *NodeReservedResources {
  2979  	if n == nil {
  2980  		return nil
  2981  	}
  2982  	newN := new(NodeReservedResources)
  2983  	*newN = *n
  2984  	return newN
  2985  }
  2986  
  2987  // Comparable returns a comparable version of the node's reserved resources. The
  2988  // returned resources don't contain any network information. This conversion
  2989  // can be lossy so care must be taken when using it.
  2990  func (n *NodeReservedResources) Comparable() *ComparableResources {
  2991  	if n == nil {
  2992  		return nil
  2993  	}
  2994  
  2995  	c := &ComparableResources{
  2996  		Flattened: AllocatedTaskResources{
  2997  			Cpu: AllocatedCpuResources{
  2998  				CpuShares: n.Cpu.CpuShares,
  2999  			},
  3000  			Memory: AllocatedMemoryResources{
  3001  				MemoryMB: n.Memory.MemoryMB,
  3002  			},
  3003  		},
  3004  		Shared: AllocatedSharedResources{
  3005  			DiskMB: n.Disk.DiskMB,
  3006  		},
  3007  	}
  3008  	return c
  3009  }
  3010  
  3011  // NodeReservedCpuResources captures the reserved CPU resources of the node.
  3012  type NodeReservedCpuResources struct {
  3013  	CpuShares int64
  3014  }
  3015  
  3016  // NodeReservedMemoryResources captures the reserved memory resources of the node.
  3017  type NodeReservedMemoryResources struct {
  3018  	MemoryMB int64
  3019  }
  3020  
  3021  // NodeReservedDiskResources captures the reserved disk resources of the node.
  3022  type NodeReservedDiskResources struct {
  3023  	DiskMB int64
  3024  }
  3025  
  3026  // NodeReservedNetworkResources captures the reserved network resources of the node.
  3027  type NodeReservedNetworkResources struct {
  3028  	// ReservedHostPorts is the set of ports reserved on all host network
  3029  	// interfaces. Its format is a comma-separated list of integers or integer
  3030  	// ranges, e.g. 80,443,1000-2000,2005.
  3031  	ReservedHostPorts string
  3032  }
  3033  
  3034  // ParseReservedHostPorts returns the reserved host ports.
  3035  func (n *NodeReservedNetworkResources) ParseReservedHostPorts() ([]uint64, error) {
  3036  	return ParsePortRanges(n.ReservedHostPorts)
  3037  }
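
        // Editor's illustrative sketch (not part of the original source), assuming
        // ParsePortRanges expands ranges into the individual port numbers:
        //
        //	n := &NodeReservedNetworkResources{ReservedHostPorts: "80,443,2000-2002"}
        //	ports, err := n.ParseReservedHostPorts() // [80 443 2000 2001 2002], err == nil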
  3038  
  3039  // AllocatedResources is the set of resources to be used by an allocation.
  3040  type AllocatedResources struct {
  3041  	// Tasks is a mapping of task name to the resources for the task.
  3042  	Tasks          map[string]*AllocatedTaskResources
  3043  	TaskLifecycles map[string]*TaskLifecycleConfig
  3044  
  3045  	// Shared is the set of resources that are shared by all tasks in the group.
  3046  	Shared AllocatedSharedResources
  3047  }
  3048  
  3049  func (a *AllocatedResources) Copy() *AllocatedResources {
  3050  	if a == nil {
  3051  		return nil
  3052  	}
  3053  
  3054  	out := AllocatedResources{
  3055  		Shared: a.Shared.Copy(),
  3056  	}
  3057  
  3058  	if a.Tasks != nil {
  3059  		out.Tasks = make(map[string]*AllocatedTaskResources, len(a.Tasks))
  3060  		for task, resource := range a.Tasks {
  3061  			out.Tasks[task] = resource.Copy()
  3062  		}
  3063  	}
  3064  	if a.TaskLifecycles != nil {
  3065  		out.TaskLifecycles = make(map[string]*TaskLifecycleConfig, len(a.TaskLifecycles))
  3066  		for task, lifecycle := range a.TaskLifecycles {
  3067  			out.TaskLifecycles[task] = lifecycle.Copy()
  3068  		}
  3069  
  3070  	}
  3071  
  3072  	return &out
  3073  }
  3074  
  3075  // Comparable returns a comparable version of the allocation's allocated
  3076  // resources. This conversion can be lossy so care must be taken when using it.
  3077  func (a *AllocatedResources) Comparable() *ComparableResources {
  3078  	if a == nil {
  3079  		return nil
  3080  	}
  3081  
  3082  	c := &ComparableResources{
  3083  		Shared: a.Shared,
  3084  	}
  3085  
  3086  	prestartSidecarTasks := &AllocatedTaskResources{}
  3087  	prestartEphemeralTasks := &AllocatedTaskResources{}
  3088  	main := &AllocatedTaskResources{}
  3089  
  3090  	for taskName, r := range a.Tasks {
  3091  		lc := a.TaskLifecycles[taskName]
  3092  		if lc == nil {
  3093  			main.Add(r)
  3094  		} else if lc.Hook == TaskLifecycleHookPrestart {
  3095  			if lc.Sidecar {
  3096  				prestartSidecarTasks.Add(r)
  3097  			} else {
  3098  				prestartEphemeralTasks.Add(r)
  3099  			}
  3100  		}
  3101  	}
  3102  
  3103  	// Prestart ephemeral tasks finish before the main tasks start, so take the max; prestart sidecars run alongside them, so add.
  3104  	prestartEphemeralTasks.Max(main)
  3105  	prestartSidecarTasks.Add(prestartEphemeralTasks)
  3106  	c.Flattened.Add(prestartSidecarTasks)
  3107  
  3108  	// Add network resources that are at the task group level
  3109  	for _, network := range a.Shared.Networks {
  3110  		c.Flattened.Add(&AllocatedTaskResources{
  3111  			Networks: []*NetworkResource{network},
  3112  		})
  3113  	}
  3114  
  3115  	return c
  3116  }
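
        // Editor's worked example (not part of the original source): with a main
        // task at 500 MHz, an ephemeral prestart task at 200 MHz, and a prestart
        // sidecar at 100 MHz, the flattened CPU is max(200, 500) + 100 = 600 MHz:
        // the ephemeral task releases its resources before the main tasks start,
        // while the sidecar keeps running alongside them.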
  3117  
  3118  // OldTaskResources returns the pre-0.9.0 map of task resources
  3119  func (a *AllocatedResources) OldTaskResources() map[string]*Resources {
  3120  	m := make(map[string]*Resources, len(a.Tasks))
  3121  	for name, res := range a.Tasks {
  3122  		m[name] = &Resources{
  3123  			CPU:      int(res.Cpu.CpuShares),
  3124  			MemoryMB: int(res.Memory.MemoryMB),
  3125  			Networks: res.Networks,
  3126  		}
  3127  	}
  3128  
  3129  	return m
  3130  }
  3131  
  3132  // AllocatedTaskResources are the set of resources allocated to a task.
  3133  type AllocatedTaskResources struct {
  3134  	Cpu      AllocatedCpuResources
  3135  	Memory   AllocatedMemoryResources
  3136  	Networks Networks
  3137  	Devices  []*AllocatedDeviceResource
  3138  }
  3139  
  3140  func (a *AllocatedTaskResources) Copy() *AllocatedTaskResources {
  3141  	if a == nil {
  3142  		return nil
  3143  	}
  3144  	newA := new(AllocatedTaskResources)
  3145  	*newA = *a
  3146  
  3147  	// Copy the networks
  3148  	newA.Networks = a.Networks.Copy()
  3149  
  3150  	// Copy the devices
  3151  	if newA.Devices != nil {
  3152  		n := len(a.Devices)
  3153  		newA.Devices = make([]*AllocatedDeviceResource, n)
  3154  		for i := 0; i < n; i++ {
  3155  			newA.Devices[i] = a.Devices[i].Copy()
  3156  		}
  3157  	}
  3158  
  3159  	return newA
  3160  }
  3161  
  3162  // NetIndex finds the matching net index using device name
  3163  func (a *AllocatedTaskResources) NetIndex(n *NetworkResource) int {
  3164  	return a.Networks.NetIndex(n)
  3165  }
  3166  
  3167  func (a *AllocatedTaskResources) Add(delta *AllocatedTaskResources) {
  3168  	if delta == nil {
  3169  		return
  3170  	}
  3171  
  3172  	a.Cpu.Add(&delta.Cpu)
  3173  	a.Memory.Add(&delta.Memory)
  3174  
  3175  	for _, n := range delta.Networks {
  3176  		// Find the matching interface by IP or CIDR
  3177  		idx := a.NetIndex(n)
  3178  		if idx == -1 {
  3179  			a.Networks = append(a.Networks, n.Copy())
  3180  		} else {
  3181  			a.Networks[idx].Add(n)
  3182  		}
  3183  	}
  3184  
  3185  	for _, d := range delta.Devices {
  3186  		// Find the matching device
  3187  		idx := AllocatedDevices(a.Devices).Index(d)
  3188  		if idx == -1 {
  3189  			a.Devices = append(a.Devices, d.Copy())
  3190  		} else {
  3191  			a.Devices[idx].Add(d)
  3192  		}
  3193  	}
  3194  }
  3195  
  3196  func (a *AllocatedTaskResources) Max(other *AllocatedTaskResources) {
  3197  	if other == nil {
  3198  		return
  3199  	}
  3200  
  3201  	a.Cpu.Max(&other.Cpu)
  3202  	a.Memory.Max(&other.Memory)
  3203  
  3204  	for _, n := range other.Networks {
  3205  		// Find the matching interface by IP or CIDR
  3206  		idx := a.NetIndex(n)
  3207  		if idx == -1 {
  3208  			a.Networks = append(a.Networks, n.Copy())
  3209  		} else {
  3210  			a.Networks[idx].Add(n)
  3211  		}
  3212  	}
  3213  
  3214  	for _, d := range other.Devices {
  3215  		// Find the matching device
  3216  		idx := AllocatedDevices(a.Devices).Index(d)
  3217  		if idx == -1 {
  3218  			a.Devices = append(a.Devices, d.Copy())
  3219  		} else {
  3220  			a.Devices[idx].Add(d)
  3221  		}
  3222  	}
  3223  }
  3224  
  3225  // Comparable turns AllocatedTaskResources into ComparableResources
  3226  // as a helper step in preemption
  3227  func (a *AllocatedTaskResources) Comparable() *ComparableResources {
  3228  	ret := &ComparableResources{
  3229  		Flattened: AllocatedTaskResources{
  3230  			Cpu: AllocatedCpuResources{
  3231  				CpuShares: a.Cpu.CpuShares,
  3232  			},
  3233  			Memory: AllocatedMemoryResources{
  3234  				MemoryMB: a.Memory.MemoryMB,
  3235  			},
  3236  		},
  3237  	}
  3238  	ret.Flattened.Networks = append(ret.Flattened.Networks, a.Networks...)
  3243  	return ret
  3244  }
  3245  
  3246  // Subtract only subtracts CPU and Memory resources. Network utilization
  3247  // is managed separately in NetworkIndex
  3248  func (a *AllocatedTaskResources) Subtract(delta *AllocatedTaskResources) {
  3249  	if delta == nil {
  3250  		return
  3251  	}
  3252  
  3253  	a.Cpu.Subtract(&delta.Cpu)
  3254  	a.Memory.Subtract(&delta.Memory)
  3255  }
  3256  
  3257  // AllocatedSharedResources are the set of resources allocated to a task group.
  3258  type AllocatedSharedResources struct {
  3259  	Networks Networks
  3260  	DiskMB   int64
  3261  }
  3262  
  3263  func (a AllocatedSharedResources) Copy() AllocatedSharedResources {
  3264  	return AllocatedSharedResources{
  3265  		Networks: a.Networks.Copy(),
  3266  		DiskMB:   a.DiskMB,
  3267  	}
  3268  }
  3269  
  3270  func (a *AllocatedSharedResources) Add(delta *AllocatedSharedResources) {
  3271  	if delta == nil {
  3272  		return
  3273  	}
  3274  	a.Networks = append(a.Networks, delta.Networks...)
  3275  	a.DiskMB += delta.DiskMB
  3276  
  3277  }
  3278  
  3279  func (a *AllocatedSharedResources) Subtract(delta *AllocatedSharedResources) {
  3280  	if delta == nil {
  3281  		return
  3282  	}
  3283  
  3284  	diff := map[*NetworkResource]bool{}
  3285  	for _, n := range delta.Networks {
  3286  		diff[n] = true
  3287  	}
  3288  	var nets Networks
  3289  	for _, n := range a.Networks {
  3290  		if _, ok := diff[n]; !ok {
  3291  			nets = append(nets, n)
  3292  		}
  3293  	}
  3294  	a.Networks = nets
  3295  	a.DiskMB -= delta.DiskMB
  3296  }
  3297  
  3298  // AllocatedCpuResources captures the allocated CPU resources.
  3299  type AllocatedCpuResources struct {
  3300  	CpuShares int64
  3301  }
  3302  
  3303  func (a *AllocatedCpuResources) Add(delta *AllocatedCpuResources) {
  3304  	if delta == nil {
  3305  		return
  3306  	}
  3307  
  3308  	a.CpuShares += delta.CpuShares
  3309  }
  3310  
  3311  func (a *AllocatedCpuResources) Subtract(delta *AllocatedCpuResources) {
  3312  	if delta == nil {
  3313  		return
  3314  	}
  3315  
  3316  	a.CpuShares -= delta.CpuShares
  3317  }
  3318  
  3319  func (a *AllocatedCpuResources) Max(other *AllocatedCpuResources) {
  3320  	if other == nil {
  3321  		return
  3322  	}
  3323  
  3324  	if other.CpuShares > a.CpuShares {
  3325  		a.CpuShares = other.CpuShares
  3326  	}
  3327  }
  3328  
  3329  // AllocatedMemoryResources captures the allocated memory resources.
  3330  type AllocatedMemoryResources struct {
  3331  	MemoryMB int64
  3332  }
  3333  
  3334  func (a *AllocatedMemoryResources) Add(delta *AllocatedMemoryResources) {
  3335  	if delta == nil {
  3336  		return
  3337  	}
  3338  
  3339  	a.MemoryMB += delta.MemoryMB
  3340  }
  3341  
  3342  func (a *AllocatedMemoryResources) Subtract(delta *AllocatedMemoryResources) {
  3343  	if delta == nil {
  3344  		return
  3345  	}
  3346  
  3347  	a.MemoryMB -= delta.MemoryMB
  3348  }
  3349  
  3350  func (a *AllocatedMemoryResources) Max(other *AllocatedMemoryResources) {
  3351  	if other == nil {
  3352  		return
  3353  	}
  3354  
  3355  	if other.MemoryMB > a.MemoryMB {
  3356  		a.MemoryMB = other.MemoryMB
  3357  	}
  3358  }
  3359  
  3360  type AllocatedDevices []*AllocatedDeviceResource
  3361  
  3362  // Index finds the matching index using the passed device. If not found, -1 is
  3363  // returned.
  3364  func (a AllocatedDevices) Index(d *AllocatedDeviceResource) int {
  3365  	if d == nil {
  3366  		return -1
  3367  	}
  3368  
  3369  	for i, o := range a {
  3370  		if o.ID().Equals(d.ID()) {
  3371  			return i
  3372  		}
  3373  	}
  3374  
  3375  	return -1
  3376  }
  3377  
  3378  // AllocatedDeviceResource captures a set of allocated devices.
  3379  type AllocatedDeviceResource struct {
  3380  	// Vendor, Type, and Name are used to select the plugin to request the
  3381  	// device IDs from.
  3382  	Vendor string
  3383  	Type   string
  3384  	Name   string
  3385  
  3386  	// DeviceIDs is the set of allocated devices
  3387  	DeviceIDs []string
  3388  }
  3389  
  3390  func (a *AllocatedDeviceResource) ID() *DeviceIdTuple {
  3391  	if a == nil {
  3392  		return nil
  3393  	}
  3394  
  3395  	return &DeviceIdTuple{
  3396  		Vendor: a.Vendor,
  3397  		Type:   a.Type,
  3398  		Name:   a.Name,
  3399  	}
  3400  }
  3401  
  3402  func (a *AllocatedDeviceResource) Add(delta *AllocatedDeviceResource) {
  3403  	if delta == nil {
  3404  		return
  3405  	}
  3406  
  3407  	a.DeviceIDs = append(a.DeviceIDs, delta.DeviceIDs...)
  3408  }
  3409  
  3410  func (a *AllocatedDeviceResource) Copy() *AllocatedDeviceResource {
  3411  	if a == nil {
  3412  		return nil
  3413  	}
  3414  
  3415  	na := *a
  3416  
  3417  	// Copy the devices
  3418  	na.DeviceIDs = make([]string, len(a.DeviceIDs))
  3419  	copy(na.DeviceIDs, a.DeviceIDs)
  3422  
  3423  	return &na
  3424  }
  3425  
  3426  // ComparableResources is the set of resources allocated to a task group but
  3427  // not keyed by Task, making it easier to compare.
  3428  type ComparableResources struct {
  3429  	Flattened AllocatedTaskResources
  3430  	Shared    AllocatedSharedResources
  3431  }
  3432  
  3433  func (c *ComparableResources) Add(delta *ComparableResources) {
  3434  	if delta == nil {
  3435  		return
  3436  	}
  3437  
  3438  	c.Flattened.Add(&delta.Flattened)
  3439  	c.Shared.Add(&delta.Shared)
  3440  }
  3441  
  3442  func (c *ComparableResources) Subtract(delta *ComparableResources) {
  3443  	if delta == nil {
  3444  		return
  3445  	}
  3446  
  3447  	c.Flattened.Subtract(&delta.Flattened)
  3448  	c.Shared.Subtract(&delta.Shared)
  3449  }
  3450  
  3451  func (c *ComparableResources) Copy() *ComparableResources {
  3452  	if c == nil {
  3453  		return nil
  3454  	}
  3455  	newR := new(ComparableResources)
  3456  	*newR = *c
  3457  	return newR
  3458  }
  3459  
  3460  // Superset checks if one set of resources is a superset of another. This
  3461  // ignores network resources, and the NetworkIndex should be used for that.
  3462  func (c *ComparableResources) Superset(other *ComparableResources) (bool, string) {
  3463  	if c.Flattened.Cpu.CpuShares < other.Flattened.Cpu.CpuShares {
  3464  		return false, "cpu"
  3465  	}
  3466  	if c.Flattened.Memory.MemoryMB < other.Flattened.Memory.MemoryMB {
  3467  		return false, "memory"
  3468  	}
  3469  	if c.Shared.DiskMB < other.Shared.DiskMB {
  3470  		return false, "disk"
  3471  	}
  3472  	return true, ""
  3473  }
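
        // Fit-check sketch (illustrative; the numbers are made up):
        //
        //	node := &ComparableResources{Flattened: AllocatedTaskResources{
        //		Cpu:    AllocatedCpuResources{CpuShares: 500},
        //		Memory: AllocatedMemoryResources{MemoryMB: 256},
        //	}}
        //	ask := &ComparableResources{Flattened: AllocatedTaskResources{
        //		Cpu:    AllocatedCpuResources{CpuShares: 250},
        //		Memory: AllocatedMemoryResources{MemoryMB: 512},
        //	}}
        //	ok, dim := node.Superset(ask) // ok == false, dim == "memory"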
  3474  
  3475  // NetIndex finds the matching network index using the device name
  3476  func (c *ComparableResources) NetIndex(n *NetworkResource) int {
  3477  	return c.Flattened.Networks.NetIndex(n)
  3478  }
  3479  
  3480  const (
  3481  	// JobTypeCore is reserved for internal system tasks and is
  3482  	// always handled by the CoreScheduler.
  3483  	JobTypeCore    = "_core"
  3484  	JobTypeService = "service"
  3485  	JobTypeBatch   = "batch"
  3486  	JobTypeSystem  = "system"
  3487  )
  3488  
  3489  const (
  3490  	JobStatusPending = "pending" // Pending means the job is waiting on scheduling
  3491  	JobStatusRunning = "running" // Running means the job has non-terminal allocations
  3492  	JobStatusDead    = "dead"    // Dead means all evaluations and allocations are terminal
  3493  )
  3494  
  3495  const (
  3496  	// JobMinPriority is the minimum allowed priority
  3497  	JobMinPriority = 1
  3498  
  3499  	// JobDefaultPriority is the default priority if not
  3500  	// specified.
  3501  	JobDefaultPriority = 50
  3502  
  3503  	// JobMaxPriority is the maximum allowed priority
  3504  	JobMaxPriority = 100
  3505  
  3506  	// Ensure CoreJobPriority is higher than any user
  3507  	// specified job so that it gets priority. This is important
  3508  	// for the system to remain healthy.
  3509  	CoreJobPriority = JobMaxPriority * 2
  3510  
  3511  	// JobTrackedVersions is the number of historic job versions that are
  3512  	// kept.
  3513  	JobTrackedVersions = 6
  3514  
  3515  	// JobTrackedScalingEvents is the number of scaling events that are
  3516  	// kept for a single task group.
  3517  	JobTrackedScalingEvents = 20
  3518  )
  3519  
  3520  // Job is the scope of a scheduling request to Nomad. It is the largest
  3521  // scoped object, and is a named collection of task groups. Each task group
  3522  // is further composed of tasks. The task group (TG), however, is the
  3523  // unit of scheduling.
  3524  type Job struct {
  3525  	// Stop marks whether the user has stopped the job. A stopped job will
  3526  	// have all created allocations stopped and acts as a way to stop a job
  3527  	// without purging it from the system. This allows existing allocs to be
  3528  	// queried and the job to be inspected as it is being killed.
  3529  	Stop bool
  3530  
  3531  	// Region is the Nomad region that handles scheduling this job
  3532  	Region string
  3533  
  3534  	// Namespace is the namespace the job is submitted into.
  3535  	Namespace string
  3536  
  3537  	// ID is a unique identifier for the job per region. It can be
  3538  	// specified hierarchically like LineOfBiz/OrgName/Team/Project
  3539  	ID string
  3540  
  3541  	// ParentID is the unique identifier of the job that spawned this job.
  3542  	ParentID string
  3543  
  3544  	// Name is the logical name of the job used to refer to it. This is unique
  3545  	// per region, but not unique globally.
  3546  	Name string
  3547  
  3548  	// Type is used to control various behaviors about the job. Most jobs
  3549  	// are service jobs, meaning they are expected to be long lived.
  3550  	// Some jobs are batch oriented meaning they run and then terminate.
  3551  	// This can be extended in the future to support custom schedulers.
  3552  	Type string
  3553  
  3554  	// Priority is used to control scheduling importance and if this job
  3555  	// can preempt other jobs.
  3556  	Priority int
  3557  
  3558  	// AllAtOnce is used to control if incremental scheduling of task groups
  3559  	// is allowed or if we must do a gang scheduling of the entire job. This
  3560  	// can slow down larger jobs if resources are not available.
  3561  	AllAtOnce bool
  3562  
  3563  	// Datacenters contains all the datacenters this job is allowed to span
  3564  	Datacenters []string
  3565  
  3566  	// Constraints can be specified at a job level and apply to
  3567  	// all the task groups and tasks.
  3568  	Constraints []*Constraint
  3569  
  3570  	// Affinities can be specified at the job level to express
  3571  	// scheduling preferences that apply to all groups and tasks
  3572  	Affinities []*Affinity
  3573  
  3574  	// Spread can be specified at the job level to express spreading
  3575  	// allocations across a desired attribute, such as datacenter
  3576  	Spreads []*Spread
  3577  
  3578  	// TaskGroups are the collections of task groups that this job needs
  3579  	// to run. Each task group is an atomic unit of scheduling and placement.
  3580  	TaskGroups []*TaskGroup
  3581  
  3582  	// See agent.ApiJobToStructJob
  3583  	// Update provides defaults for the TaskGroup Update stanzas
  3584  	Update UpdateStrategy
  3585  
  3586  	// Periodic is used to define the interval the job is run at.
  3587  	Periodic *PeriodicConfig
  3588  
  3589  	// ParameterizedJob is used to specify the job as a parameterized job
  3590  	// for dispatching.
  3591  	ParameterizedJob *ParameterizedJobConfig
  3592  
  3593  	// Dispatched is used to identify if the Job has been dispatched from a
  3594  	// parameterized job.
  3595  	Dispatched bool
  3596  
  3597  	// Payload is the payload supplied when the job was dispatched.
  3598  	Payload []byte
  3599  
  3600  	// Meta is used to associate arbitrary metadata with this
  3601  	// job. This is opaque to Nomad.
  3602  	Meta map[string]string
  3603  
  3604  	// ConsulToken is the Consul token that proves the submitter of the job has
  3605  	// access to the Service Identity policies associated with the job's
  3606  	// Consul Connect enabled services. This field is only used to transfer the
  3607  	// token and is not stored after Job submission.
  3608  	ConsulToken string
  3609  
  3610  	// VaultToken is the Vault token that proves the submitter of the job has
  3611  	// access to the specified Vault policies. This field is only used to
  3612  	// transfer the token and is not stored after Job submission.
  3613  	VaultToken string
  3614  
  3615  	// Job status
  3616  	Status string
  3617  
  3618  	// StatusDescription is meant to provide more human useful information
  3619  	StatusDescription string
  3620  
  3621  	// Stable marks a job as stable. Stability is only defined on "service" and
  3622  	// "system" jobs. The stability of a job will be set automatically as part
  3623  	// of a deployment and can be manually set via APIs. This field is updated
  3624  	// when the status of a corresponding deployment transitions to Failed
  3625  	// or Successful. This field is not meaningful for jobs that don't have an
  3626  	// update stanza.
  3627  	Stable bool
  3628  
  3629  	// Version is a monotonically increasing version number that is incremented
  3630  	// on each job register.
  3631  	Version uint64
  3632  
  3633  	// SubmitTime is the time at which the job was submitted as a UnixNano in
  3634  	// UTC
  3635  	SubmitTime int64
  3636  
  3637  	// Raft Indexes
  3638  	CreateIndex    uint64
  3639  	ModifyIndex    uint64
  3640  	JobModifyIndex uint64
  3641  }
  3642  
  3643  // NamespacedID returns the namespaced id useful for logging
  3644  func (j *Job) NamespacedID() *NamespacedID {
  3645  	return &NamespacedID{
  3646  		ID:        j.ID,
  3647  		Namespace: j.Namespace,
  3648  	}
  3649  }
  3650  
  3651  // Canonicalize is used to canonicalize fields in the Job. This should be called
  3652  // when registering a Job. A set of warnings is returned if the job was changed
  3653  // in any way that the user should be made aware of.
  3654  func (j *Job) Canonicalize() (warnings error) {
  3655  	if j == nil {
  3656  		return nil
  3657  	}
  3658  
  3659  	var mErr multierror.Error
  3660  	// Ensure that an empty and nil map are treated the same to avoid scheduling
  3661  	// problems since we use reflect.DeepEqual.
  3662  	if len(j.Meta) == 0 {
  3663  		j.Meta = nil
  3664  	}
  3665  
  3666  	// Ensure the job is in a namespace.
  3667  	if j.Namespace == "" {
  3668  		j.Namespace = DefaultNamespace
  3669  	}
  3670  
  3671  	for _, tg := range j.TaskGroups {
  3672  		tg.Canonicalize(j)
  3673  	}
  3674  
  3675  	if j.ParameterizedJob != nil {
  3676  		j.ParameterizedJob.Canonicalize()
  3677  	}
  3678  
  3679  	if j.Periodic != nil {
  3680  		j.Periodic.Canonicalize()
  3681  	}
  3682  
  3683  	return mErr.ErrorOrNil()
  3684  }
  3685  
  3686  // Copy returns a deep copy of the Job. It is expected that callers use recover,
  3687  // as this method can panic if the deep copy fails since it uses reflection.
  3688  func (j *Job) Copy() *Job {
  3689  	if j == nil {
  3690  		return nil
  3691  	}
  3692  	nj := new(Job)
  3693  	*nj = *j
  3694  	nj.Datacenters = helper.CopySliceString(nj.Datacenters)
  3695  	nj.Constraints = CopySliceConstraints(nj.Constraints)
  3696  	nj.Affinities = CopySliceAffinities(nj.Affinities)
  3697  
  3698  	if j.TaskGroups != nil {
  3699  		tgs := make([]*TaskGroup, len(nj.TaskGroups))
  3700  		for i, tg := range nj.TaskGroups {
  3701  			tgs[i] = tg.Copy()
  3702  		}
  3703  		nj.TaskGroups = tgs
  3704  	}
  3705  
  3706  	nj.Periodic = nj.Periodic.Copy()
  3707  	nj.Meta = helper.CopyMapStringString(nj.Meta)
  3708  	nj.ParameterizedJob = nj.ParameterizedJob.Copy()
  3709  	return nj
  3710  }
  3711  
  3712  // Validate is used to sanity check a job input
  3713  func (j *Job) Validate() error {
  3714  	var mErr multierror.Error
  3715  
  3716  	if j.Region == "" {
  3717  		mErr.Errors = append(mErr.Errors, errors.New("Missing job region"))
  3718  	}
  3719  	if j.ID == "" {
  3720  		mErr.Errors = append(mErr.Errors, errors.New("Missing job ID"))
  3721  	} else if strings.Contains(j.ID, " ") {
  3722  		mErr.Errors = append(mErr.Errors, errors.New("Job ID contains a space"))
  3723  	}
  3724  	if j.Name == "" {
  3725  		mErr.Errors = append(mErr.Errors, errors.New("Missing job name"))
  3726  	}
  3727  	if j.Namespace == "" {
  3728  		mErr.Errors = append(mErr.Errors, errors.New("Job must be in a namespace"))
  3729  	}
  3730  	switch j.Type {
  3731  	case JobTypeCore, JobTypeService, JobTypeBatch, JobTypeSystem:
  3732  	case "":
  3733  		mErr.Errors = append(mErr.Errors, errors.New("Missing job type"))
  3734  	default:
  3735  		mErr.Errors = append(mErr.Errors, fmt.Errorf("Invalid job type: %q", j.Type))
  3736  	}
  3737  	if j.Priority < JobMinPriority || j.Priority > JobMaxPriority {
  3738  		mErr.Errors = append(mErr.Errors, fmt.Errorf("Job priority must be between [%d, %d]", JobMinPriority, JobMaxPriority))
  3739  	}
  3740  	if len(j.Datacenters) == 0 {
  3741  		mErr.Errors = append(mErr.Errors, errors.New("Missing job datacenters"))
  3742  	} else {
  3743  		for _, v := range j.Datacenters {
  3744  			if v == "" {
  3745  				mErr.Errors = append(mErr.Errors, errors.New("Job datacenter must be non-empty string"))
  3746  			}
  3747  		}
  3748  	}
  3749  	if len(j.TaskGroups) == 0 {
  3750  		mErr.Errors = append(mErr.Errors, errors.New("Missing job task groups"))
  3751  	}
  3752  	for idx, constr := range j.Constraints {
  3753  		if err := constr.Validate(); err != nil {
  3754  			outer := fmt.Errorf("Constraint %d validation failed: %s", idx+1, err)
  3755  			mErr.Errors = append(mErr.Errors, outer)
  3756  		}
  3757  	}
  3758  	if j.Type == JobTypeSystem {
  3759  		if j.Affinities != nil {
  3760  			mErr.Errors = append(mErr.Errors, fmt.Errorf("System jobs may not have an affinity stanza"))
  3761  		}
  3762  	} else {
  3763  		for idx, affinity := range j.Affinities {
  3764  			if err := affinity.Validate(); err != nil {
  3765  				outer := fmt.Errorf("Affinity %d validation failed: %s", idx+1, err)
  3766  				mErr.Errors = append(mErr.Errors, outer)
  3767  			}
  3768  		}
  3769  	}
  3770  
  3771  	if j.Type == JobTypeSystem {
  3772  		if j.Spreads != nil {
  3773  			mErr.Errors = append(mErr.Errors, fmt.Errorf("System jobs may not have a spread stanza"))
  3774  		}
  3775  	} else {
  3776  		for idx, spread := range j.Spreads {
  3777  			if err := spread.Validate(); err != nil {
  3778  				outer := fmt.Errorf("Spread %d validation failed: %s", idx+1, err)
  3779  				mErr.Errors = append(mErr.Errors, outer)
  3780  			}
  3781  		}
  3782  	}
  3783  
  3784  	// Check for duplicate task groups
  3785  	taskGroups := make(map[string]int)
  3786  	for idx, tg := range j.TaskGroups {
  3787  		if tg.Name == "" {
  3788  			mErr.Errors = append(mErr.Errors, fmt.Errorf("Job task group %d missing name", idx+1))
  3789  		} else if existing, ok := taskGroups[tg.Name]; ok {
  3790  			mErr.Errors = append(mErr.Errors, fmt.Errorf("Job task group %d redefines '%s' from group %d", idx+1, tg.Name, existing+1))
  3791  		} else {
  3792  			taskGroups[tg.Name] = idx
  3793  		}
  3794  
  3795  		if tg.ShutdownDelay != nil && *tg.ShutdownDelay < 0 {
  3796  			mErr.Errors = append(mErr.Errors, errors.New("ShutdownDelay must be a positive value"))
  3797  		}
  3798  
  3799  		if j.Type == JobTypeSystem && tg.Count > 1 {
  3800  			mErr.Errors = append(mErr.Errors,
  3801  				fmt.Errorf("Job task group %s has count %d. Count cannot exceed 1 with system scheduler",
  3802  					tg.Name, tg.Count))
  3803  		}
  3804  	}
  3805  
  3806  	// Validate the task group
  3807  	for _, tg := range j.TaskGroups {
  3808  		if err := tg.Validate(j); err != nil {
  3809  			outer := fmt.Errorf("Task group %s validation failed: %v", tg.Name, err)
  3810  			mErr.Errors = append(mErr.Errors, outer)
  3811  		}
  3812  	}
  3813  
  3814  	// Validate periodic is only used with batch jobs.
  3815  	if j.IsPeriodic() && j.Periodic.Enabled {
  3816  		if j.Type != JobTypeBatch {
  3817  			mErr.Errors = append(mErr.Errors,
  3818  				fmt.Errorf("Periodic can only be used with %q scheduler", JobTypeBatch))
  3819  		}
  3820  
  3821  		if err := j.Periodic.Validate(); err != nil {
  3822  			mErr.Errors = append(mErr.Errors, err)
  3823  		}
  3824  	}
  3825  
  3826  	if j.IsParameterized() {
  3827  		if j.Type != JobTypeBatch {
  3828  			mErr.Errors = append(mErr.Errors,
  3829  				fmt.Errorf("Parameterized job can only be used with %q scheduler", JobTypeBatch))
  3830  		}
  3831  
  3832  		if err := j.ParameterizedJob.Validate(); err != nil {
  3833  			mErr.Errors = append(mErr.Errors, err)
  3834  		}
  3835  	}
  3836  
  3837  	return mErr.ErrorOrNil()
  3838  }
  3839  
  3840  // Warnings returns a list of warnings that may be from dubious settings or
  3841  // deprecation warnings.
  3842  func (j *Job) Warnings() error {
  3843  	var mErr multierror.Error
  3844  
  3845  	// Check the groups
  3846  	ap := 0
  3847  	for _, tg := range j.TaskGroups {
  3848  		if err := tg.Warnings(j); err != nil {
  3849  			outer := fmt.Errorf("Group %q has warnings: %v", tg.Name, err)
  3850  			mErr.Errors = append(mErr.Errors, outer)
  3851  		}
  3852  		if tg.Update != nil && tg.Update.AutoPromote {
  3853  			ap += 1
  3854  		}
  3855  	}
  3856  
  3857  	// Check AutoPromote, should be all or none
  3858  	if ap > 0 && ap < len(j.TaskGroups) {
  3859  		err := fmt.Errorf("auto_promote must be true for all groups to enable automatic promotion")
  3860  		mErr.Errors = append(mErr.Errors, err)
  3861  	}
  3862  
  3863  	return mErr.ErrorOrNil()
  3864  }
  3865  
  3866  // LookupTaskGroup finds a task group by name
  3867  func (j *Job) LookupTaskGroup(name string) *TaskGroup {
  3868  	for _, tg := range j.TaskGroups {
  3869  		if tg.Name == name {
  3870  			return tg
  3871  		}
  3872  	}
  3873  	return nil
  3874  }
  3875  
  3876  // CombinedTaskMeta takes a TaskGroup and Task name and returns the combined
  3877  // meta data for the task. When joining Job, Group and Task Meta, the precedence
  3878  // is by deepest scope (Task > Group > Job).
  3879  func (j *Job) CombinedTaskMeta(groupName, taskName string) map[string]string {
  3880  	group := j.LookupTaskGroup(groupName)
  3881  	if group == nil {
  3882  		return j.Meta
  3883  	}
  3884  
  3885  	var meta map[string]string
  3886  
  3887  	task := group.LookupTask(taskName)
  3888  	if task != nil {
  3889  		meta = helper.CopyMapStringString(task.Meta)
  3890  	}
  3891  
  3892  	if meta == nil {
  3893  		meta = make(map[string]string, len(group.Meta)+len(j.Meta))
  3894  	}
  3895  
  3896  	// Add the group specific meta
  3897  	for k, v := range group.Meta {
  3898  		if _, ok := meta[k]; !ok {
  3899  			meta[k] = v
  3900  		}
  3901  	}
  3902  
  3903  	// Add the job specific meta
  3904  	for k, v := range j.Meta {
  3905  		if _, ok := meta[k]; !ok {
  3906  			meta[k] = v
  3907  		}
  3908  	}
  3909  
  3910  	return meta
  3911  }
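
        // Precedence sketch (illustrative): with job meta {"team": "infra"},
        // group meta {"team": "web"}, and task meta {"env": "prod"}, the
        // combined result is {"team": "web", "env": "prod"}, because the
        // deepest scope wins for duplicate keys.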
  3912  
  3913  // Stopped returns if a job is stopped.
  3914  func (j *Job) Stopped() bool {
  3915  	return j == nil || j.Stop
  3916  }
  3917  
  3918  // HasUpdateStrategy returns if any task group in the job has an update strategy
  3919  func (j *Job) HasUpdateStrategy() bool {
  3920  	for _, tg := range j.TaskGroups {
  3921  		if !tg.Update.IsEmpty() {
  3922  			return true
  3923  		}
  3924  	}
  3925  
  3926  	return false
  3927  }
  3928  
  3929  // Stub is used to return a summary of the job
  3930  func (j *Job) Stub(summary *JobSummary) *JobListStub {
  3931  	return &JobListStub{
  3932  		ID:                j.ID,
  3933  		ParentID:          j.ParentID,
  3934  		Name:              j.Name,
  3935  		Datacenters:       j.Datacenters,
  3936  		Type:              j.Type,
  3937  		Priority:          j.Priority,
  3938  		Periodic:          j.IsPeriodic(),
  3939  		ParameterizedJob:  j.IsParameterized(),
  3940  		Stop:              j.Stop,
  3941  		Status:            j.Status,
  3942  		StatusDescription: j.StatusDescription,
  3943  		CreateIndex:       j.CreateIndex,
  3944  		ModifyIndex:       j.ModifyIndex,
  3945  		JobModifyIndex:    j.JobModifyIndex,
  3946  		SubmitTime:        j.SubmitTime,
  3947  		JobSummary:        summary,
  3948  	}
  3949  }
  3950  
  3951  // IsPeriodic returns whether a job is periodic.
  3952  func (j *Job) IsPeriodic() bool {
  3953  	return j.Periodic != nil
  3954  }
  3955  
  3956  // IsPeriodicActive returns whether the job is an active periodic job that will
  3957  // create child jobs
  3958  func (j *Job) IsPeriodicActive() bool {
  3959  	return j.IsPeriodic() && j.Periodic.Enabled && !j.Stopped() && !j.IsParameterized()
  3960  }
  3961  
  3962  // IsParameterized returns whether a job is a parameterized job.
  3963  func (j *Job) IsParameterized() bool {
  3964  	return j.ParameterizedJob != nil && !j.Dispatched
  3965  }
  3966  
  3967  // VaultPolicies returns the set of Vault policies per task group, per task
  3968  func (j *Job) VaultPolicies() map[string]map[string]*Vault {
  3969  	policies := make(map[string]map[string]*Vault, len(j.TaskGroups))
  3970  
  3971  	for _, tg := range j.TaskGroups {
  3972  		tgPolicies := make(map[string]*Vault, len(tg.Tasks))
  3973  
  3974  		for _, task := range tg.Tasks {
  3975  			if task.Vault == nil {
  3976  				continue
  3977  			}
  3978  
  3979  			tgPolicies[task.Name] = task.Vault
  3980  		}
  3981  
  3982  		if len(tgPolicies) != 0 {
  3983  			policies[tg.Name] = tgPolicies
  3984  		}
  3985  	}
  3986  
  3987  	return policies
  3988  }
  3989  
  3990  // ConnectTasks returns the set of Consul Connect enabled tasks that will
  3991  // require a Service Identity token, if Consul ACLs are enabled.
  3992  //
  3993  // This method is meaningful only after the Job has passed through the job
  3994  // submission Mutator functions.
  3995  //
  3996  // task group -> []task
  3997  func (j *Job) ConnectTasks() map[string][]string {
  3998  	m := make(map[string][]string)
  3999  	for _, tg := range j.TaskGroups {
  4000  		for _, task := range tg.Tasks {
  4001  			if task.Kind.IsConnectProxy() {
  4002  				// todo(shoenig): when we support native, probably need to check
  4003  				//  an additional TBD TaskKind as well.
  4004  				m[tg.Name] = append(m[tg.Name], task.Name)
  4005  			}
  4006  		}
  4007  	}
  4008  	return m
  4009  }
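
        // Result sketch (illustrative; the names are hypothetical): a job
        // whose group "web" contains a Connect proxy task "connect-proxy-web"
        // yields
        //
        //	map[string][]string{"web": {"connect-proxy-web"}}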
  4010  
  4011  // RequiredSignals returns a mapping of task groups to tasks to their required
  4012  // set of signals
  4013  func (j *Job) RequiredSignals() map[string]map[string][]string {
  4014  	signals := make(map[string]map[string][]string)
  4015  
  4016  	for _, tg := range j.TaskGroups {
  4017  		for _, task := range tg.Tasks {
  4018  			// Use this local map as a set
  4019  			taskSignals := make(map[string]struct{})
  4020  
  4021  			// Check if the Vault change mode uses signals
  4022  			if task.Vault != nil && task.Vault.ChangeMode == VaultChangeModeSignal {
  4023  				taskSignals[task.Vault.ChangeSignal] = struct{}{}
  4024  			}
  4025  
  4026  			// If a user has specified a KillSignal, add it to required signals
  4027  			if task.KillSignal != "" {
  4028  				taskSignals[task.KillSignal] = struct{}{}
  4029  			}
  4030  
  4031  			// Check if any template change mode uses signals
  4032  			for _, t := range task.Templates {
  4033  				if t.ChangeMode != TemplateChangeModeSignal {
  4034  					continue
  4035  				}
  4036  
  4037  				taskSignals[t.ChangeSignal] = struct{}{}
  4038  			}
  4039  
  4040  			// Flatten and sort the signals
  4041  			l := len(taskSignals)
  4042  			if l == 0 {
  4043  				continue
  4044  			}
  4045  
  4046  			flat := make([]string, 0, l)
  4047  			for sig := range taskSignals {
  4048  				flat = append(flat, sig)
  4049  			}
  4050  
  4051  			sort.Strings(flat)
  4052  			tgSignals, ok := signals[tg.Name]
  4053  			if !ok {
  4054  				tgSignals = make(map[string][]string)
  4055  				signals[tg.Name] = tgSignals
  4056  			}
  4057  			tgSignals[task.Name] = flat
  4058  		}
  4059  
  4060  	}
  4061  
  4062  	return signals
  4063  }
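
        // Result sketch (illustrative): a task "app" in group "web" with
        // KillSignal "SIGTERM" and one template using ChangeMode "signal"
        // with ChangeSignal "SIGHUP" yields
        //
        //	map[string]map[string][]string{"web": {"app": {"SIGHUP", "SIGTERM"}}}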
  4064  
  4065  // SpecChanged determines if the functional specification has changed between
  4066  // two job versions.
  4067  func (j *Job) SpecChanged(new *Job) bool {
  4068  	if j == nil {
  4069  		return new != nil
  4070  	}
  4071  
  4072  	// Create a copy of the new job
  4073  	c := new.Copy()
  4074  
  4075  	// Update the new job so we can do a reflect
  4076  	c.Status = j.Status
  4077  	c.StatusDescription = j.StatusDescription
  4078  	c.Stable = j.Stable
  4079  	c.Version = j.Version
  4080  	c.CreateIndex = j.CreateIndex
  4081  	c.ModifyIndex = j.ModifyIndex
  4082  	c.JobModifyIndex = j.JobModifyIndex
  4083  	c.SubmitTime = j.SubmitTime
  4084  
  4085  	// cgbaker: FINISH: probably need some consideration of scaling policy ID here
  4086  
  4087  	// Deep equals the jobs
  4088  	return !reflect.DeepEqual(j, c)
  4089  }
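
        // Illustrative: the server-managed fields are masked before the deep
        // comparison, so changing them alone is not a spec change.
        //
        //	updated := current.Copy()
        //	updated.Version++
        //	changed := current.SpecChanged(updated) // false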
  4090  
  4091  func (j *Job) SetSubmitTime() {
  4092  	j.SubmitTime = time.Now().UTC().UnixNano()
  4093  }
  4094  
  4095  // JobListStub is used to return a subset of job information
  4096  // for the job list
  4097  type JobListStub struct {
  4098  	ID                string
  4099  	ParentID          string
  4100  	Name              string
  4101  	Datacenters       []string
  4102  	Type              string
  4103  	Priority          int
  4104  	Periodic          bool
  4105  	ParameterizedJob  bool
  4106  	Stop              bool
  4107  	Status            string
  4108  	StatusDescription string
  4109  	JobSummary        *JobSummary
  4110  	CreateIndex       uint64
  4111  	ModifyIndex       uint64
  4112  	JobModifyIndex    uint64
  4113  	SubmitTime        int64
  4114  }
  4115  
  4116  // JobSummary summarizes the state of the allocations of a job
  4117  type JobSummary struct {
  4118  	// JobID is the ID of the job the summary is for
  4119  	JobID string
  4120  
  4121  	// Namespace is the namespace of the job and its summary
  4122  	Namespace string
  4123  
  4124  	// Summary contains the summary per task group for the Job
  4125  	Summary map[string]TaskGroupSummary
  4126  
  4127  	// Children contains a summary for the children of this job.
  4128  	Children *JobChildrenSummary
  4129  
  4130  	// Raft Indexes
  4131  	CreateIndex uint64
  4132  	ModifyIndex uint64
  4133  }
  4134  
  4135  // Copy returns a new copy of JobSummary
  4136  func (js *JobSummary) Copy() *JobSummary {
  4137  	newJobSummary := new(JobSummary)
  4138  	*newJobSummary = *js
  4139  	newTGSummary := make(map[string]TaskGroupSummary, len(js.Summary))
  4140  	for k, v := range js.Summary {
  4141  		newTGSummary[k] = v
  4142  	}
  4143  	newJobSummary.Summary = newTGSummary
  4144  	newJobSummary.Children = newJobSummary.Children.Copy()
  4145  	return newJobSummary
  4146  }
  4147  
  4148  // JobChildrenSummary contains the summary of children job statuses
  4149  type JobChildrenSummary struct {
  4150  	Pending int64
  4151  	Running int64
  4152  	Dead    int64
  4153  }
  4154  
  4155  // Copy returns a new copy of a JobChildrenSummary
  4156  func (jc *JobChildrenSummary) Copy() *JobChildrenSummary {
  4157  	if jc == nil {
  4158  		return nil
  4159  	}
  4160  
  4161  	njc := new(JobChildrenSummary)
  4162  	*njc = *jc
  4163  	return njc
  4164  }
  4165  
  4166  // TaskGroupSummary summarizes the state of all the allocations of a
  4167  // particular TaskGroup
  4168  type TaskGroupSummary struct {
  4169  	Queued   int
  4170  	Complete int
  4171  	Failed   int
  4172  	Running  int
  4173  	Starting int
  4174  	Lost     int
  4175  }
  4176  
  4177  const (
  4178  	// Checks uses any registered health check state in combination with task
  4179  	// states to determine if an allocation is healthy.
  4180  	UpdateStrategyHealthCheck_Checks = "checks"
  4181  
  4182  	// TaskStates uses the task states of an allocation to determine if the
  4183  	// allocation is healthy.
  4184  	UpdateStrategyHealthCheck_TaskStates = "task_states"
  4185  
  4186  	// Manual allows the operator to manually signal to Nomad when an
  4187  	// allocation is healthy. This allows more advanced health checking that is
  4188  	// outside of the scope of Nomad.
  4189  	UpdateStrategyHealthCheck_Manual = "manual"
  4190  )
  4191  
  4192  var (
  4193  	// DefaultUpdateStrategy provides a baseline that can be used to upgrade
  4194  	// jobs with the old policy or for populating field defaults.
  4195  	DefaultUpdateStrategy = &UpdateStrategy{
  4196  		Stagger:          30 * time.Second,
  4197  		MaxParallel:      1,
  4198  		HealthCheck:      UpdateStrategyHealthCheck_Checks,
  4199  		MinHealthyTime:   10 * time.Second,
  4200  		HealthyDeadline:  5 * time.Minute,
  4201  		ProgressDeadline: 10 * time.Minute,
  4202  		AutoRevert:       false,
  4203  		AutoPromote:      false,
  4204  		Canary:           0,
  4205  	}
  4206  )
  4207  
  4208  // UpdateStrategy is used to modify how updates are done
  4209  type UpdateStrategy struct {
  4210  	// Stagger is used to determine the rate at which allocations are migrated
  4211  	// due to down or draining nodes.
  4212  	Stagger time.Duration
  4213  
  4214  	// MaxParallel is how many updates can be done in parallel
  4215  	MaxParallel int
  4216  
  4217  	// HealthCheck specifies the mechanism in which allocations are marked
  4218  	// healthy or unhealthy as part of a deployment.
  4219  	HealthCheck string
  4220  
  4221  	// MinHealthyTime is the minimum time an allocation must be in the healthy
  4222  	// state before it is marked as healthy, unblocking more allocations to be
  4223  	// rolled.
  4224  	MinHealthyTime time.Duration
  4225  
  4226  	// HealthyDeadline is the time in which an allocation must be marked as
  4227  	// healthy before it is automatically transitioned to unhealthy. This time
  4228  	// period doesn't count against the MinHealthyTime.
  4229  	HealthyDeadline time.Duration
  4230  
  4231  	// ProgressDeadline is the time in which an allocation as part of the
  4232  	// deployment must transition to healthy. If no allocation becomes healthy
  4233  	// after the deadline, the deployment is marked as failed. If the deadline
  4234  	// is zero, the first failure causes the deployment to fail.
  4235  	ProgressDeadline time.Duration
  4236  
  4237  	// AutoRevert declares that if a deployment fails because of unhealthy
  4238  	// allocations, there should be an attempt to auto-revert the job to a
  4239  	// stable version.
  4240  	AutoRevert bool
  4241  
  4242  	// AutoPromote declares that the deployment should be promoted when all canaries are
  4243  	// healthy
  4244  	AutoPromote bool
  4245  
  4246  	// Canary is the number of canaries to deploy when a change to the task
  4247  	// group is detected.
  4248  	Canary int
  4249  }
  4250  
  4251  func (u *UpdateStrategy) Copy() *UpdateStrategy {
  4252  	if u == nil {
  4253  		return nil
  4254  	}
  4255  
  4256  	copy := new(UpdateStrategy)
  4257  	*copy = *u
  4258  	return copy
  4259  }
  4260  
  4261  func (u *UpdateStrategy) Validate() error {
  4262  	if u == nil {
  4263  		return nil
  4264  	}
  4265  
  4266  	var mErr multierror.Error
  4267  	switch u.HealthCheck {
  4268  	case UpdateStrategyHealthCheck_Checks, UpdateStrategyHealthCheck_TaskStates, UpdateStrategyHealthCheck_Manual:
  4269  	default:
  4270  		multierror.Append(&mErr, fmt.Errorf("Invalid health check given: %q", u.HealthCheck))
  4271  	}
  4272  
  4273  	if u.MaxParallel < 0 {
  4274  		multierror.Append(&mErr, fmt.Errorf("Max parallel can not be less than zero: %d < 0", u.MaxParallel))
  4275  	}
  4276  	if u.Canary < 0 {
  4277  		multierror.Append(&mErr, fmt.Errorf("Canary count can not be less than zero: %d < 0", u.Canary))
  4278  	}
  4279  	if u.Canary == 0 && u.AutoPromote {
  4280  		multierror.Append(&mErr, fmt.Errorf("Auto Promote requires a Canary count greater than zero"))
  4281  	}
  4282  	if u.MinHealthyTime < 0 {
  4283  		multierror.Append(&mErr, fmt.Errorf("Minimum healthy time may not be less than zero: %v", u.MinHealthyTime))
  4284  	}
  4285  	if u.HealthyDeadline <= 0 {
  4286  		multierror.Append(&mErr, fmt.Errorf("Healthy deadline must be greater than zero: %v", u.HealthyDeadline))
  4287  	}
  4288  	if u.ProgressDeadline < 0 {
  4289  		multierror.Append(&mErr, fmt.Errorf("Progress deadline must be zero or greater: %v", u.ProgressDeadline))
  4290  	}
  4291  	if u.MinHealthyTime >= u.HealthyDeadline {
  4292  		multierror.Append(&mErr, fmt.Errorf("Minimum healthy time must be less than healthy deadline: %v > %v", u.MinHealthyTime, u.HealthyDeadline))
  4293  	}
  4294  	if u.ProgressDeadline != 0 && u.HealthyDeadline >= u.ProgressDeadline {
  4295  		multierror.Append(&mErr, fmt.Errorf("Healthy deadline must be less than progress deadline: %v > %v", u.HealthyDeadline, u.ProgressDeadline))
  4296  	}
  4297  	if u.Stagger <= 0 {
  4298  		multierror.Append(&mErr, fmt.Errorf("Stagger must be greater than zero: %v", u.Stagger))
  4299  	}
  4300  
  4301  	return mErr.ErrorOrNil()
  4302  }
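
        // Validation sketch (illustrative): AutoPromote without canaries is
        // rejected by the checks above.
        //
        //	u := DefaultUpdateStrategy.Copy()
        //	u.AutoPromote = true
        //	err := u.Validate() // "Auto Promote requires a Canary count greater than zero"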
  4303  
  4304  func (u *UpdateStrategy) IsEmpty() bool {
  4305  	if u == nil {
  4306  		return true
  4307  	}
  4308  
  4309  	return u.MaxParallel == 0
  4310  }
  4311  
  4312  // TODO(alexdadgar): Remove once no longer used by the scheduler.
  4313  // Rolling returns if a rolling strategy should be used
  4314  func (u *UpdateStrategy) Rolling() bool {
  4315  	return u.Stagger > 0 && u.MaxParallel > 0
  4316  }
  4317  
  4318  const (
  4319  	// PeriodicSpecCron is used for a cron spec.
  4320  	PeriodicSpecCron = "cron"
  4321  
  4322  	// PeriodicSpecTest is only used by unit tests. It is a sorted, comma
  4323  	// separated list of unix timestamps at which to launch.
  4324  	PeriodicSpecTest = "_internal_test"
  4325  )
  4326  
  4327  // PeriodicConfig defines the interval at which a job should be run.
  4328  type PeriodicConfig struct {
  4329  	// Enabled determines if the job should be run periodically.
  4330  	Enabled bool
  4331  
  4332  	// Spec specifies the interval the job should be run as. It is parsed based
  4333  	// on the SpecType.
  4334  	Spec string
  4335  
  4336  	// SpecType defines the format of the spec.
  4337  	SpecType string
  4338  
  4339  	// ProhibitOverlap enforces that spawned jobs do not run in parallel.
  4340  	ProhibitOverlap bool
  4341  
  4342  	// TimeZone is the user specified string that determines the time zone to
  4343  	// launch against. The time zones must be specified from IANA Time Zone
  4344  	// database, such as "America/New_York".
  4345  	// Reference: https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
  4346  	// Reference: https://www.iana.org/time-zones
  4347  	TimeZone string
  4348  
  4349  	// location is the time zone to evaluate the launch time against
  4350  	location *time.Location
  4351  }
  4352  
  4353  func (p *PeriodicConfig) Copy() *PeriodicConfig {
  4354  	if p == nil {
  4355  		return nil
  4356  	}
  4357  	np := new(PeriodicConfig)
  4358  	*np = *p
  4359  	return np
  4360  }
  4361  
  4362  func (p *PeriodicConfig) Validate() error {
  4363  	if !p.Enabled {
  4364  		return nil
  4365  	}
  4366  
  4367  	var mErr multierror.Error
  4368  	if p.Spec == "" {
  4369  		multierror.Append(&mErr, fmt.Errorf("Must specify a spec"))
  4370  	}
  4371  
  4372  	// Check if we got a valid time zone
  4373  	if p.TimeZone != "" {
  4374  		if _, err := time.LoadLocation(p.TimeZone); err != nil {
  4375  			multierror.Append(&mErr, fmt.Errorf("Invalid time zone %q: %v", p.TimeZone, err))
  4376  		}
  4377  	}
  4378  
  4379  	switch p.SpecType {
  4380  	case PeriodicSpecCron:
  4381  		// Validate the cron spec
  4382  		if _, err := cronexpr.Parse(p.Spec); err != nil {
  4383  			multierror.Append(&mErr, fmt.Errorf("Invalid cron spec %q: %v", p.Spec, err))
  4384  		}
  4385  	case PeriodicSpecTest:
  4386  		// No-op
  4387  	default:
  4388  		multierror.Append(&mErr, fmt.Errorf("Unknown periodic specification type %q", p.SpecType))
  4389  	}
  4390  
  4391  	return mErr.ErrorOrNil()
  4392  }
  4393  
  4394  func (p *PeriodicConfig) Canonicalize() {
  4395  	// Load the location, falling back to UTC if the time zone is invalid
  4396  	l, err := time.LoadLocation(p.TimeZone)
  4397  	if err != nil {
  4398  		l = time.UTC
  4399  	}
  4400  
  4401  	p.location = l
  4402  }
  4403  
  4404  // CronParseNext is a helper that parses the next time for the given expression
  4405  // but captures any panic that may occur in the underlying library.
  4406  func CronParseNext(e *cronexpr.Expression, fromTime time.Time, spec string) (t time.Time, err error) {
  4407  	defer func() {
  4408  		if recover() != nil {
  4409  			t = time.Time{}
  4410  			err = fmt.Errorf("failed parsing cron expression: %q", spec)
  4411  		}
  4412  	}()
  4413  
  4414  	return e.Next(fromTime), nil
  4415  }
  4416  
  4417  // Next returns the closest time instant matching the spec that is after the
  4418  // passed time. If no matching instance exists, the zero value of time.Time is
  4419  // returned. The `time.Location` of the returned value matches that of the
  4420  // passed time.
  4421  func (p *PeriodicConfig) Next(fromTime time.Time) (time.Time, error) {
  4422  	switch p.SpecType {
  4423  	case PeriodicSpecCron:
  4424  		if e, err := cronexpr.Parse(p.Spec); err == nil {
  4425  			return CronParseNext(e, fromTime, p.Spec)
  4426  		}
  4427  	case PeriodicSpecTest:
  4428  		split := strings.Split(p.Spec, ",")
  4429  		if len(split) == 1 && split[0] == "" {
  4430  			return time.Time{}, nil
  4431  		}
  4432  
  4433  		// Parse the times
  4434  		times := make([]time.Time, len(split))
  4435  		for i, s := range split {
  4436  			unix, err := strconv.Atoi(s)
  4437  			if err != nil {
  4438  				return time.Time{}, nil
  4439  			}
  4440  
  4441  			times[i] = time.Unix(int64(unix), 0)
  4442  		}
  4443  
  4444  		// Find the next match
  4445  		for _, next := range times {
  4446  			if fromTime.Before(next) {
  4447  				return next, nil
  4448  			}
  4449  		}
  4450  	}
  4451  
  4452  	return time.Time{}, nil
  4453  }
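
        // Usage sketch (illustrative):
        //
        //	p := &PeriodicConfig{Enabled: true, SpecType: PeriodicSpecCron, Spec: "0 * * * *"}
        //	next, err := p.Next(time.Now()) // top of the next hour, or a zero time for bad specs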
  4454  
  4455  // GetLocation returns the location to use for determining the time zone to run
  4456  // the periodic job against.
  4457  func (p *PeriodicConfig) GetLocation() *time.Location {
  4458  	// Jobs submitted before 0.5.5 will not have this set
  4459  	if p.location != nil {
  4460  		return p.location
  4461  	}
  4462  
  4463  	return time.UTC
  4464  }
  4465  
  4466  const (
  4467  	// PeriodicLaunchSuffix is the string appended to the periodic job's ID
  4468  	// when launching derived instances of it.
  4469  	PeriodicLaunchSuffix = "/periodic-"
  4470  )
  4471  
  4472  // PeriodicLaunch tracks the last launch time of a periodic job.
  4473  type PeriodicLaunch struct {
  4474  	ID        string    // ID of the periodic job.
  4475  	Namespace string    // Namespace of the periodic job
  4476  	Launch    time.Time // The last launch time.
  4477  
  4478  	// Raft Indexes
  4479  	CreateIndex uint64
  4480  	ModifyIndex uint64
  4481  }
  4482  
  4483  const (
  4484  	DispatchPayloadForbidden = "forbidden"
  4485  	DispatchPayloadOptional  = "optional"
  4486  	DispatchPayloadRequired  = "required"
  4487  
  4488  	// DispatchLaunchSuffix is the string appended to the parameterized job's ID
  4489  	// when dispatching instances of it.
  4490  	DispatchLaunchSuffix = "/dispatch-"
  4491  )
  4492  
  4493  // ParameterizedJobConfig is used to configure the parameterized job
  4494  type ParameterizedJobConfig struct {
  4495  	// Payload configures the payload requirements
  4496  	Payload string
  4497  
  4498  	// MetaRequired is the set of metadata keys that must be specified by the dispatcher
  4499  	MetaRequired []string
  4500  
  4501  	// MetaOptional is the set of metadata keys that may be specified by the dispatcher
  4502  	MetaOptional []string
  4503  }
  4504  
  4505  func (d *ParameterizedJobConfig) Validate() error {
  4506  	var mErr multierror.Error
  4507  	switch d.Payload {
  4508  	case DispatchPayloadOptional, DispatchPayloadRequired, DispatchPayloadForbidden:
  4509  	default:
  4510  		multierror.Append(&mErr, fmt.Errorf("Unknown payload requirement: %q", d.Payload))
  4511  	}
  4512  
  4513  	// Check that the meta configurations are disjoint sets
  4514  	disjoint, offending := helper.SliceSetDisjoint(d.MetaRequired, d.MetaOptional)
  4515  	if !disjoint {
  4516  		multierror.Append(&mErr, fmt.Errorf("Required and optional meta keys should be disjoint. Following keys exist in both: %v", offending))
  4517  	}
  4518  
  4519  	return mErr.ErrorOrNil()
  4520  }
  4521  
  4522  func (d *ParameterizedJobConfig) Canonicalize() {
  4523  	if d.Payload == "" {
  4524  		d.Payload = DispatchPayloadOptional
  4525  	}
  4526  }
  4527  
  4528  func (d *ParameterizedJobConfig) Copy() *ParameterizedJobConfig {
  4529  	if d == nil {
  4530  		return nil
  4531  	}
  4532  	nd := new(ParameterizedJobConfig)
  4533  	*nd = *d
  4534  	nd.MetaOptional = helper.CopySliceString(nd.MetaOptional)
  4535  	nd.MetaRequired = helper.CopySliceString(nd.MetaRequired)
  4536  	return nd
  4537  }
  4538  
  4539  // DispatchedID returns an ID appropriate for a job dispatched against a
  4540  // particular parameterized job
  4541  func DispatchedID(templateID string, t time.Time) string {
  4542  	u := uuid.Generate()[:8]
  4543  	return fmt.Sprintf("%s%s%d-%s", templateID, DispatchLaunchSuffix, t.Unix(), u)
  4544  }
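
        // Illustrative: DispatchedID("encrypt", t) produces an ID shaped like
        // "encrypt/dispatch-1577836800-966a7996"; the timestamp and short UUID
        // shown here are made up.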
  4545  
  4546  // DispatchPayloadConfig configures how a task gets its input from a job dispatch
  4547  type DispatchPayloadConfig struct {
  4548  	// File specifies a relative path to where the input data should be written
  4549  	File string
  4550  }
  4551  
  4552  func (d *DispatchPayloadConfig) Copy() *DispatchPayloadConfig {
  4553  	if d == nil {
  4554  		return nil
  4555  	}
  4556  	nd := new(DispatchPayloadConfig)
  4557  	*nd = *d
  4558  	return nd
  4559  }
  4560  
  4561  func (d *DispatchPayloadConfig) Validate() error {
  4562  	// Verify the destination doesn't escape
  4563  	escaped, err := PathEscapesAllocDir("task/local/", d.File)
  4564  	if err != nil {
  4565  		return fmt.Errorf("invalid destination path: %v", err)
  4566  	} else if escaped {
  4567  		return fmt.Errorf("destination escapes allocation directory")
  4568  	}
  4569  
  4570  	return nil
  4571  }
  4572  
  4573  const (
  4574  	TaskLifecycleHookPrestart = "prestart"
  4575  )
  4576  
  4577  type TaskLifecycleConfig struct {
  4578  	Hook    string
  4579  	Sidecar bool
  4580  }
  4581  
  4582  func (d *TaskLifecycleConfig) Copy() *TaskLifecycleConfig {
  4583  	if d == nil {
  4584  		return nil
  4585  	}
  4586  	nd := new(TaskLifecycleConfig)
  4587  	*nd = *d
  4588  	return nd
  4589  }
  4590  
  4591  func (d *TaskLifecycleConfig) Validate() error {
  4592  	if d == nil {
  4593  		return nil
  4594  	}
  4595  
  4596  	switch d.Hook {
  4597  	case TaskLifecycleHookPrestart:
  4598  	case "":
  4599  		return fmt.Errorf("no lifecycle hook provided")
  4600  	default:
  4601  		return fmt.Errorf("invalid hook: %v", d.Hook)
  4602  	}
  4603  
  4604  	return nil
  4605  }
  4606  
  4607  var (
  4608  	// These default restart policies need to be in sync with
  4609  	// Canonicalize in api/tasks.go
  4610  
  4611  	DefaultServiceJobRestartPolicy = RestartPolicy{
  4612  		Delay:    15 * time.Second,
  4613  		Attempts: 2,
  4614  		Interval: 30 * time.Minute,
  4615  		Mode:     RestartPolicyModeFail,
  4616  	}
  4617  	DefaultBatchJobRestartPolicy = RestartPolicy{
  4618  		Delay:    15 * time.Second,
  4619  		Attempts: 3,
  4620  		Interval: 24 * time.Hour,
  4621  		Mode:     RestartPolicyModeFail,
  4622  	}
  4623  )
  4624  
  4625  var (
  4626  	// These default reschedule policies need to be in sync with
  4627  	// NewDefaultReschedulePolicy in api/tasks.go
  4628  
  4629  	DefaultServiceJobReschedulePolicy = ReschedulePolicy{
  4630  		Delay:         30 * time.Second,
  4631  		DelayFunction: "exponential",
  4632  		MaxDelay:      1 * time.Hour,
  4633  		Unlimited:     true,
  4634  	}
  4635  	DefaultBatchJobReschedulePolicy = ReschedulePolicy{
  4636  		Attempts:      1,
  4637  		Interval:      24 * time.Hour,
  4638  		Delay:         5 * time.Second,
  4639  		DelayFunction: "constant",
  4640  	}
  4641  )
  4642  
  4643  const (
  4644  	// RestartPolicyModeDelay causes an artificial delay until the next interval is
  4645  	// reached when the specified attempts have been reached in the interval.
  4646  	RestartPolicyModeDelay = "delay"
  4647  
  4648  	// RestartPolicyModeFail causes a job to fail if the specified number of
  4649  	// attempts are reached within an interval.
  4650  	RestartPolicyModeFail = "fail"
  4651  
  4652  	// RestartPolicyMinInterval is the minimum interval that is accepted for a
  4653  	// restart policy.
  4654  	RestartPolicyMinInterval = 5 * time.Second
  4655  
  4656  	// ReasonWithinPolicy describes restart events that are within policy
  4657  	ReasonWithinPolicy = "Restart within policy"
  4658  )
  4659  
  4660  // JobScalingEvents contains the scaling events for a given job
  4661  type JobScalingEvents struct {
  4662  	Namespace string
  4663  	JobID     string
  4664  
  4665  	// ScalingEvents is indexed by target; currently this is just the task
  4666  	// group. Each indexed array is sorted from newest to oldest event and
  4667  	// should have fewer than JobTrackedScalingEvents entries.
  4668  	ScalingEvents map[string][]*ScalingEvent
  4669  
  4670  	// Raft index
  4671  	ModifyIndex uint64
  4672  }
  4673  
  4674  // NewScalingEvent is a factory method for ScalingEvent objects
  4675  func NewScalingEvent(message string) *ScalingEvent {
  4676  	return &ScalingEvent{
  4677  		Time:    time.Now().Unix(),
  4678  		Message: message,
  4679  	}
  4680  }
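
        // Usage sketch (illustrative; evalID is a hypothetical variable) using
        // the fluent setters defined below:
        //
        //	event := NewScalingEvent("group scaled up").SetEvalID(evalID)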
  4681  
  4682  // ScalingEvent describes a scaling event against a Job
  4683  type ScalingEvent struct {
  4684  	// Unix Nanosecond timestamp for the scaling event
  4685  	Time int64
  4686  
  4687  	// Count is the new scaling count, if provided
  4688  	Count *int64
  4689  
  4690  	// Message is the message describing a scaling event
  4691  	Message string
  4692  
  4693  	// Error indicates an error state for this scaling event
  4694  	Error bool
  4695  
  4696  	// Meta is a map of metadata returned during a scaling event
  4697  	Meta map[string]interface{}
  4698  
  4699  	// EvalID is the ID for an evaluation if one was created as part of a scaling event
  4700  	EvalID *string
  4701  
  4702  	// Raft index
  4703  	CreateIndex uint64
  4704  }
  4705  
  4706  func (e *ScalingEvent) SetError(error bool) *ScalingEvent {
  4707  	e.Error = error
  4708  	return e
  4709  }
  4710  
  4711  func (e *ScalingEvent) SetMeta(meta map[string]interface{}) *ScalingEvent {
  4712  	e.Meta = meta
  4713  	return e
  4714  }
  4715  
  4716  func (e *ScalingEvent) SetEvalID(evalID string) *ScalingEvent {
  4717  	e.EvalID = &evalID
  4718  	return e
  4719  }
  4720  
  4721  // ScalingEventRequest is used by the Job.Scale endpoint
  4722  // to register scaling events
  4723  type ScalingEventRequest struct {
  4724  	Namespace string
  4725  	JobID     string
  4726  	TaskGroup string
  4727  
  4728  	ScalingEvent *ScalingEvent
  4729  }
  4730  
  4731  // ScalingPolicy specifies the scaling policy for a scaling target
  4732  type ScalingPolicy struct {
  4733  	// ID is a generated UUID used for looking up the scaling policy
  4734  	ID string
  4735  
  4736  	// Target contains information about the target of the scaling policy, like job and group
  4737  	Target map[string]string
  4738  
  4739  	// Policy is an opaque description of the scaling policy, passed to the autoscaler
  4740  	Policy map[string]interface{}
  4741  
  4742  	// Min is the minimum allowable scaling count for this target
  4743  	Min int64
  4744  
  4745  	// Max is the maximum allowable scaling count for this target
  4746  	Max int64
  4747  
  4748  	// Enabled indicates whether this policy has been enabled/disabled
  4749  	Enabled bool
  4750  
  4751  	CreateIndex uint64
  4752  	ModifyIndex uint64
  4753  }
  4754  
  4755  const (
  4756  	ScalingTargetNamespace = "Namespace"
  4757  	ScalingTargetJob       = "Job"
  4758  	ScalingTargetGroup     = "Group"
  4759  )
  4760  
  4761  // Diff indicates whether the specification for a given scaling policy has changed
  4762  func (p *ScalingPolicy) Diff(p2 *ScalingPolicy) bool {
  4763  	copy := *p2
  4764  	copy.ID = p.ID
  4765  	copy.CreateIndex = p.CreateIndex
  4766  	copy.ModifyIndex = p.ModifyIndex
  4767  	return !reflect.DeepEqual(*p, copy)
  4768  }
  4769  
  4770  func (p *ScalingPolicy) TargetTaskGroup(job *Job, tg *TaskGroup) *ScalingPolicy {
  4771  	p.Target = map[string]string{
  4772  		ScalingTargetNamespace: job.Namespace,
  4773  		ScalingTargetJob:       job.ID,
  4774  		ScalingTargetGroup:     tg.Name,
  4775  	}
  4776  	return p
  4777  }
  4778  
  4779  func (p *ScalingPolicy) Stub() *ScalingPolicyListStub {
  4780  	stub := &ScalingPolicyListStub{
  4781  		ID:          p.ID,
  4782  		Target:      make(map[string]string),
  4783  		Enabled:     p.Enabled,
  4784  		CreateIndex: p.CreateIndex,
  4785  		ModifyIndex: p.ModifyIndex,
  4786  	}
  4787  	for k, v := range p.Target {
  4788  		stub.Target[k] = v
  4789  	}
  4790  	return stub
  4791  }
  4792  
  4793  // GetScalingPolicies returns a slice of all scaling policies for this job
  4794  func (j *Job) GetScalingPolicies() []*ScalingPolicy {
  4795  	ret := make([]*ScalingPolicy, 0)
  4796  
  4797  	for _, tg := range j.TaskGroups {
  4798  		if tg.Scaling != nil {
  4799  			ret = append(ret, tg.Scaling)
  4800  		}
  4801  	}
  4802  
  4803  	return ret
  4804  }
  4805  
  4806  // ScalingPolicyListStub is used to return a subset of scaling policy information
  4807  // for the scaling policy list
  4808  type ScalingPolicyListStub struct {
  4809  	ID          string
  4810  	Enabled     bool
  4811  	Target      map[string]string
  4812  	CreateIndex uint64
  4813  	ModifyIndex uint64
  4814  }
  4815  
  4816  // RestartPolicy configures how Tasks are restarted when they crash or fail.
  4817  type RestartPolicy struct {
  4818  	// Attempts is the number of restarts that will occur in an interval.
  4819  	Attempts int
  4820  
  4821  	// Interval is the duration within which we limit the number of
  4822  	// restarts.
  4823  	Interval time.Duration
  4824  
  4825  	// Delay is the time between a failure and a restart.
  4826  	Delay time.Duration
  4827  
  4828  	// Mode controls what happens when the task restarts more than Attempts times
  4829  	// in an interval.
  4830  	Mode string
  4831  }
  4832  
  4833  func (r *RestartPolicy) Copy() *RestartPolicy {
  4834  	if r == nil {
  4835  		return nil
  4836  	}
  4837  	nrp := new(RestartPolicy)
  4838  	*nrp = *r
  4839  	return nrp
  4840  }
  4841  
  4842  func (r *RestartPolicy) Validate() error {
  4843  	var mErr multierror.Error
  4844  	switch r.Mode {
  4845  	case RestartPolicyModeDelay, RestartPolicyModeFail:
  4846  	default:
  4847  		multierror.Append(&mErr, fmt.Errorf("Unsupported restart mode: %q", r.Mode))
  4848  	}
  4849  
  4850  	// Check for ambiguous/confusing settings
  4851  	if r.Attempts == 0 && r.Mode != RestartPolicyModeFail {
  4852  		multierror.Append(&mErr, fmt.Errorf("Restart policy %q with %d attempts is ambiguous", r.Mode, r.Attempts))
  4853  	}
  4854  
  4855  	if r.Interval.Nanoseconds() < RestartPolicyMinInterval.Nanoseconds() {
  4856  		multierror.Append(&mErr, fmt.Errorf("Interval can not be less than %v (got %v)", RestartPolicyMinInterval, r.Interval))
  4857  	}
  4858  	if time.Duration(r.Attempts)*r.Delay > r.Interval {
  4859  		multierror.Append(&mErr,
  4860  			fmt.Errorf("Nomad can't restart the TaskGroup %v times in an interval of %v with a delay of %v", r.Attempts, r.Interval, r.Delay))
  4861  	}
  4862  	return mErr.ErrorOrNil()
  4863  }
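
        // Validation sketch (illustrative): three 15s-delayed restarts cannot
        // fit in a 30s interval, so the last check above fails.
        //
        //	r := &RestartPolicy{Mode: RestartPolicyModeFail, Attempts: 3,
        //		Delay: 15 * time.Second, Interval: 30 * time.Second}
        //	err := r.Validate()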
  4864  
  4865  func NewRestartPolicy(jobType string) *RestartPolicy {
  4866  	switch jobType {
  4867  	case JobTypeService, JobTypeSystem:
  4868  		rp := DefaultServiceJobRestartPolicy
  4869  		return &rp
  4870  	case JobTypeBatch:
  4871  		rp := DefaultBatchJobRestartPolicy
  4872  		return &rp
  4873  	}
  4874  	return nil
  4875  }
  4876  
  4877  const ReschedulePolicyMinInterval = 15 * time.Second
  4878  const ReschedulePolicyMinDelay = 5 * time.Second
  4879  
  4880  var RescheduleDelayFunctions = [...]string{"constant", "exponential", "fibonacci"}
  4881  
  4882  // ReschedulePolicy configures how Tasks are rescheduled when they crash or fail.
  4883  type ReschedulePolicy struct {
  4884  	// Attempts limits the number of rescheduling attempts that can occur in an interval.
  4885  	Attempts int
  4886  
  4887  	// Interval is a duration in which we can limit the number of reschedule attempts.
  4888  	Interval time.Duration
  4889  
  4890  	// Delay is a minimum duration to wait between reschedule attempts.
  4891  	// The delay function determines how much subsequent reschedule attempts are delayed by.
  4892  	Delay time.Duration
  4893  
  4894  	// DelayFunction determines how the delay progressively changes on subsequent reschedule
  4895  	// attempts. Valid values are "exponential", "constant", and "fibonacci".
  4896  	DelayFunction string
  4897  
  4898  	// MaxDelay is an upper bound on the delay.
  4899  	MaxDelay time.Duration
  4900  
  4901  	// Unlimited allows infinite rescheduling attempts. Only allowed when delay is set
  4902  	// between reschedule attempts.
  4903  	Unlimited bool
  4904  }
  4905  
  4906  func (r *ReschedulePolicy) Copy() *ReschedulePolicy {
  4907  	if r == nil {
  4908  		return nil
  4909  	}
  4910  	nrp := new(ReschedulePolicy)
  4911  	*nrp = *r
  4912  	return nrp
  4913  }
  4914  
  4915  func (r *ReschedulePolicy) Enabled() bool {
  4916  	enabled := r != nil && (r.Attempts > 0 || r.Unlimited)
  4917  	return enabled
  4918  }
  4919  
  4920  // Validate uses different criteria to validate the reschedule policy:
  4921  // Delay must be a minimum of 5 seconds;
  4922  // the delay ceiling (MaxDelay) is ignored if the delay function is "constant";
  4923  // the number of possible attempts is validated given the interval, delay, and delay function.
  4924  func (r *ReschedulePolicy) Validate() error {
  4925  	if !r.Enabled() {
  4926  		return nil
  4927  	}
  4928  	var mErr multierror.Error
  4929  	// Check for ambiguous/confusing settings
  4930  	if r.Attempts > 0 {
  4931  		if r.Interval <= 0 {
  4932  			multierror.Append(&mErr, fmt.Errorf("Interval must be a non zero value if Attempts > 0"))
  4933  		}
  4934  		if r.Unlimited {
  4935  			multierror.Append(&mErr, fmt.Errorf("Reschedule Policy with Attempts = %v, Interval = %v, "+
  4936  				"and Unlimited = %v is ambiguous", r.Attempts, r.Interval, r.Unlimited))
  4937  			multierror.Append(&mErr, errors.New("If Attempts > 0, Unlimited cannot also be set to true"))
  4938  		}
  4939  	}
  4940  
  4941  	delayPreCheck := true
  4942  	// Delay should be bigger than the default
  4943  	if r.Delay.Nanoseconds() < ReschedulePolicyMinDelay.Nanoseconds() {
  4944  		multierror.Append(&mErr, fmt.Errorf("Delay cannot be less than %v (got %v)", ReschedulePolicyMinDelay, r.Delay))
  4945  		delayPreCheck = false
  4946  	}
  4947  
  4948  	// Must use a valid delay function
  4949  	if !isValidDelayFunction(r.DelayFunction) {
  4950  		multierror.Append(&mErr, fmt.Errorf("Invalid delay function %q, must be one of %q", r.DelayFunction, RescheduleDelayFunctions))
  4951  		delayPreCheck = false
  4952  	}
  4953  
  4954  	// Validate MaxDelay if not using linear delay progression
  4955  	if r.DelayFunction != "constant" {
  4956  		if r.MaxDelay.Nanoseconds() < ReschedulePolicyMinDelay.Nanoseconds() {
  4957  			multierror.Append(&mErr, fmt.Errorf("Max Delay cannot be less than %v (got %v)", ReschedulePolicyMinDelay, r.Delay))
  4958  			delayPreCheck = false
  4959  		}
  4960  		if r.MaxDelay < r.Delay {
  4961  			multierror.Append(&mErr, fmt.Errorf("Max Delay cannot be less than Delay %v (got %v)", r.Delay, r.MaxDelay))
  4962  			delayPreCheck = false
  4963  		}
  4964  
  4965  	}
  4966  
  4967  	// Validate Interval and other delay parameters if attempts are limited
  4968  	if !r.Unlimited {
  4969  		if r.Interval.Nanoseconds() < ReschedulePolicyMinInterval.Nanoseconds() {
  4970  			multierror.Append(&mErr, fmt.Errorf("Interval cannot be less than %v (got %v)", ReschedulePolicyMinInterval, r.Interval))
  4971  		}
  4972  		if !delayPreCheck {
  4973  			// We can't cross validate the rest of the delay params if delayPreCheck fails, so return early
  4974  			return mErr.ErrorOrNil()
  4975  		}
  4976  		crossValidationErr := r.validateDelayParams()
  4977  		if crossValidationErr != nil {
  4978  			multierror.Append(&mErr, crossValidationErr)
  4979  		}
  4980  	}
  4981  	return mErr.ErrorOrNil()
  4982  }
  4983  
  4984  func isValidDelayFunction(delayFunc string) bool {
  4985  	for _, value := range RescheduleDelayFunctions {
  4986  		if value == delayFunc {
  4987  			return true
  4988  		}
  4989  	}
  4990  	return false
  4991  }
  4992  
  4993  func (r *ReschedulePolicy) validateDelayParams() error {
  4994  	ok, possibleAttempts, recommendedInterval := r.viableAttempts()
  4995  	if ok {
  4996  		return nil
  4997  	}
  4998  	var mErr multierror.Error
  4999  	if r.DelayFunction == "constant" {
  5000  		multierror.Append(&mErr, fmt.Errorf("Nomad can only make %v attempts in %v with initial delay %v and "+
  5001  			"delay function %q", possibleAttempts, r.Interval, r.Delay, r.DelayFunction))
  5002  	} else {
  5003  		multierror.Append(&mErr, fmt.Errorf("Nomad can only make %v attempts in %v with initial delay %v, "+
  5004  			"delay function %q, and delay ceiling %v", possibleAttempts, r.Interval, r.Delay, r.DelayFunction, r.MaxDelay))
  5005  	}
  5006  	multierror.Append(&mErr, fmt.Errorf("Set the interval to at least %v to accommodate %v attempts", recommendedInterval.Round(time.Second), r.Attempts))
  5007  	return mErr.ErrorOrNil()
  5008  }
  5009  
  5010  func (r *ReschedulePolicy) viableAttempts() (bool, int, time.Duration) {
  5011  	var possibleAttempts int
  5012  	var recommendedInterval time.Duration
  5013  	valid := true
  5014  	switch r.DelayFunction {
  5015  	case "constant":
  5016  		recommendedInterval = time.Duration(r.Attempts) * r.Delay
  5017  		if r.Interval < recommendedInterval {
  5018  			possibleAttempts = int(r.Interval / r.Delay)
  5019  			valid = false
  5020  		}
  5021  	case "exponential":
  5022  		for i := 0; i < r.Attempts; i++ {
  5023  			nextDelay := time.Duration(math.Pow(2, float64(i))) * r.Delay
  5024  			if nextDelay > r.MaxDelay {
  5025  				nextDelay = r.MaxDelay
  5026  				recommendedInterval += nextDelay
  5027  			} else {
  5028  				recommendedInterval = nextDelay
  5029  			}
  5030  			if recommendedInterval < r.Interval {
  5031  				possibleAttempts++
  5032  			}
  5033  		}
  5034  		if possibleAttempts < r.Attempts {
  5035  			valid = false
  5036  		}
  5037  	case "fibonacci":
  5038  		var slots []time.Duration
  5039  		slots = append(slots, r.Delay)
  5040  		slots = append(slots, r.Delay)
  5041  		reachedCeiling := false
  5042  		for i := 2; i < r.Attempts; i++ {
  5043  			var nextDelay time.Duration
  5044  			if reachedCeiling {
  5045  				// switch to linear growth by MaxDelay once the ceiling is reached
  5046  				nextDelay = slots[i-1] + r.MaxDelay
  5047  			} else {
  5048  				nextDelay = slots[i-1] + slots[i-2]
  5049  				if nextDelay > r.MaxDelay {
  5050  					nextDelay = r.MaxDelay
  5051  					reachedCeiling = true
  5052  				}
  5053  			}
  5054  			slots = append(slots, nextDelay)
  5055  		}
  5056  		recommendedInterval = slots[len(slots)-1]
  5057  		if r.Interval < recommendedInterval {
  5058  			valid = false
  5059  			// calculate possible attempts
  5060  			for i := 0; i < len(slots); i++ {
  5061  				if slots[i] > r.Interval {
  5062  					possibleAttempts = i
  5063  					break
  5064  				}
  5065  			}
  5066  		}
  5067  	default:
  5068  		return false, 0, 0
  5069  	}
  5070  	if possibleAttempts < 0 { // can happen if delay is bigger than interval
  5071  		possibleAttempts = 0
  5072  	}
  5073  	return valid, possibleAttempts, recommendedInterval
  5074  }
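
        // Worked example (illustrative, not part of the original source): with
        // DelayFunction "constant", Attempts = 4 and Delay = 15s, the recommended
        // interval is 4 * 15s = 1m. An Interval of 45s only accommodates
        // int(45s / 15s) = 3 attempts, so viableAttempts returns (false, 3, 1m0s).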
  5075  
  5076  func NewReschedulePolicy(jobType string) *ReschedulePolicy {
  5077  	switch jobType {
  5078  	case JobTypeService:
  5079  		rp := DefaultServiceJobReschedulePolicy
  5080  		return &rp
  5081  	case JobTypeBatch:
  5082  		rp := DefaultBatchJobReschedulePolicy
  5083  		return &rp
  5084  	}
  5085  	return nil
  5086  }
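
        // Usage sketch (illustrative, not part of the original source):
        //
        //	rp := NewReschedulePolicy(JobTypeService) // copy of DefaultServiceJobReschedulePolicy
        //	rp = NewReschedulePolicy(JobTypeSystem)   // nil: system jobs are not rescheduled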
  5087  
  5088  const (
  5089  	MigrateStrategyHealthChecks = "checks"
  5090  	MigrateStrategyHealthStates = "task_states"
  5091  )
  5092  
  5093  type MigrateStrategy struct {
  5094  	MaxParallel     int
  5095  	HealthCheck     string
  5096  	MinHealthyTime  time.Duration
  5097  	HealthyDeadline time.Duration
  5098  }
  5099  
  5100  // DefaultMigrateStrategy is used for backwards compat with pre-0.8 Allocations
  5101  // that lack an update strategy.
  5102  //
  5103  // This function should match its counterpart in api/tasks.go
  5104  func DefaultMigrateStrategy() *MigrateStrategy {
  5105  	return &MigrateStrategy{
  5106  		MaxParallel:     1,
  5107  		HealthCheck:     MigrateStrategyHealthChecks,
  5108  		MinHealthyTime:  10 * time.Second,
  5109  		HealthyDeadline: 5 * time.Minute,
  5110  	}
  5111  }
  5112  
  5113  func (m *MigrateStrategy) Validate() error {
  5114  	var mErr multierror.Error
  5115  
  5116  	if m.MaxParallel < 0 {
  5117  		multierror.Append(&mErr, fmt.Errorf("MaxParallel must be >= 0 but found %d", m.MaxParallel))
  5118  	}
  5119  
  5120  	switch m.HealthCheck {
  5121  	case MigrateStrategyHealthChecks, MigrateStrategyHealthStates:
  5122  		// ok
  5123  	case "":
  5124  		if m.MaxParallel > 0 {
  5125  			multierror.Append(&mErr, fmt.Errorf("Missing HealthCheck"))
  5126  		}
  5127  	default:
  5128  		multierror.Append(&mErr, fmt.Errorf("Invalid HealthCheck: %q", m.HealthCheck))
  5129  	}
  5130  
  5131  	if m.MinHealthyTime < 0 {
  5132  		multierror.Append(&mErr, fmt.Errorf("MinHealthyTime is %s and must be >= 0", m.MinHealthyTime))
  5133  	}
  5134  
  5135  	if m.HealthyDeadline < 0 {
  5136  		multierror.Append(&mErr, fmt.Errorf("HealthyDeadline is %s and must be >= 0", m.HealthyDeadline))
  5137  	}
  5138  
  5139  	if m.MinHealthyTime > m.HealthyDeadline {
  5140  		multierror.Append(&mErr, fmt.Errorf("MinHealthyTime must be less than HealthyDeadline"))
  5141  	}
  5142  
  5143  	return mErr.ErrorOrNil()
  5144  }
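
        // Example (illustrative sketch, not part of the original source): a strategy
        // whose MinHealthyTime exceeds its HealthyDeadline fails validation.
        //
        //	ms := &MigrateStrategy{
        //		MaxParallel:     2,
        //		HealthCheck:     MigrateStrategyHealthChecks,
        //		MinHealthyTime:  10 * time.Minute,
        //		HealthyDeadline: 5 * time.Minute,
        //	}
        //	err := ms.Validate() // "MinHealthyTime must be less than HealthyDeadline"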
  5145  
  5146  // TaskGroup is an atomic unit of placement. Each task group belongs to
  5147  // a job and may contain any number of tasks. A task group supports running
  5148  // many replicas using the same configuration.
  5149  type TaskGroup struct {
  5150  	// Name of the task group
  5151  	Name string
  5152  
  5153  	// Count is the number of replicas of this task group that should
  5154  	// be scheduled.
  5155  	Count int
  5156  
  5157  	// Update is used to control the update strategy for this task group
  5158  	Update *UpdateStrategy
  5159  
  5160  	// Migrate is used to control the migration strategy for this task group
  5161  	Migrate *MigrateStrategy
  5162  
  5163  	// Constraints can be specified at a task group level and apply to
  5164  	// all the tasks contained.
  5165  	Constraints []*Constraint
  5166  
  5167  	// Scaling is the list of autoscaling policies for the TaskGroup
  5168  	Scaling *ScalingPolicy
  5169  
  5170  	// RestartPolicy of a TaskGroup
  5171  	RestartPolicy *RestartPolicy
  5172  
  5173  	// Tasks are the collection of tasks that this task group needs to run
  5174  	Tasks []*Task
  5175  
  5176  	// EphemeralDisk is the disk resources that the task group requests
  5177  	EphemeralDisk *EphemeralDisk
  5178  
  5179  	// Meta is used to associate arbitrary metadata with this
  5180  	// task group. This is opaque to Nomad.
  5181  	Meta map[string]string
  5182  
  5183  	// ReschedulePolicy is used to configure how the scheduler should
  5184  	// retry failed allocations.
  5185  	ReschedulePolicy *ReschedulePolicy
  5186  
  5187  	// Affinities can be specified at the task group level to express
  5188  	// scheduling preferences.
  5189  	Affinities []*Affinity
  5190  
  5191  	// Spread can be specified at the task group level to express spreading
  5192  	// allocations across a desired attribute, such as datacenter
  5193  	Spreads []*Spread
  5194  
  5195  	// Networks are the network configuration for the task group. This can be
  5196  	// overridden in the task.
  5197  	Networks Networks
  5198  
  5199  	// Services this group provides
  5200  	Services []*Service
  5201  
  5202  	// Volumes is a map of volumes that have been requested by the task group.
  5203  	Volumes map[string]*VolumeRequest
  5204  
  5205  	// ShutdownDelay is the amount of time to wait between deregistering
  5206  	// group services in consul and stopping tasks.
  5207  	ShutdownDelay *time.Duration
  5208  }
  5209  
  5210  func (tg *TaskGroup) Copy() *TaskGroup {
  5211  	if tg == nil {
  5212  		return nil
  5213  	}
  5214  	ntg := new(TaskGroup)
  5215  	*ntg = *tg
  5216  	ntg.Update = ntg.Update.Copy()
  5217  	ntg.Constraints = CopySliceConstraints(ntg.Constraints)
  5218  	ntg.RestartPolicy = ntg.RestartPolicy.Copy()
  5219  	ntg.ReschedulePolicy = ntg.ReschedulePolicy.Copy()
  5220  	ntg.Affinities = CopySliceAffinities(ntg.Affinities)
  5221  	ntg.Spreads = CopySliceSpreads(ntg.Spreads)
  5222  	ntg.Volumes = CopyMapVolumeRequest(ntg.Volumes)
  5223  	ntg.Scaling = CopyScalingPolicy(ntg.Scaling)
  5224  
  5225  	// Copy the network objects
  5226  	if tg.Networks != nil {
  5227  		n := len(tg.Networks)
  5228  		ntg.Networks = make([]*NetworkResource, n)
  5229  		for i := 0; i < n; i++ {
  5230  			ntg.Networks[i] = tg.Networks[i].Copy()
  5231  		}
  5232  	}
  5233  
  5234  	if tg.Tasks != nil {
  5235  		tasks := make([]*Task, len(ntg.Tasks))
  5236  		for i, t := range ntg.Tasks {
  5237  			tasks[i] = t.Copy()
  5238  		}
  5239  		ntg.Tasks = tasks
  5240  	}
  5241  
  5242  	ntg.Meta = helper.CopyMapStringString(ntg.Meta)
  5243  
  5244  	if tg.EphemeralDisk != nil {
  5245  		ntg.EphemeralDisk = tg.EphemeralDisk.Copy()
  5246  	}
  5247  
  5248  	if tg.Services != nil {
  5249  		ntg.Services = make([]*Service, len(tg.Services))
  5250  		for i, s := range tg.Services {
  5251  			ntg.Services[i] = s.Copy()
  5252  		}
  5253  	}
  5254  
  5255  	if tg.ShutdownDelay != nil {
  5256  		ntg.ShutdownDelay = helper.TimeToPtr(*tg.ShutdownDelay)
  5257  	}
  5258  
  5259  	return ntg
  5260  }
  5261  
  5262  // Canonicalize is used to canonicalize fields in the TaskGroup.
  5263  func (tg *TaskGroup) Canonicalize(job *Job) {
  5264  	// Ensure that an empty and nil map are treated the same to avoid scheduling
  5265  	// problems since we use reflect DeepEquals.
  5266  	if len(tg.Meta) == 0 {
  5267  		tg.Meta = nil
  5268  	}
  5269  
  5270  	// Set the default restart policy.
  5271  	if tg.RestartPolicy == nil {
  5272  		tg.RestartPolicy = NewRestartPolicy(job.Type)
  5273  	}
  5274  
  5275  	if tg.ReschedulePolicy == nil {
  5276  		tg.ReschedulePolicy = NewReschedulePolicy(job.Type)
  5277  	}
  5278  
  5279  	// Canonicalize Migrate for service jobs
  5280  	if job.Type == JobTypeService && tg.Migrate == nil {
  5281  		tg.Migrate = DefaultMigrateStrategy()
  5282  	}
  5283  
  5284  	// Set a default ephemeral disk object if the user has not requested for one
  5285  	if tg.EphemeralDisk == nil {
  5286  		tg.EphemeralDisk = DefaultEphemeralDisk()
  5287  	}
  5288  
  5289  	for _, service := range tg.Services {
  5290  		service.Canonicalize(job.Name, tg.Name, "group")
  5291  	}
  5292  
  5293  	for _, network := range tg.Networks {
  5294  		network.Canonicalize()
  5295  	}
  5296  
  5297  	for _, task := range tg.Tasks {
  5298  		task.Canonicalize(job, tg)
  5299  	}
  5300  }
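
        // Example (illustrative sketch, not part of the original source): a bare
        // group in a service job picks up defaults after Canonicalize.
        //
        //	job := &Job{Name: "web", Type: JobTypeService}
        //	tg := &TaskGroup{Name: "frontend", Count: 1}
        //	tg.Canonicalize(job)
        //	// tg.RestartPolicy, tg.ReschedulePolicy, tg.Migrate, and
        //	// tg.EphemeralDisk are now non-nil defaults.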
  5301  
  5302  // Validate is used to sanity check a task group
  5303  func (tg *TaskGroup) Validate(j *Job) error {
  5304  	var mErr multierror.Error
  5305  	if tg.Name == "" {
  5306  		mErr.Errors = append(mErr.Errors, errors.New("Missing task group name"))
  5307  	}
  5308  	if tg.Count < 0 {
  5309  		mErr.Errors = append(mErr.Errors, errors.New("Task group count can't be negative"))
  5310  	}
  5311  	if len(tg.Tasks) == 0 {
  5312  		mErr.Errors = append(mErr.Errors, errors.New("Missing tasks for task group"))
  5313  	}
  5314  	for idx, constr := range tg.Constraints {
  5315  		if err := constr.Validate(); err != nil {
  5316  			outer := fmt.Errorf("Constraint %d validation failed: %s", idx+1, err)
  5317  			mErr.Errors = append(mErr.Errors, outer)
  5318  		}
  5319  	}
  5320  	if j.Type == JobTypeSystem {
  5321  		if tg.Affinities != nil {
  5322  			mErr.Errors = append(mErr.Errors, fmt.Errorf("System jobs may not have an affinity stanza"))
  5323  		}
  5324  	} else {
  5325  		for idx, affinity := range tg.Affinities {
  5326  			if err := affinity.Validate(); err != nil {
  5327  				outer := fmt.Errorf("Affinity %d validation failed: %s", idx+1, err)
  5328  				mErr.Errors = append(mErr.Errors, outer)
  5329  			}
  5330  		}
  5331  	}
  5332  
  5333  	if tg.RestartPolicy != nil {
  5334  		if err := tg.RestartPolicy.Validate(); err != nil {
  5335  			mErr.Errors = append(mErr.Errors, err)
  5336  		}
  5337  	} else {
  5338  		mErr.Errors = append(mErr.Errors, fmt.Errorf("Task Group %v should have a restart policy", tg.Name))
  5339  	}
  5340  
  5341  	if j.Type == JobTypeSystem {
  5342  		if tg.Spreads != nil {
  5343  			mErr.Errors = append(mErr.Errors, fmt.Errorf("System jobs may not have a spread stanza"))
  5344  		}
  5345  	} else {
  5346  		for idx, spread := range tg.Spreads {
  5347  			if err := spread.Validate(); err != nil {
  5348  				outer := fmt.Errorf("Spread %d validation failed: %s", idx+1, err)
  5349  				mErr.Errors = append(mErr.Errors, outer)
  5350  			}
  5351  		}
  5352  	}
  5353  
  5354  	if j.Type == JobTypeSystem {
  5355  		if tg.ReschedulePolicy != nil {
  5356  			mErr.Errors = append(mErr.Errors, fmt.Errorf("System jobs should not have a reschedule policy"))
  5357  		}
  5358  	} else {
  5359  		if tg.ReschedulePolicy != nil {
  5360  			if err := tg.ReschedulePolicy.Validate(); err != nil {
  5361  				mErr.Errors = append(mErr.Errors, err)
  5362  			}
  5363  		} else {
  5364  			mErr.Errors = append(mErr.Errors, fmt.Errorf("Task Group %v should have a reschedule policy", tg.Name))
  5365  		}
  5366  	}
  5367  
  5368  	if tg.EphemeralDisk != nil {
  5369  		if err := tg.EphemeralDisk.Validate(); err != nil {
  5370  			mErr.Errors = append(mErr.Errors, err)
  5371  		}
  5372  	} else {
  5373  		mErr.Errors = append(mErr.Errors, fmt.Errorf("Task Group %v should have an ephemeral disk object", tg.Name))
  5374  	}
  5375  
  5376  	// Validate the update strategy
  5377  	if u := tg.Update; u != nil {
  5378  		switch j.Type {
  5379  		case JobTypeService, JobTypeSystem:
  5380  		default:
  5381  			mErr.Errors = append(mErr.Errors, fmt.Errorf("Job type %q does not allow update block", j.Type))
  5382  		}
  5383  		if err := u.Validate(); err != nil {
  5384  			mErr.Errors = append(mErr.Errors, err)
  5385  		}
  5386  	}
  5387  
  5388  	// Validate the migration strategy
  5389  	switch j.Type {
  5390  	case JobTypeService:
  5391  		if tg.Migrate != nil {
  5392  			if err := tg.Migrate.Validate(); err != nil {
  5393  				mErr.Errors = append(mErr.Errors, err)
  5394  			}
  5395  		}
  5396  	default:
  5397  		if tg.Migrate != nil {
  5398  			mErr.Errors = append(mErr.Errors, fmt.Errorf("Job type %q does not allow migrate block", j.Type))
  5399  		}
  5400  	}
  5401  
  5402  	// Check that there is only one leader task if any
  5403  	tasks := make(map[string]int)
  5404  	leaderTasks := 0
  5405  	for idx, task := range tg.Tasks {
  5406  		if task.Name == "" {
  5407  			mErr.Errors = append(mErr.Errors, fmt.Errorf("Task %d missing name", idx+1))
  5408  		} else if existing, ok := tasks[task.Name]; ok {
  5409  			mErr.Errors = append(mErr.Errors, fmt.Errorf("Task %d redefines '%s' from task %d", idx+1, task.Name, existing+1))
  5410  		} else {
  5411  			tasks[task.Name] = idx
  5412  		}
  5413  
  5414  		if task.Leader {
  5415  			leaderTasks++
  5416  		}
  5417  	}
  5418  
  5419  	if leaderTasks > 1 {
  5420  		mErr.Errors = append(mErr.Errors, fmt.Errorf("Only one task may be marked as leader"))
  5421  	}
  5422  
  5423  	// Validate the Host Volumes
  5424  	for name, decl := range tg.Volumes {
  5425  		if !(decl.Type == VolumeTypeHost ||
  5426  			decl.Type == VolumeTypeCSI) {
  5427  			mErr.Errors = append(mErr.Errors, fmt.Errorf("Volume %s has unrecognised type %s", name, decl.Type))
  5428  			continue
  5429  		}
  5430  
  5431  		if decl.Source == "" {
  5432  			mErr.Errors = append(mErr.Errors, fmt.Errorf("Volume %s has an empty source", name))
  5433  		}
  5434  	}
  5435  
  5436  	// Validate task group and task network resources
  5437  	if err := tg.validateNetworks(); err != nil {
  5438  		outer := fmt.Errorf("Task group network validation failed: %v", err)
  5439  		mErr.Errors = append(mErr.Errors, outer)
  5440  	}
  5441  
  5442  	// Validate task group and task services
  5443  	if err := tg.validateServices(); err != nil {
  5444  		outer := fmt.Errorf("Task group service validation failed: %v", err)
  5445  		mErr.Errors = append(mErr.Errors, outer)
  5446  	}
  5447  
  5448  	// Validate the scaling policy
  5449  	if err := tg.validateScalingPolicy(); err != nil {
  5450  		outer := fmt.Errorf("Task group scaling policy validation failed: %v", err)
  5451  		mErr.Errors = append(mErr.Errors, outer)
  5452  	}
  5453  
  5454  	// Validate the tasks
  5455  	for _, task := range tg.Tasks {
  5456  		// Validate the task does not reference undefined volume mounts
  5457  		for i, mnt := range task.VolumeMounts {
  5458  			if mnt.Volume == "" {
  5459  				mErr.Errors = append(mErr.Errors, fmt.Errorf("Task %s has a volume mount (%d) referencing an empty volume", task.Name, i))
  5460  				continue
  5461  			}
  5462  
  5463  			if _, ok := tg.Volumes[mnt.Volume]; !ok {
  5464  				mErr.Errors = append(mErr.Errors, fmt.Errorf("Task %s has a volume mount (%d) referencing undefined volume %s", task.Name, i, mnt.Volume))
  5465  				continue
  5466  			}
  5467  		}
  5468  
  5469  		if err := task.Validate(tg.EphemeralDisk, j.Type, tg.Services); err != nil {
  5470  			outer := fmt.Errorf("Task %s validation failed: %v", task.Name, err)
  5471  			mErr.Errors = append(mErr.Errors, outer)
  5472  		}
  5473  	}
  5474  	return mErr.ErrorOrNil()
  5475  }
  5476  
  5477  func (tg *TaskGroup) validateNetworks() error {
  5478  	var mErr multierror.Error
  5479  	portLabels := make(map[string]string)
  5480  	staticPorts := make(map[int]string)
  5481  	mappedPorts := make(map[int]string)
  5482  
  5483  	for _, net := range tg.Networks {
  5484  		for _, port := range append(net.ReservedPorts, net.DynamicPorts...) {
  5485  			if other, ok := portLabels[port.Label]; ok {
  5486  				mErr.Errors = append(mErr.Errors, fmt.Errorf("Port label %s already in use by %s", port.Label, other))
  5487  			} else {
  5488  				portLabels[port.Label] = "taskgroup network"
  5489  			}
  5490  
  5491  			if port.Value != 0 {
  5492  				// static port
  5493  				if other, ok := staticPorts[port.Value]; ok {
  5494  					err := fmt.Errorf("Static port %d already reserved by %s", port.Value, other)
  5495  					mErr.Errors = append(mErr.Errors, err)
  5496  				} else {
  5497  					staticPorts[port.Value] = fmt.Sprintf("taskgroup network:%s", port.Label)
  5498  				}
  5499  			}
  5500  
  5501  			if port.To > 0 {
  5502  				if other, ok := mappedPorts[port.To]; ok {
  5503  					err := fmt.Errorf("Port mapped to %d already in use by %s", port.To, other)
  5504  					mErr.Errors = append(mErr.Errors, err)
  5505  				} else {
  5506  					mappedPorts[port.To] = fmt.Sprintf("taskgroup network:%s", port.Label)
  5507  				}
  5508  			} else if port.To < -1 {
  5509  				err := fmt.Errorf("Port %q cannot be mapped to negative value %d", port.Label, port.To)
  5510  				mErr.Errors = append(mErr.Errors, err)
  5511  			}
  5512  		}
  5513  	}
  5514  	// Check task-level networks for duplicate port labels and for duplicated static or mapped ports
  5515  	for _, task := range tg.Tasks {
  5516  		if task.Resources == nil {
  5517  			continue
  5518  		}
  5519  
  5520  		for _, net := range task.Resources.Networks {
  5521  			for _, port := range append(net.ReservedPorts, net.DynamicPorts...) {
  5522  				if other, ok := portLabels[port.Label]; ok {
  5523  					mErr.Errors = append(mErr.Errors, fmt.Errorf("Port label %s already in use by %s", port.Label, other))
  5524  				}
  5525  
  5526  				if port.Value != 0 {
  5527  					if other, ok := staticPorts[port.Value]; ok {
  5528  						err := fmt.Errorf("Static port %d already reserved by %s", port.Value, other)
  5529  						mErr.Errors = append(mErr.Errors, err)
  5530  					} else {
  5531  						staticPorts[port.Value] = fmt.Sprintf("%s:%s", task.Name, port.Label)
  5532  					}
  5533  				}
  5534  
  5535  				if port.To != 0 {
  5536  					if other, ok := mappedPorts[port.To]; ok {
  5537  						err := fmt.Errorf("Port mapped to %d already in use by %s", port.To, other)
  5538  						mErr.Errors = append(mErr.Errors, err)
  5539  					} else {
  5540  						mappedPorts[port.To] = fmt.Sprintf("%s:%s", task.Name, port.Label)
  5541  					}
  5542  				}
  5543  			}
  5544  		}
  5545  	}
  5546  	return mErr.ErrorOrNil()
  5547  }
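
        // Example (illustrative sketch, not part of the original source): a static
        // port on a task network collides with the same static port reserved on the
        // group network.
        //
        //	tg := &TaskGroup{
        //		Networks: []*NetworkResource{
        //			{ReservedPorts: []Port{{Label: "http", Value: 8080}}},
        //		},
        //		Tasks: []*Task{{
        //			Name: "web",
        //			Resources: &Resources{Networks: []*NetworkResource{
        //				{ReservedPorts: []Port{{Label: "admin", Value: 8080}}},
        //			}},
        //		}},
        //	}
        //	// validateNetworks reports: Static port 8080 already reserved by
        //	// taskgroup network:http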
  5548  
  5549  // validateServices runs Service.Validate() on group-level services,
  5550  // checks that group services do not conflict with task services and that
  5551  // group service checks that refer to tasks only refer to tasks that exist.
  5552  func (tg *TaskGroup) validateServices() error {
  5553  	var mErr multierror.Error
  5554  	knownTasks := make(map[string]struct{})
  5555  	knownServices := make(map[string]struct{})
  5556  
  5557  	// Create a map of known tasks and their services so we can compare
  5558  	// vs the group-level services and checks
  5559  	for _, task := range tg.Tasks {
  5560  		knownTasks[task.Name] = struct{}{}
  5561  		if task.Services == nil {
  5562  			continue
  5563  		}
  5564  		for _, service := range task.Services {
  5565  			if _, ok := knownServices[service.Name+service.PortLabel]; ok {
  5566  				mErr.Errors = append(mErr.Errors, fmt.Errorf("Service %s is duplicate", service.Name))
  5567  			}
  5568  			for _, check := range service.Checks {
  5569  				if check.TaskName != "" {
  5570  					mErr.Errors = append(mErr.Errors, fmt.Errorf("Check %s is invalid: only task group service checks can be assigned tasks", check.Name))
  5571  				}
  5572  			}
  5573  			knownServices[service.Name+service.PortLabel] = struct{}{}
  5574  		}
  5575  	}
  5576  	for i, service := range tg.Services {
  5577  		if err := service.Validate(); err != nil {
  5578  			outer := fmt.Errorf("Service[%d] %s validation failed: %s", i, service.Name, err)
  5579  			mErr.Errors = append(mErr.Errors, outer)
  5580  			// we continue here to avoid the risk of crashing on nil-pointer
  5581  			// access in a later step, accepting that we might miss out on
  5582  			// some error messages to provide to the user.
  5583  			continue
  5584  		}
  5585  		if _, ok := knownServices[service.Name+service.PortLabel]; ok {
  5586  			mErr.Errors = append(mErr.Errors, fmt.Errorf("Service %s is duplicate", service.Name))
  5587  		}
  5588  		knownServices[service.Name+service.PortLabel] = struct{}{}
  5589  		for _, check := range service.Checks {
  5590  			if check.TaskName != "" {
  5591  				if check.Type != ServiceCheckScript && check.Type != ServiceCheckGRPC {
  5592  					mErr.Errors = append(mErr.Errors,
  5593  						fmt.Errorf("Check %s invalid: only script and gRPC checks should have tasks", check.Name))
  5594  				}
  5595  				if _, ok := knownTasks[check.TaskName]; !ok {
  5596  					mErr.Errors = append(mErr.Errors,
  5597  						fmt.Errorf("Check %s invalid: refers to non-existent task %s", check.Name, check.TaskName))
  5598  				}
  5599  			}
  5600  		}
  5601  	}
  5602  	return mErr.ErrorOrNil()
  5603  }
  5604  
  5605  // validateScalingPolicy ensures that the scaling policy has consistent
  5606  // min and max, not in conflict with the task group count
  5607  func (tg *TaskGroup) validateScalingPolicy() error {
  5608  	if tg.Scaling == nil {
  5609  		return nil
  5610  	}
  5611  
  5612  	var mErr multierror.Error
  5613  
  5614  	if tg.Scaling.Min > tg.Scaling.Max {
  5615  		mErr.Errors = append(mErr.Errors,
  5616  			fmt.Errorf("Scaling policy invalid: maximum count must not be less than minimum count"))
  5617  	}
  5618  
  5619  	if int64(tg.Count) < tg.Scaling.Min {
  5620  		mErr.Errors = append(mErr.Errors,
  5621  			fmt.Errorf("Scaling policy invalid: task group count must not be less than minimum count in scaling policy"))
  5622  	}
  5623  
  5624  	if tg.Scaling.Max < int64(tg.Count) {
  5625  		mErr.Errors = append(mErr.Errors,
  5626  			fmt.Errorf("Scaling policy invalid: task group count must not be greater than maximum count in scaling policy"))
  5627  	}
  5628  
  5629  	return mErr.ErrorOrNil()
  5630  }
  5631  
  5632  // Warnings returns a list of warnings that may be from dubious settings or
  5633  // deprecation warnings.
  5634  func (tg *TaskGroup) Warnings(j *Job) error {
  5635  	var mErr multierror.Error
  5636  
  5637  	// Validate the update strategy
  5638  	if u := tg.Update; u != nil {
  5639  		// Check the counts are appropriate
  5640  		if u.MaxParallel > tg.Count {
  5641  			mErr.Errors = append(mErr.Errors,
  5642  				fmt.Errorf("Update max parallel count is greater than task group count (%d > %d). "+
  5643  					"A destructive change would result in the simultaneous replacement of all allocations.", u.MaxParallel, tg.Count))
  5644  		}
  5645  	}
  5646  
  5647  	for _, t := range tg.Tasks {
  5648  		if err := t.Warnings(); err != nil {
  5649  			err = multierror.Prefix(err, fmt.Sprintf("Task %q:", t.Name))
  5650  			mErr.Errors = append(mErr.Errors, err)
  5651  		}
  5652  	}
  5653  
  5654  	return mErr.ErrorOrNil()
  5655  }
  5656  
  5657  // LookupTask finds a task by name
  5658  func (tg *TaskGroup) LookupTask(name string) *Task {
  5659  	for _, t := range tg.Tasks {
  5660  		if t.Name == name {
  5661  			return t
  5662  		}
  5663  	}
  5664  	return nil
  5665  }
  5666  
  5667  func (tg *TaskGroup) UsesConnect() bool {
  5668  	for _, service := range tg.Services {
  5669  		if service.Connect != nil {
  5670  			if service.Connect.Native || service.Connect.SidecarService != nil {
  5671  				return true
  5672  			}
  5673  		}
  5674  	}
  5675  	return false
  5676  }
  5677  
  5678  func (tg *TaskGroup) GoString() string {
  5679  	return fmt.Sprintf("*%#v", *tg)
  5680  }
  5681  
  5682  // CheckRestart describes if and when a task should be restarted based on
  5683  // failing health checks.
  5684  type CheckRestart struct {
  5685  	Limit          int           // Restart task after this many unhealthy intervals
  5686  	Grace          time.Duration // Grace time to give tasks after starting to get healthy
  5687  	IgnoreWarnings bool          // If true treat checks in `warning` as passing
  5688  }
  5689  
  5690  func (c *CheckRestart) Copy() *CheckRestart {
  5691  	if c == nil {
  5692  		return nil
  5693  	}
  5694  
  5695  	nc := new(CheckRestart)
  5696  	*nc = *c
  5697  	return nc
  5698  }
  5699  
  5700  func (c *CheckRestart) Equals(o *CheckRestart) bool {
  5701  	if c == nil || o == nil {
  5702  		return c == o
  5703  	}
  5704  
  5705  	if c.Limit != o.Limit {
  5706  		return false
  5707  	}
  5708  
  5709  	if c.Grace != o.Grace {
  5710  		return false
  5711  	}
  5712  
  5713  	if c.IgnoreWarnings != o.IgnoreWarnings {
  5714  		return false
  5715  	}
  5716  
  5717  	return true
  5718  }
  5719  
  5720  func (c *CheckRestart) Validate() error {
  5721  	if c == nil {
  5722  		return nil
  5723  	}
  5724  
  5725  	var mErr multierror.Error
  5726  	if c.Limit < 0 {
  5727  		mErr.Errors = append(mErr.Errors, fmt.Errorf("limit must be greater than or equal to 0 but found %d", c.Limit))
  5728  	}
  5729  
  5730  	if c.Grace < 0 {
  5731  		mErr.Errors = append(mErr.Errors, fmt.Errorf("grace period must be greater than or equal to 0 but found %v", c.Grace))
  5732  	}
  5733  
  5734  	return mErr.ErrorOrNil()
  5735  }
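
        // Example (illustrative, not part of the original source): negative values
        // are rejected.
        //
        //	cr := &CheckRestart{Limit: -1, Grace: -time.Second}
        //	err := cr.Validate() // reports both the negative limit and grace period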
  5736  
  5737  const (
  5738  	// DefaultKillTimeout is the default timeout between signaling a task it
  5739  	// will be killed and killing it.
  5740  	DefaultKillTimeout = 5 * time.Second
  5741  )
  5742  
  5743  // LogConfig provides configuration for log rotation
  5744  type LogConfig struct {
  5745  	MaxFiles      int
  5746  	MaxFileSizeMB int
  5747  }
  5748  
  5749  func (l *LogConfig) Copy() *LogConfig {
  5750  	if l == nil {
  5751  		return nil
  5752  	}
  5753  	return &LogConfig{
  5754  		MaxFiles:      l.MaxFiles,
  5755  		MaxFileSizeMB: l.MaxFileSizeMB,
  5756  	}
  5757  }
  5758  
  5759  // DefaultLogConfig returns the default LogConfig values.
  5760  func DefaultLogConfig() *LogConfig {
  5761  	return &LogConfig{
  5762  		MaxFiles:      10,
  5763  		MaxFileSizeMB: 10,
  5764  	}
  5765  }
  5766  
  5767  // Validate returns an error if the log config values specified are less than
  5768  // the minimum allowed.
  5769  func (l *LogConfig) Validate() error {
  5770  	var mErr multierror.Error
  5771  	if l.MaxFiles < 1 {
  5772  		mErr.Errors = append(mErr.Errors, fmt.Errorf("minimum number of files is 1; got %d", l.MaxFiles))
  5773  	}
  5774  	if l.MaxFileSizeMB < 1 {
  5775  		mErr.Errors = append(mErr.Errors, fmt.Errorf("minimum file size is 1MB; got %d", l.MaxFileSizeMB))
  5776  	}
  5777  	return mErr.ErrorOrNil()
  5778  }
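
        // Example (illustrative, not part of the original source): total log storage
        // is MaxFiles * MaxFileSizeMB, which Task.Validate later compares against the
        // group's ephemeral disk size.
        //
        //	lc := &LogConfig{MaxFiles: 10, MaxFileSizeMB: 10}
        //	err := lc.Validate() // nil; 10 files x 10 MB = 100 MB of log storage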
  5779  
  5780  // Task is a single process, typically executed as part of a task group.
  5781  type Task struct {
  5782  	// Name of the task
  5783  	Name string
  5784  
  5785  	// Driver is used to control which driver is used
  5786  	Driver string
  5787  
  5788  	// User is used to determine which user will run the task. It defaults to
  5789  	// the same user the Nomad client is being run as.
  5790  	User string
  5791  
  5792  	// Config is provided to the driver to initialize
  5793  	Config map[string]interface{}
  5794  
  5795  	// Map of environment variables to be used by the driver
  5796  	Env map[string]string
  5797  
  5798  	// List of service definitions exposed by the Task
  5799  	Services []*Service
  5800  
  5801  	// Vault is used to define the set of Vault policies that this task should
  5802  	// have access to.
  5803  	Vault *Vault
  5804  
  5805  	// Templates are the set of templates to be rendered for the task.
  5806  	Templates []*Template
  5807  
  5808  	// Constraints can be specified at a task level and apply only to
  5809  	// the particular task.
  5810  	Constraints []*Constraint
  5811  
  5812  	// Affinities can be specified at the task level to express
  5813  	// scheduling preferences
  5814  	Affinities []*Affinity
  5815  
  5816  	// Resources is the resources needed by this task
  5817  	Resources *Resources
  5818  
  5819  	// RestartPolicy of a TaskGroup
  5820  	RestartPolicy *RestartPolicy
  5821  
  5822  	// DispatchPayload configures how the task retrieves its input from a dispatch
  5823  	DispatchPayload *DispatchPayloadConfig
  5824  
  5825  	Lifecycle *TaskLifecycleConfig
  5826  
  5827  	// Meta is used to associate arbitrary metadata with this
  5828  	// task. This is opaque to Nomad.
  5829  	Meta map[string]string
  5830  
  5831  	// KillTimeout is the time between signaling a task that it will be
  5832  	// killed and killing it.
  5833  	KillTimeout time.Duration
  5834  
  5835  	// LogConfig provides configuration for log rotation
  5836  	LogConfig *LogConfig
  5837  
  5838  	// Artifacts is a list of artifacts to download and extract before running
  5839  	// the task.
  5840  	Artifacts []*TaskArtifact
  5841  
  5842  	// Leader marks the task as the leader within the group. When the leader
  5843  	// task exits, other tasks will be gracefully terminated.
  5844  	Leader bool
  5845  
  5846  	// ShutdownDelay is the duration of the delay between deregistering a
  5847  	// task from Consul and sending it a signal to shutdown. See #2441
  5848  	ShutdownDelay time.Duration
  5849  
  5850  	// VolumeMounts is a list of Volume name <-> mount configurations that will be
  5851  	// attached to this task.
  5852  	VolumeMounts []*VolumeMount
  5853  
  5856  	// KillSignal is the kill signal to use for the task. This is an optional
  5857  	// specification and defaults to SIGINT
  5858  	KillSignal string
  5859  
  5860  	// Used internally to manage tasks according to their TaskKind. Initial use case
  5861  	// is for Consul Connect
  5862  	Kind TaskKind
  5863  
  5864  	// CSIPluginConfig is used to configure the plugin supervisor for the task.
  5865  	CSIPluginConfig *TaskCSIPluginConfig
  5866  }
  5867  
  5868  // UsesConnect is for conveniently detecting if the Task is able to make use
  5869  // of Consul Connect features. This will be indicated in the TaskKind of the
  5870  // Task, which exports known types of Tasks.
  5871  //
  5872  // Currently only Consul Connect Proxy tasks are known.
  5873  // (Consul Connect Native tasks will be supported soon).
  5874  func (t *Task) UsesConnect() bool {
  5875  	// todo(shoenig): native tasks
  5876  	switch {
  5877  	case t.Kind.IsConnectProxy():
  5878  		return true
  5879  	default:
  5880  		return false
  5881  	}
  5882  }
  5883  
  5884  func (t *Task) Copy() *Task {
  5885  	if t == nil {
  5886  		return nil
  5887  	}
  5888  	nt := new(Task)
  5889  	*nt = *t
  5890  	nt.Env = helper.CopyMapStringString(nt.Env)
  5891  
  5892  	if t.Services != nil {
  5893  		services := make([]*Service, len(nt.Services))
  5894  		for i, s := range nt.Services {
  5895  			services[i] = s.Copy()
  5896  		}
  5897  		nt.Services = services
  5898  	}
  5899  
  5900  	nt.Constraints = CopySliceConstraints(nt.Constraints)
  5901  	nt.Affinities = CopySliceAffinities(nt.Affinities)
  5902  	nt.VolumeMounts = CopySliceVolumeMount(nt.VolumeMounts)
  5903  	nt.CSIPluginConfig = nt.CSIPluginConfig.Copy()
  5904  
  5905  	nt.Vault = nt.Vault.Copy()
  5906  	nt.Resources = nt.Resources.Copy()
  5907  	nt.LogConfig = nt.LogConfig.Copy()
  5908  	nt.Meta = helper.CopyMapStringString(nt.Meta)
  5909  	nt.DispatchPayload = nt.DispatchPayload.Copy()
  5910  	nt.Lifecycle = nt.Lifecycle.Copy()
  5911  
  5912  	if t.Artifacts != nil {
  5913  		artifacts := make([]*TaskArtifact, 0, len(t.Artifacts))
  5914  		for _, a := range nt.Artifacts {
  5915  			artifacts = append(artifacts, a.Copy())
  5916  		}
  5917  		nt.Artifacts = artifacts
  5918  	}
  5919  
  5920  	if i, err := copystructure.Copy(nt.Config); err != nil {
  5921  		panic(err.Error())
  5922  	} else {
  5923  		nt.Config = i.(map[string]interface{})
  5924  	}
  5925  
  5926  	if t.Templates != nil {
  5927  		templates := make([]*Template, len(t.Templates))
  5928  		for i, tmpl := range nt.Templates {
  5929  			templates[i] = tmpl.Copy()
  5930  		}
  5931  		nt.Templates = templates
  5932  	}
  5933  
  5934  	return nt
  5935  }
  5936  
  5937  // Canonicalize canonicalizes fields in the task.
  5938  func (t *Task) Canonicalize(job *Job, tg *TaskGroup) {
  5939  	// Ensure that an empty and nil map are treated the same to avoid scheduling
  5940  	// problems since we use reflect DeepEquals.
  5941  	if len(t.Meta) == 0 {
  5942  		t.Meta = nil
  5943  	}
  5944  	if len(t.Config) == 0 {
  5945  		t.Config = nil
  5946  	}
  5947  	if len(t.Env) == 0 {
  5948  		t.Env = nil
  5949  	}
  5950  
  5951  	for _, service := range t.Services {
  5952  		service.Canonicalize(job.Name, tg.Name, t.Name)
  5953  	}
  5954  
  5955  	// If Resources are nil initialize them to defaults, otherwise canonicalize
  5956  	if t.Resources == nil {
  5957  		t.Resources = DefaultResources()
  5958  	} else {
  5959  		t.Resources.Canonicalize()
  5960  	}
  5961  
  5962  	if t.RestartPolicy == nil {
  5963  		t.RestartPolicy = tg.RestartPolicy
  5964  	}
  5965  
  5966  	// Set the default timeout if it is not specified.
  5967  	if t.KillTimeout == 0 {
  5968  		t.KillTimeout = DefaultKillTimeout
  5969  	}
  5970  
  5971  	if t.Vault != nil {
  5972  		t.Vault.Canonicalize()
  5973  	}
  5974  
  5975  	for _, template := range t.Templates {
  5976  		template.Canonicalize()
  5977  	}
  5978  }
  5979  
  5980  func (t *Task) GoString() string {
  5981  	return fmt.Sprintf("*%#v", *t)
  5982  }
  5983  
  5984  // Validate is used to sanity check a task
  5985  func (t *Task) Validate(ephemeralDisk *EphemeralDisk, jobType string, tgServices []*Service) error {
  5986  	var mErr multierror.Error
  5987  	if t.Name == "" {
  5988  		mErr.Errors = append(mErr.Errors, errors.New("Missing task name"))
  5989  	}
  5990  	if strings.ContainsAny(t.Name, `/\`) {
  5991  		// We enforce this so that when creating the directory on disk it will
  5992  		// not have any slashes.
  5993  		mErr.Errors = append(mErr.Errors, errors.New("Task name cannot include slashes"))
  5994  	}
  5995  	if t.Driver == "" {
  5996  		mErr.Errors = append(mErr.Errors, errors.New("Missing task driver"))
  5997  	}
  5998  	if t.KillTimeout < 0 {
  5999  		mErr.Errors = append(mErr.Errors, errors.New("KillTimeout must not be negative"))
  6000  	}
  6001  	if t.ShutdownDelay < 0 {
  6002  		mErr.Errors = append(mErr.Errors, errors.New("ShutdownDelay must not be negative"))
  6003  	}
  6004  
  6005  	// Validate the resources.
  6006  	if t.Resources == nil {
  6007  		mErr.Errors = append(mErr.Errors, errors.New("Missing task resources"))
  6008  	} else if err := t.Resources.Validate(); err != nil {
  6009  		mErr.Errors = append(mErr.Errors, err)
  6010  	}
  6011  
  6012  	// Validate the log config
  6013  	if t.LogConfig == nil {
  6014  		mErr.Errors = append(mErr.Errors, errors.New("Missing Log Config"))
  6015  	} else if err := t.LogConfig.Validate(); err != nil {
  6016  		mErr.Errors = append(mErr.Errors, err)
  6017  	}
  6018  
  6019  	for idx, constr := range t.Constraints {
  6020  		if err := constr.Validate(); err != nil {
  6021  			outer := fmt.Errorf("Constraint %d validation failed: %s", idx+1, err)
  6022  			mErr.Errors = append(mErr.Errors, outer)
  6023  		}
  6024  
  6025  		switch constr.Operand {
  6026  		case ConstraintDistinctHosts, ConstraintDistinctProperty:
  6027  			outer := fmt.Errorf("Constraint %d has disallowed Operand at task level: %s", idx+1, constr.Operand)
  6028  			mErr.Errors = append(mErr.Errors, outer)
  6029  		}
  6030  	}
  6031  
  6032  	if jobType == JobTypeSystem {
  6033  		if t.Affinities != nil {
  6034  			mErr.Errors = append(mErr.Errors, fmt.Errorf("System jobs may not have an affinity stanza"))
  6035  		}
  6036  	} else {
  6037  		for idx, affinity := range t.Affinities {
  6038  			if err := affinity.Validate(); err != nil {
  6039  				outer := fmt.Errorf("Affinity %d validation failed: %s", idx+1, err)
  6040  				mErr.Errors = append(mErr.Errors, outer)
  6041  			}
  6042  		}
  6043  	}
  6044  
  6045  	// Validate Services
  6046  	if err := validateServices(t); err != nil {
  6047  		mErr.Errors = append(mErr.Errors, err)
  6048  	}
  6049  
  6050  	if t.LogConfig != nil && ephemeralDisk != nil {
  6051  		logUsage := (t.LogConfig.MaxFiles * t.LogConfig.MaxFileSizeMB)
  6052  		if ephemeralDisk.SizeMB <= logUsage {
  6053  			mErr.Errors = append(mErr.Errors,
  6054  				fmt.Errorf("log storage (%d MB) must be less than requested disk capacity (%d MB)",
  6055  					logUsage, ephemeralDisk.SizeMB))
  6056  		}
  6057  	}
  6058  
  6059  	for idx, artifact := range t.Artifacts {
  6060  		if err := artifact.Validate(); err != nil {
  6061  			outer := fmt.Errorf("Artifact %d validation failed: %v", idx+1, err)
  6062  			mErr.Errors = append(mErr.Errors, outer)
  6063  		}
  6064  	}
  6065  
  6066  	if t.Vault != nil {
  6067  		if err := t.Vault.Validate(); err != nil {
  6068  			mErr.Errors = append(mErr.Errors, fmt.Errorf("Vault validation failed: %v", err))
  6069  		}
  6070  	}
  6071  
  6072  	destinations := make(map[string]int, len(t.Templates))
  6073  	for idx, tmpl := range t.Templates {
  6074  		if err := tmpl.Validate(); err != nil {
  6075  			outer := fmt.Errorf("Template %d validation failed: %s", idx+1, err)
  6076  			mErr.Errors = append(mErr.Errors, outer)
  6077  		}
  6078  
  6079  		if other, ok := destinations[tmpl.DestPath]; ok {
  6080  			outer := fmt.Errorf("Template %d has same destination as %d", idx+1, other)
  6081  			mErr.Errors = append(mErr.Errors, outer)
  6082  		} else {
  6083  			destinations[tmpl.DestPath] = idx + 1
  6084  		}
  6085  	}
  6086  
  6087  	// Validate the dispatch payload block if there
  6088  	if t.DispatchPayload != nil {
  6089  		if err := t.DispatchPayload.Validate(); err != nil {
  6090  			mErr.Errors = append(mErr.Errors, fmt.Errorf("Dispatch Payload validation failed: %v", err))
  6091  		}
  6092  	}
  6093  
  6094  	// Validate the Lifecycle block if there
  6095  	if t.Lifecycle != nil {
  6096  		if err := t.Lifecycle.Validate(); err != nil {
  6097  			mErr.Errors = append(mErr.Errors, fmt.Errorf("Lifecycle validation failed: %v", err))
  6098  		}
  6099  
  6100  	}
  6101  
  6102  	// Validation for TaskKind field which is used for Consul Connect integration
  6103  	if t.Kind.IsConnectProxy() {
  6104  		// This task is a Connect proxy so it should not have service stanzas
  6105  		if len(t.Services) > 0 {
  6106  			mErr.Errors = append(mErr.Errors, fmt.Errorf("Connect proxy task must not have a service stanza"))
  6107  		}
  6108  		if t.Leader {
  6109  			mErr.Errors = append(mErr.Errors, fmt.Errorf("Connect proxy task must not have leader set"))
  6110  		}
  6111  
  6112  		// Ensure the proxy task has a corresponding service entry
  6113  		serviceErr := ValidateConnectProxyService(t.Kind.Value(), tgServices)
  6114  		if serviceErr != nil {
  6115  			mErr.Errors = append(mErr.Errors, serviceErr)
  6116  		}
  6117  	}
  6118  
  6119  	// Validation for volumes
  6120  	for idx, vm := range t.VolumeMounts {
  6121  		if !MountPropagationModeIsValid(vm.PropagationMode) {
  6122  			mErr.Errors = append(mErr.Errors, fmt.Errorf("Volume Mount (%d) has an invalid propagation mode: \"%s\"", idx, vm.PropagationMode))
  6123  		}
  6124  	}
  6125  
  6126  	// Validate CSI Plugin Config
  6127  	if t.CSIPluginConfig != nil {
  6128  		if t.CSIPluginConfig.ID == "" {
  6129  			mErr.Errors = append(mErr.Errors, fmt.Errorf("CSIPluginConfig must have a non-empty PluginID"))
  6130  		}
  6131  
  6132  		if !CSIPluginTypeIsValid(t.CSIPluginConfig.Type) {
  6133  			mErr.Errors = append(mErr.Errors, fmt.Errorf("CSIPluginConfig PluginType must be one of 'node', 'controller', or 'monolith', got: \"%s\"", t.CSIPluginConfig.Type))
  6134  		}
  6135  
  6136  		// TODO: Investigate validation of the PluginMountDir. Not much we can do apart from check IsAbs until after we understand its execution environment though :(
  6137  	}
  6138  
  6139  	return mErr.ErrorOrNil()
  6140  }
  6141  
  6142  // validateServices takes a task and validates the services within it are valid
  6143  // and reference ports that exist.
  6144  func validateServices(t *Task) error {
  6145  	var mErr multierror.Error
  6146  
  6147  	// Ensure that services don't ask for nonexistent ports and their names are
  6148  	// unique.
  6149  	servicePorts := make(map[string]map[string]struct{})
  6150  	addServicePort := func(label, service string) {
  6151  		if _, ok := servicePorts[label]; !ok {
  6152  			servicePorts[label] = map[string]struct{}{}
  6153  		}
  6154  		servicePorts[label][service] = struct{}{}
  6155  	}
  6156  	knownServices := make(map[string]struct{})
  6157  	for i, service := range t.Services {
  6158  		if err := service.Validate(); err != nil {
  6159  			outer := fmt.Errorf("service[%d] %+q validation failed: %s", i, service.Name, err)
  6160  			mErr.Errors = append(mErr.Errors, outer)
  6161  		}
  6162  
  6163  		// Ensure that services with the same name are not being registered for
  6164  		// the same port
  6165  		if _, ok := knownServices[service.Name+service.PortLabel]; ok {
  6166  			mErr.Errors = append(mErr.Errors, fmt.Errorf("service %q is duplicate", service.Name))
  6167  		}
  6168  		knownServices[service.Name+service.PortLabel] = struct{}{}
  6169  
  6170  		if service.PortLabel != "" {
  6171  			if service.AddressMode == "driver" {
  6172  				// Numeric port labels are valid for address_mode=driver
  6173  				_, err := strconv.Atoi(service.PortLabel)
  6174  				if err != nil {
  6175  					// Not a numeric port label, add it to list to check
  6176  					addServicePort(service.PortLabel, service.Name)
  6177  				}
  6178  			} else {
  6179  				addServicePort(service.PortLabel, service.Name)
  6180  			}
  6181  		}
  6182  
  6183  		// Ensure that check names are unique and have valid ports
  6184  		knownChecks := make(map[string]struct{})
  6185  		for _, check := range service.Checks {
  6186  			if _, ok := knownChecks[check.Name]; ok {
  6187  				mErr.Errors = append(mErr.Errors, fmt.Errorf("check %q is duplicate", check.Name))
  6188  			}
  6189  			knownChecks[check.Name] = struct{}{}
  6190  
  6191  			if !check.RequiresPort() {
  6192  				// No need to continue validating check if it doesn't need a port
  6193  				continue
  6194  			}
  6195  
  6196  			effectivePort := check.PortLabel
  6197  			if effectivePort == "" {
  6198  				// Inherits from service
  6199  				effectivePort = service.PortLabel
  6200  			}
  6201  
  6202  			if effectivePort == "" {
  6203  				mErr.Errors = append(mErr.Errors, fmt.Errorf("check %q is missing a port", check.Name))
  6204  				continue
  6205  			}
  6206  
  6207  			isNumeric := false
  6208  			portNumber, err := strconv.Atoi(effectivePort)
  6209  			if err == nil {
  6210  				isNumeric = true
  6211  			}
  6212  
  6213  			// Numeric ports are fine for address_mode = "driver"
  6214  			if check.AddressMode == "driver" && isNumeric {
  6215  				if portNumber <= 0 {
  6216  					mErr.Errors = append(mErr.Errors, fmt.Errorf("check %q has invalid numeric port %d", check.Name, portNumber))
  6217  				}
  6218  				continue
  6219  			}
  6220  
  6221  			if isNumeric {
  6222  				mErr.Errors = append(mErr.Errors, fmt.Errorf(`check %q cannot use a numeric port %d without setting address_mode="driver"`, check.Name, portNumber))
  6223  				continue
  6224  			}
  6225  
  6226  			// PortLabel must exist, report errors by its parent service
  6227  			addServicePort(effectivePort, service.Name)
  6228  		}
  6229  	}
  6230  
  6231  	// Get the set of port labels.
  6232  	portLabels := make(map[string]struct{})
  6233  	if t.Resources != nil {
  6234  		for _, network := range t.Resources.Networks {
  6235  			ports := network.PortLabels()
  6236  			for portLabel := range ports {
  6237  				portLabels[portLabel] = struct{}{}
  6238  			}
  6239  		}
  6240  	}
  6241  
  6242  	// Iterate over a sorted list of keys to make error listings stable
  6243  	keys := make([]string, 0, len(servicePorts))
  6244  	for p := range servicePorts {
  6245  		keys = append(keys, p)
  6246  	}
  6247  	sort.Strings(keys)
  6248  
  6249  	// Ensure all ports referenced in services exist.
  6250  	for _, servicePort := range keys {
  6251  		services := servicePorts[servicePort]
  6252  		_, ok := portLabels[servicePort]
  6253  		if !ok {
  6254  			names := make([]string, 0, len(services))
  6255  			for name := range services {
  6256  				names = append(names, name)
  6257  			}
  6258  
  6259  			// Keep order deterministic
  6260  			sort.Strings(names)
  6261  			joined := strings.Join(names, ", ")
  6262  			err := fmt.Errorf("port label %q referenced by services %v does not exist", servicePort, joined)
  6263  			mErr.Errors = append(mErr.Errors, err)
  6264  		}
  6265  	}
  6266  
  6268  	return mErr.ErrorOrNil()
  6269  }
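
        // Example (illustrative sketch, not part of the original source): a numeric
        // service port label is only allowed with address_mode = "driver"; otherwise
        // the label must exist on one of the task's networks.
        //
        //	t := &Task{
        //		Name: "web",
        //		Services: []*Service{{
        //			Name:      "api",
        //			PortLabel: "8080", // numeric label, AddressMode left empty
        //		}},
        //	}
        //	// validateServices reports: port label "8080" referenced by services
        //	// api does not exist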
  6270  
  6271  func (t *Task) Warnings() error {
  6272  	var mErr multierror.Error
  6273  
  6274  	// Validate the resources
  6275  	if t.Resources != nil && t.Resources.IOPS != 0 {
  6276  		mErr.Errors = append(mErr.Errors, fmt.Errorf("IOPS has been deprecated as of Nomad 0.9.0. Please remove IOPS from resource stanza."))
  6277  	}
  6278  
  6279  	for idx, tmpl := range t.Templates {
  6280  		if err := tmpl.Warnings(); err != nil {
  6281  			err = multierror.Prefix(err, fmt.Sprintf("Template[%d]", idx))
  6282  			mErr.Errors = append(mErr.Errors, err)
  6283  		}
  6284  	}
  6285  
  6286  	return mErr.ErrorOrNil()
  6287  }
  6288  
  6289  // TaskKind identifies the special kinds of tasks using the following format:
  6290  // `<kind_name>(:<identifier>)`. The TaskKind can optionally include an identifier that
  6291  // is opaque to the Task. This identifier can be used to relate the task to some
  6292  // other entity based on the kind.
  6293  //
  6294  // For example, a task may have the TaskKind of `connect-proxy:service`, where
  6295  // 'connect-proxy' is the kind name and 'service' is the identifier naming the
  6296  // service for which the task is a Connect proxy.
  6297  type TaskKind string
  6298  
  6299  // Name returns the kind name portion of the TaskKind
  6300  func (k TaskKind) Name() string {
  6301  	return strings.Split(string(k), ":")[0]
  6302  }
  6303  
  6304  // Value returns the identifier of the TaskKind or an empty string if it doesn't
  6305  // include one.
  6306  func (k TaskKind) Value() string {
  6307  	if s := strings.SplitN(string(k), ":", 2); len(s) > 1 {
  6308  		return s[1]
  6309  	}
  6310  	return ""
  6311  }
  6312  
  6313  // IsConnectProxy returns true if the TaskKind is connect-proxy
  6314  func (k TaskKind) IsConnectProxy() bool {
  6315  	return strings.HasPrefix(string(k), ConnectProxyPrefix+":") && len(k) > len(ConnectProxyPrefix)+1
  6316  }
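
        // Example (illustrative, not part of the original source):
        //
        //	k := TaskKind("connect-proxy:web")
        //	k.Name()           // "connect-proxy"
        //	k.Value()          // "web"
        //	k.IsConnectProxy() // true: prefix matches and an identifier is present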
  6317  
  6318  // ConnectProxyPrefix is the prefix used for fields referencing a Consul Connect
  6319  // Proxy
  6320  const ConnectProxyPrefix = "connect-proxy"
  6321  
  6322  // ValidateConnectProxyService checks that the service that is being
  6323  // proxied by this task exists in the task group and contains
  6324  // valid Connect config.
  6325  func ValidateConnectProxyService(serviceName string, tgServices []*Service) error {
  6326  	found := false
  6327  	names := make([]string, 0, len(tgServices))
  6328  	for _, svc := range tgServices {
  6329  		if svc.Connect == nil || svc.Connect.SidecarService == nil {
  6330  			continue
  6331  		}
  6332  
  6333  		if svc.Name == serviceName {
  6334  			found = true
  6335  			break
  6336  		}
  6337  
  6338  		// Build up list of mismatched Connect service names for error
  6339  		// reporting.
  6340  		names = append(names, svc.Name)
  6341  	}
  6342  
  6343  	if !found {
  6344  		if len(names) == 0 {
  6345  			return fmt.Errorf("No Connect services in task group with Connect proxy (%q)", serviceName)
  6346  		} else {
  6347  			return fmt.Errorf("Connect proxy service name (%q) not found in Connect services from task group: %s", serviceName, names)
  6348  		}
  6349  	}
  6350  
  6351  	return nil
  6352  }
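
        // Example (illustrative sketch, not part of the original source): the proxied
        // service must exist in the group and define a Connect sidecar.
        //
        //	services := []*Service{{
        //		Name:    "web",
        //		Connect: &ConsulConnect{SidecarService: &ConsulSidecarService{}},
        //	}}
        //	_ = ValidateConnectProxyService("web", services) // nil
        //	_ = ValidateConnectProxyService("api", services) // error: name not found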
  6353  
  6354  const (
  6355  	// TemplateChangeModeNoop marks that no action should be taken if the
  6356  	// template is re-rendered
  6357  	TemplateChangeModeNoop = "noop"
  6358  
  6359  	// TemplateChangeModeSignal marks that the task should be signaled if the
  6360  	// template is re-rendered
  6361  	TemplateChangeModeSignal = "signal"
  6362  
  6363  	// TemplateChangeModeRestart marks that the task should be restarted if the
  6364  	// template is re-rendered
  6365  	TemplateChangeModeRestart = "restart"
  6366  )
  6367  
  6368  var (
  6369  	// TemplateChangeModeInvalidError is the error for when an invalid change
  6370  	// mode is given
  6371  	TemplateChangeModeInvalidError = errors.New("Invalid change mode. Must be one of the following: noop, signal, restart")
  6372  )
  6373  
  6374  // Template represents a template configuration to be rendered for a given task
  6375  type Template struct {
  6376  	// SourcePath is the path to the template to be rendered
  6377  	SourcePath string
  6378  
  6379  	// DestPath is the path to where the template should be rendered
  6380  	DestPath string
  6381  
  6382  	// EmbeddedTmpl stores the raw template. This is useful for smaller templates
  6383  	// where they are embedded in the job file rather than sent as an artifact
  6384  	EmbeddedTmpl string
  6385  
  6386  	// ChangeMode indicates what should be done if the template is re-rendered
  6387  	ChangeMode string
  6388  
  6389  	// ChangeSignal is the signal that should be sent if the change mode
  6390  	// requires it.
  6391  	ChangeSignal string
  6392  
  6393  	// Splay is used to avoid coordinated restarts of processes by applying a
  6394  	// random wait between 0 and the given splay value before signalling the
  6395  	// application of a change
  6396  	Splay time.Duration
  6397  
  6398  	// Perms is the permission the file should be written out with.
  6399  	Perms string
  6400  
  6401  	// LeftDelim and RightDelim are optional configurations to control what
  6402  	// delimiter is utilized when parsing the template.
  6403  	LeftDelim  string
  6404  	RightDelim string
  6405  
  6406  	// Envvars enables exposing the template as environment variables
  6407  	// instead of as a file. The template must be of the form:
  6408  	//
  6409  	//	VAR_NAME_1={{ key service/my-key }}
  6410  	//	VAR_NAME_2=raw string and {{ env "attr.kernel.name" }}
  6411  	//
  6412  	// Lines will be split on the initial "=" with the first part being the
  6413  	// key name and the second part the value.
  6414  	// Empty lines and lines starting with # will be ignored, but to avoid
  6415  	// escaping issues #s within lines will not be treated as comments.
  6416  	Envvars bool
  6417  
  6418  	// VaultGrace is the grace duration between lease renewal and reacquiring a
  6419  	// secret. If the lease of a secret is less than the grace, a new secret is
  6420  	// acquired.
  6421  	// COMPAT(0.12) VaultGrace has been ignored by Vault since Vault v0.5.
  6422  	VaultGrace time.Duration
  6423  }
  6424  
  6425  // DefaultTemplate returns a default template.
  6426  func DefaultTemplate() *Template {
  6427  	return &Template{
  6428  		ChangeMode: TemplateChangeModeRestart,
  6429  		Splay:      5 * time.Second,
  6430  		Perms:      "0644",
  6431  	}
  6432  }
  6433  
  6434  func (t *Template) Copy() *Template {
  6435  	if t == nil {
  6436  		return nil
  6437  	}
  6438  	copy := new(Template)
  6439  	*copy = *t
  6440  	return copy
  6441  }
  6442  
  6443  func (t *Template) Canonicalize() {
  6444  	if t.ChangeSignal != "" {
  6445  		t.ChangeSignal = strings.ToUpper(t.ChangeSignal)
  6446  	}
  6447  }
  6448  
  6449  func (t *Template) Validate() error {
  6450  	var mErr multierror.Error
  6451  
  6452  	// Verify we have something to render
  6453  	if t.SourcePath == "" && t.EmbeddedTmpl == "" {
  6454  		multierror.Append(&mErr, fmt.Errorf("Must specify a source path or have an embedded template"))
  6455  	}
  6456  
  6457  	// Verify we can render somewhere
  6458  	if t.DestPath == "" {
  6459  		multierror.Append(&mErr, fmt.Errorf("Must specify a destination for the template"))
  6460  	}
  6461  
  6462  	// Verify the destination doesn't escape
  6463  	escaped, err := PathEscapesAllocDir("task", t.DestPath)
  6464  	if err != nil {
  6465  		mErr.Errors = append(mErr.Errors, fmt.Errorf("invalid destination path: %v", err))
  6466  	} else if escaped {
  6467  		mErr.Errors = append(mErr.Errors, fmt.Errorf("destination escapes allocation directory"))
  6468  	}
  6469  
  6470  	// Verify a proper change mode
  6471  	switch t.ChangeMode {
  6472  	case TemplateChangeModeNoop, TemplateChangeModeRestart:
  6473  	case TemplateChangeModeSignal:
  6474  		if t.ChangeSignal == "" {
  6475  			multierror.Append(&mErr, fmt.Errorf("Must specify signal value when change mode is signal"))
  6476  		}
  6477  		if t.Envvars {
  6478  			multierror.Append(&mErr, fmt.Errorf("cannot use signals with env var templates"))
  6479  		}
  6480  	default:
  6481  		multierror.Append(&mErr, TemplateChangeModeInvalidError)
  6482  	}
  6483  
  6484  	// Verify the splay is not negative
  6485  	if t.Splay < 0 {
  6486  		multierror.Append(&mErr, fmt.Errorf("Must specify a non-negative splay value"))
  6487  	}
  6488  
  6489  	// Verify the permissions
  6490  	if t.Perms != "" {
  6491  		if _, err := strconv.ParseUint(t.Perms, 8, 12); err != nil {
  6492  			multierror.Append(&mErr, fmt.Errorf("Failed to parse %q as octal: %v", t.Perms, err))
  6493  		}
  6494  	}
  6495  
  6496  	return mErr.ErrorOrNil()
  6497  }
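
        // Example (illustrative sketch, not part of the original source): change mode
        // "signal" requires a signal value and cannot be combined with env var
        // templates.
        //
        //	tmpl := &Template{
        //		EmbeddedTmpl: "PORT={{ env \"NOMAD_PORT_http\" }}",
        //		DestPath:     "local/env",
        //		ChangeMode:   TemplateChangeModeSignal, // no ChangeSignal set
        //		Envvars:      true,
        //	}
        //	err := tmpl.Validate() // missing signal value; signals + env vars rejected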
  6498  
  6499  func (t *Template) Warnings() error {
  6500  	var mErr multierror.Error
  6501  
  6502  	// Deprecation notice for vault_grace
  6503  	if t.VaultGrace != 0 {
  6504  		mErr.Errors = append(mErr.Errors, fmt.Errorf("VaultGrace has been deprecated as of Nomad 0.11 and ignored since Vault 0.5. Please remove VaultGrace / vault_grace from template stanza."))
  6505  	}
  6506  
  6507  	return mErr.ErrorOrNil()
  6508  }
  6509  
  6510  // Set of possible states for a task.
  6511  const (
  6512  	TaskStatePending = "pending" // The task is waiting to be run.
  6513  	TaskStateRunning = "running" // The task is currently running.
  6514  	TaskStateDead    = "dead"    // Terminal state of task.
  6515  )
  6516  
  6517  // TaskState tracks the current state of a task and events that caused state
  6518  // transitions.
  6519  type TaskState struct {
  6520  	// The current state of the task.
  6521  	State string
  6522  
  6523  	// Failed marks a task as having failed
  6524  	Failed bool
  6525  
  6526  	// Restarts is the number of times the task has restarted
  6527  	Restarts uint64
  6528  
  6529  	// LastRestart is the time the task last restarted. It is updated each time the
  6530  	// task restarts
  6531  	LastRestart time.Time
  6532  
  6533  	// StartedAt is the time the task was started. It is updated each time the
  6534  	// task starts
  6535  	StartedAt time.Time
  6536  
  6537  	// FinishedAt is the time at which the task transitioned to dead and will
  6538  	// not be started again.
  6539  	FinishedAt time.Time
  6540  
  6541  	// Series of task events that transition the state of the task.
  6542  	Events []*TaskEvent
  6543  }
  6544  
  6545  // NewTaskState returns a TaskState initialized in the Pending state.
  6546  func NewTaskState() *TaskState {
  6547  	return &TaskState{
  6548  		State: TaskStatePending,
  6549  	}
  6550  }
  6551  
  6552  // Canonicalize ensures the TaskState has a State set. It should default to
  6553  // Pending.
  6554  func (ts *TaskState) Canonicalize() {
  6555  	if ts.State == "" {
  6556  		ts.State = TaskStatePending
  6557  	}
  6558  }
  6559  
  6560  func (ts *TaskState) Copy() *TaskState {
  6561  	if ts == nil {
  6562  		return nil
  6563  	}
  6564  	copy := new(TaskState)
  6565  	*copy = *ts
  6566  
  6567  	if ts.Events != nil {
  6568  		copy.Events = make([]*TaskEvent, len(ts.Events))
  6569  		for i, e := range ts.Events {
  6570  			copy.Events[i] = e.Copy()
  6571  		}
  6572  	}
  6573  	return copy
  6574  }
  6575  
  6576  // Successful returns whether a task finished successfully. This is only
  6577  // meaningful for batch allocations, because service and system
  6578  // allocations should not finish.
  6579  func (ts *TaskState) Successful() bool {
  6580  	return ts.State == TaskStateDead && !ts.Failed
  6581  }
  6582  
  6583  const (
  6584  	// TaskSetupFailure indicates that the task could not be started due to
  6585  	// a setup failure.
  6586  	TaskSetupFailure = "Setup Failure"
  6587  
  6588  	// TaskDriverFailure indicates that the task could not be started due to a
  6589  	// failure in the driver. TaskDriverFailure is considered Recoverable.
  6590  	TaskDriverFailure = "Driver Failure"
  6591  
  6592  	// TaskReceived signals that the task has been pulled by the client at the
  6593  	// given timestamp.
  6594  	TaskReceived = "Received"
  6595  
  6596  	// TaskFailedValidation indicates the task was invalid and as such was not run.
  6597  	// TaskFailedValidation is not considered Recoverable.
  6598  	TaskFailedValidation = "Failed Validation"
  6599  
  6600  	// TaskStarted signals that the task was started and its timestamp can be
  6601  	// used to determine the running length of the task.
  6602  	TaskStarted = "Started"
  6603  
  6604  	// TaskTerminated indicates that the task was started and exited.
  6605  	TaskTerminated = "Terminated"
  6606  
  6607  	// TaskKilling indicates a kill signal has been sent to the task.
  6608  	TaskKilling = "Killing"
  6609  
  6610  	// TaskKilled indicates a user has killed the task.
  6611  	TaskKilled = "Killed"
  6612  
  6613  	// TaskRestarting indicates that the task terminated and is being restarted.
  6614  	TaskRestarting = "Restarting"
  6615  
  6616  	// TaskNotRestarting indicates that the task has failed and is not being
  6617  	// restarted because it has exceeded its restart policy.
  6618  	TaskNotRestarting = "Not Restarting"
  6619  
  6620  	// TaskRestartSignal indicates that the task has been signaled to be
  6621  	// restarted.
  6622  	TaskRestartSignal = "Restart Signaled"
  6623  
  6624  	// TaskSignaling indicates that the task is being signaled.
  6625  	TaskSignaling = "Signaling"
  6626  
  6627  	// TaskDownloadingArtifacts means the task is downloading the artifacts
  6628  	// specified in the task.
  6629  	TaskDownloadingArtifacts = "Downloading Artifacts"
  6630  
  6631  	// TaskArtifactDownloadFailed indicates that downloading the artifacts
  6632  	// failed.
  6633  	TaskArtifactDownloadFailed = "Failed Artifact Download"
  6634  
  6635  	// TaskBuildingTaskDir indicates that the task directory/chroot is being
  6636  	// built.
  6637  	TaskBuildingTaskDir = "Building Task Directory"
  6638  
  6639  	// TaskSetup indicates the task runner is setting up the task environment
  6640  	TaskSetup = "Task Setup"
  6641  
  6642  	// TaskDiskExceeded indicates that one of the tasks in a task group has
  6643  	// exceeded the requested disk resources.
  6644  	TaskDiskExceeded = "Disk Resources Exceeded"
  6645  
  6646  	// TaskSiblingFailed indicates that a sibling task in the task group has
  6647  	// failed.
  6648  	TaskSiblingFailed = "Sibling Task Failed"
  6649  
  6650  	// TaskDriverMessage is an informational event message emitted by
  6651  	// drivers such as when they're performing a long running action like
  6652  	// downloading an image.
  6653  	TaskDriverMessage = "Driver"
  6654  
  6655  	// TaskLeaderDead indicates that the leader task within the task group has finished.
  6656  	TaskLeaderDead = "Leader Task Dead"
  6657  
  6658  	// TaskHookFailed indicates that one of the hooks for a task failed.
  6659  	TaskHookFailed = "Task hook failed"
  6660  
  6661  	// TaskRestoreFailed indicates Nomad was unable to reattach to a
  6662  	// restored task.
  6663  	TaskRestoreFailed = "Failed Restoring Task"
  6664  
  6665  	// TaskPluginUnhealthy indicates that a plugin managed by Nomad became unhealthy
  6666  	TaskPluginUnhealthy = "Plugin became unhealthy"
  6667  
  6668  	// TaskPluginHealthy indicates that a plugin managed by Nomad became healthy
  6669  	TaskPluginHealthy = "Plugin became healthy"
  6670  )
  6671  
  6672  // TaskEvent is an event that affects the state of a task and contains metadata
  6673  // appropriate to the event's type.
  6674  type TaskEvent struct {
  6675  	Type string
  6676  	Time int64 // Unix Nanosecond timestamp
  6677  
  6678  	Message string // A possible message explaining the termination of the task.
  6679  
  6680  	// DisplayMessage is a human friendly message about the event
  6681  	DisplayMessage string
  6682  
  6683  	// Details is a map with annotated info about the event
  6684  	Details map[string]string
  6685  
  6686  	// DEPRECATION NOTICE: The following fields are deprecated and will be removed
  6687  	// in a future release. Field values are available in the Details map.
  6688  
  6689  	// FailsTask marks whether this event fails the task.
  6690  	// Deprecated, use Details["fails_task"] to access this.
  6691  	FailsTask bool
  6692  
  6693  	// Restart fields.
  6694  	// Deprecated, use Details["restart_reason"] to access this.
  6695  	RestartReason string
  6696  
  6697  	// Setup Failure fields.
  6698  	// Deprecated, use Details["setup_error"] to access this.
  6699  	SetupError string
  6700  
  6701  	// Driver Failure fields.
  6702  	// Deprecated, use Details["driver_error"] to access this.
  6703  	DriverError string // A driver error occurred while starting the task.
  6704  
  6705  	// Task Terminated Fields.
  6706  
  6707  	// Deprecated, use Details["exit_code"] to access this.
  6708  	ExitCode int // The exit code of the task.
  6709  
  6710  	// Deprecated, use Details["signal"] to access this.
  6711  	Signal int // The signal that terminated the task.
  6712  
  6713  	// Killing fields
  6714  	// Deprecated, use Details["kill_timeout"] to access this.
  6715  	KillTimeout time.Duration
  6716  
  6717  	// Task Killed Fields.
  6718  	// Deprecated, use Details["kill_error"] to access this.
  6719  	KillError string // Error killing the task.
  6720  
  6721  	// KillReason is the reason the task was killed
  6722  	// Deprecated, use Details["kill_reason"] to access this.
  6723  	KillReason string
  6724  
  6725  	// TaskRestarting fields.
  6726  	// Deprecated, use Details["start_delay"] to access this.
  6727  	StartDelay int64 // The sleep period before restarting the task in unix nanoseconds.
  6728  
  6729  	// Artifact Download fields
  6730  	// Deprecated, use Details["download_error"] to access this.
  6731  	DownloadError string // Error downloading artifacts
  6732  
  6733  	// Validation fields
  6734  	// Deprecated, use Details["validation_error"] to access this.
  6735  	ValidationError string // Validation error
  6736  
  6737  	// The maximum allowed task disk size.
  6738  	// Deprecated, use Details["disk_limit"] to access this.
  6739  	DiskLimit int64
  6740  
  6741  	// Name of the sibling task that caused termination of the task that
  6742  	// the TaskEvent refers to.
  6743  	// Deprecated, use Details["failed_sibling"] to access this.
  6744  	FailedSibling string
  6745  
  6746  	// VaultError is the error from token renewal
  6747  	// Deprecated, use Details["vault_renewal_error"] to access this.
  6748  	VaultError string
  6749  
  6750  	// TaskSignalReason indicates the reason the task is being signalled.
  6751  	// Deprecated, use Details["task_signal_reason"] to access this.
  6752  	TaskSignalReason string
  6753  
  6754  	// TaskSignal is the signal that was sent to the task
  6755  	// Deprecated, use Details["task_signal"] to access this.
  6756  	TaskSignal string
  6757  
  6758  	// DriverMessage indicates a driver action being taken.
  6759  	// Deprecated, use Details["driver_message"] to access this.
  6760  	DriverMessage string
  6761  
  6762  	// GenericSource is the source of a message.
  6763  	// Deprecated, is redundant with event type.
  6764  	GenericSource string
  6765  }
  6766  
  6767  func (event *TaskEvent) PopulateEventDisplayMessage() {
  6768  	// Build up the description based on the event type.
  6769  	if event == nil { // TODO(preetha): needs investigation; alloc_runner's Run method sends a nil event when SIGTERMing Nomad. Why?
  6770  		return
  6771  	}
  6772  
  6773  	if event.DisplayMessage != "" {
  6774  		return
  6775  	}
  6776  
  6777  	var desc string
  6778  	switch event.Type {
  6779  	case TaskSetup:
  6780  		desc = event.Message
  6781  	case TaskStarted:
  6782  		desc = "Task started by client"
  6783  	case TaskReceived:
  6784  		desc = "Task received by client"
  6785  	case TaskFailedValidation:
  6786  		if event.ValidationError != "" {
  6787  			desc = event.ValidationError
  6788  		} else {
  6789  			desc = "Validation of task failed"
  6790  		}
  6791  	case TaskSetupFailure:
  6792  		if event.SetupError != "" {
  6793  			desc = event.SetupError
  6794  		} else {
  6795  			desc = "Task setup failed"
  6796  		}
  6797  	case TaskDriverFailure:
  6798  		if event.DriverError != "" {
  6799  			desc = event.DriverError
  6800  		} else {
  6801  			desc = "Failed to start task"
  6802  		}
  6803  	case TaskDownloadingArtifacts:
  6804  		desc = "Client is downloading artifacts"
  6805  	case TaskArtifactDownloadFailed:
  6806  		if event.DownloadError != "" {
  6807  			desc = event.DownloadError
  6808  		} else {
  6809  			desc = "Failed to download artifacts"
  6810  		}
  6811  	case TaskKilling:
  6812  		if event.KillReason != "" {
  6813  			desc = event.KillReason
  6814  		} else if event.KillTimeout != 0 {
  6815  			desc = fmt.Sprintf("Sent interrupt. Waiting %v before force killing", event.KillTimeout)
  6816  		} else {
  6817  			desc = "Sent interrupt"
  6818  		}
  6819  	case TaskKilled:
  6820  		if event.KillError != "" {
  6821  			desc = event.KillError
  6822  		} else {
  6823  			desc = "Task successfully killed"
  6824  		}
  6825  	case TaskTerminated:
  6826  		var parts []string
  6827  		parts = append(parts, fmt.Sprintf("Exit Code: %d", event.ExitCode))
  6828  
  6829  		if event.Signal != 0 {
  6830  			parts = append(parts, fmt.Sprintf("Signal: %d", event.Signal))
  6831  		}
  6832  
  6833  		if event.Message != "" {
  6834  			parts = append(parts, fmt.Sprintf("Exit Message: %q", event.Message))
  6835  		}
  6836  		desc = strings.Join(parts, ", ")
  6837  	case TaskRestarting:
  6838  		in := fmt.Sprintf("Task restarting in %v", time.Duration(event.StartDelay))
  6839  		if event.RestartReason != "" && event.RestartReason != ReasonWithinPolicy {
  6840  			desc = fmt.Sprintf("%s - %s", event.RestartReason, in)
  6841  		} else {
  6842  			desc = in
  6843  		}
  6844  	case TaskNotRestarting:
  6845  		if event.RestartReason != "" {
  6846  			desc = event.RestartReason
  6847  		} else {
  6848  			desc = "Task exceeded restart policy"
  6849  		}
  6850  	case TaskSiblingFailed:
  6851  		if event.FailedSibling != "" {
  6852  			desc = fmt.Sprintf("Task's sibling %q failed", event.FailedSibling)
  6853  		} else {
  6854  			desc = "Task's sibling failed"
  6855  		}
  6856  	case TaskSignaling:
  6857  		sig := event.TaskSignal
  6858  		reason := event.TaskSignalReason
  6859  
  6860  		if sig == "" && reason == "" {
  6861  			desc = "Task being sent a signal"
  6862  		} else if sig == "" {
  6863  			desc = reason
  6864  		} else if reason == "" {
  6865  			desc = fmt.Sprintf("Task being sent signal %v", sig)
  6866  		} else {
  6867  			desc = fmt.Sprintf("Task being sent signal %v: %v", sig, reason)
  6868  		}
  6869  	case TaskRestartSignal:
  6870  		if event.RestartReason != "" {
  6871  			desc = event.RestartReason
  6872  		} else {
  6873  			desc = "Task signaled to restart"
  6874  		}
  6875  	case TaskDriverMessage:
  6876  		desc = event.DriverMessage
  6877  	case TaskLeaderDead:
  6878  		desc = "Leader Task in Group dead"
  6879  	default:
  6880  		desc = event.Message
  6881  	}
  6882  
  6883  	event.DisplayMessage = desc
  6884  }
  6885  
  6886  func (te *TaskEvent) GoString() string {
  6887  	return fmt.Sprintf("%v - %v", te.Time, te.Type)
  6888  }
  6889  
  6890  // SetDisplayMessage sets the display message of TaskEvent
  6891  func (te *TaskEvent) SetDisplayMessage(msg string) *TaskEvent {
  6892  	te.DisplayMessage = msg
  6893  	return te
  6894  }
  6895  
  6896  // SetMessage sets the message of TaskEvent
  6897  func (te *TaskEvent) SetMessage(msg string) *TaskEvent {
  6898  	te.Message = msg
  6899  	te.Details["message"] = msg
  6900  	return te
  6901  }
  6902  
  6903  func (te *TaskEvent) Copy() *TaskEvent {
  6904  	if te == nil {
  6905  		return nil
  6906  	}
  6907  	copy := new(TaskEvent)
  6908  	*copy = *te
  6909  	return copy
  6910  }
  6911  
  6912  func NewTaskEvent(event string) *TaskEvent {
  6913  	return &TaskEvent{
  6914  		Type:    event,
  6915  		Time:    time.Now().UnixNano(),
  6916  		Details: make(map[string]string),
  6917  	}
  6918  }
  6919  
  6920  // SetSetupError is used to store an error that occurred while setting up the
  6921  // task
  6922  func (e *TaskEvent) SetSetupError(err error) *TaskEvent {
  6923  	if err != nil {
  6924  		e.SetupError = err.Error()
  6925  		e.Details["setup_error"] = err.Error()
  6926  	}
  6927  	return e
  6928  }
  6929  
  6930  func (e *TaskEvent) SetFailsTask() *TaskEvent {
  6931  	e.FailsTask = true
  6932  	e.Details["fails_task"] = "true"
  6933  	return e
  6934  }
  6935  
  6936  func (e *TaskEvent) SetDriverError(err error) *TaskEvent {
  6937  	if err != nil {
  6938  		e.DriverError = err.Error()
  6939  		e.Details["driver_error"] = err.Error()
  6940  	}
  6941  	return e
  6942  }
  6943  
  6944  func (e *TaskEvent) SetExitCode(c int) *TaskEvent {
  6945  	e.ExitCode = c
  6946  	e.Details["exit_code"] = fmt.Sprintf("%d", c)
  6947  	return e
  6948  }
  6949  
  6950  func (e *TaskEvent) SetSignal(s int) *TaskEvent {
  6951  	e.Signal = s
  6952  	e.Details["signal"] = fmt.Sprintf("%d", s)
  6953  	return e
  6954  }
  6955  
  6956  func (e *TaskEvent) SetSignalText(s string) *TaskEvent {
  6957  	e.Details["signal"] = s
  6958  	return e
  6959  }
  6960  
  6961  func (e *TaskEvent) SetExitMessage(err error) *TaskEvent {
  6962  	if err != nil {
  6963  		e.Message = err.Error()
  6964  		e.Details["exit_message"] = err.Error()
  6965  	}
  6966  	return e
  6967  }
  6968  
  6969  func (e *TaskEvent) SetKillError(err error) *TaskEvent {
  6970  	if err != nil {
  6971  		e.KillError = err.Error()
  6972  		e.Details["kill_error"] = err.Error()
  6973  	}
  6974  	return e
  6975  }
  6976  
  6977  func (e *TaskEvent) SetKillReason(r string) *TaskEvent {
  6978  	e.KillReason = r
  6979  	e.Details["kill_reason"] = r
  6980  	return e
  6981  }
  6982  
  6983  func (e *TaskEvent) SetRestartDelay(delay time.Duration) *TaskEvent {
  6984  	e.StartDelay = int64(delay)
  6985  	e.Details["start_delay"] = fmt.Sprintf("%d", delay)
  6986  	return e
  6987  }
  6988  
  6989  func (e *TaskEvent) SetRestartReason(reason string) *TaskEvent {
  6990  	e.RestartReason = reason
  6991  	e.Details["restart_reason"] = reason
  6992  	return e
  6993  }
  6994  
  6995  func (e *TaskEvent) SetTaskSignalReason(r string) *TaskEvent {
  6996  	e.TaskSignalReason = r
  6997  	e.Details["task_signal_reason"] = r
  6998  	return e
  6999  }
  7000  
  7001  func (e *TaskEvent) SetTaskSignal(s os.Signal) *TaskEvent {
  7002  	e.TaskSignal = s.String()
  7003  	e.Details["task_signal"] = s.String()
  7004  	return e
  7005  }
  7006  
  7007  func (e *TaskEvent) SetDownloadError(err error) *TaskEvent {
  7008  	if err != nil {
  7009  		e.DownloadError = err.Error()
  7010  		e.Details["download_error"] = err.Error()
  7011  	}
  7012  	return e
  7013  }
  7014  
  7015  func (e *TaskEvent) SetValidationError(err error) *TaskEvent {
  7016  	if err != nil {
  7017  		e.ValidationError = err.Error()
  7018  		e.Details["validation_error"] = err.Error()
  7019  	}
  7020  	return e
  7021  }
  7022  
  7023  func (e *TaskEvent) SetKillTimeout(timeout time.Duration) *TaskEvent {
  7024  	e.KillTimeout = timeout
  7025  	e.Details["kill_timeout"] = timeout.String()
  7026  	return e
  7027  }
  7028  
  7029  func (e *TaskEvent) SetDiskLimit(limit int64) *TaskEvent {
  7030  	e.DiskLimit = limit
  7031  	e.Details["disk_limit"] = fmt.Sprintf("%d", limit)
  7032  	return e
  7033  }
  7034  
  7035  func (e *TaskEvent) SetFailedSibling(sibling string) *TaskEvent {
  7036  	e.FailedSibling = sibling
  7037  	e.Details["failed_sibling"] = sibling
  7038  	return e
  7039  }
  7040  
  7041  func (e *TaskEvent) SetVaultRenewalError(err error) *TaskEvent {
  7042  	if err != nil {
  7043  		e.VaultError = err.Error()
  7044  		e.Details["vault_renewal_error"] = err.Error()
  7045  	}
  7046  	return e
  7047  }
  7048  
  7049  func (e *TaskEvent) SetDriverMessage(m string) *TaskEvent {
  7050  	e.DriverMessage = m
  7051  	e.Details["driver_message"] = m
  7052  	return e
  7053  }
  7054  
  7055  func (e *TaskEvent) SetOOMKilled(oom bool) *TaskEvent {
  7056  	e.Details["oom_killed"] = strconv.FormatBool(oom)
  7057  	return e
  7058  }
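
        // Example (illustrative sketch, not part of the original source; the values
        // are assumptions): the setters above form a fluent builder that mirrors each
        // deprecated field into Details.
        //
        //	e := NewTaskEvent(TaskTerminated).
        //		SetExitCode(137).
        //		SetSignal(9)
        //	e.PopulateEventDisplayMessage()
        //	// e.DisplayMessage == "Exit Code: 137, Signal: 9"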
  7059  
  7060  // TaskArtifact is an artifact to download before running the task.
  7061  type TaskArtifact struct {
  7062  	// GetterSource is the source to download an artifact using go-getter
  7063  	GetterSource string
  7064  
  7065  	// GetterOptions are options to use when downloading the artifact using
  7066  	// go-getter.
  7067  	GetterOptions map[string]string
  7068  
  7069  	// GetterMode is the go-getter.ClientMode for fetching resources.
  7070  	// Defaults to "any" but can be set to "file" or "dir".
  7071  	GetterMode string
  7072  
  7073  	// RelativeDest is the download destination given relative to the task's
  7074  	// directory.
  7075  	RelativeDest string
  7076  }
  7077  
  7078  func (ta *TaskArtifact) Copy() *TaskArtifact {
  7079  	if ta == nil {
  7080  		return nil
  7081  	}
  7082  	nta := new(TaskArtifact)
  7083  	*nta = *ta
  7084  	nta.GetterOptions = helper.CopyMapStringString(ta.GetterOptions)
  7085  	return nta
  7086  }
  7087  
  7088  func (ta *TaskArtifact) GoString() string {
  7089  	return fmt.Sprintf("%+v", ta)
  7090  }
  7091  
  7092  // Hash creates a unique identifier for a TaskArtifact as the same GetterSource
  7093  // may be specified multiple times with different destinations.
  7094  func (ta *TaskArtifact) Hash() string {
  7095  	hash, err := blake2b.New256(nil)
  7096  	if err != nil {
  7097  		panic(err)
  7098  	}
  7099  
  7100  	hash.Write([]byte(ta.GetterSource))
  7101  
  7102  	// Must iterate over keys in a consistent order
  7103  	keys := make([]string, 0, len(ta.GetterOptions))
  7104  	for k := range ta.GetterOptions {
  7105  		keys = append(keys, k)
  7106  	}
  7107  	sort.Strings(keys)
  7108  	for _, k := range keys {
  7109  		hash.Write([]byte(k))
  7110  		hash.Write([]byte(ta.GetterOptions[k]))
  7111  	}
  7112  
  7113  	hash.Write([]byte(ta.GetterMode))
  7114  	hash.Write([]byte(ta.RelativeDest))
  7115  	return base64.RawStdEncoding.EncodeToString(hash.Sum(nil))
  7116  }
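
        // Example (illustrative sketch, not part of the original source; URLs are
        // assumptions): because RelativeDest is hashed, the same source fetched to
        // two destinations yields two distinct identifiers.
        //
        //	a := &TaskArtifact{GetterSource: "https://example.com/app.tgz", RelativeDest: "local/a"}
        //	b := &TaskArtifact{GetterSource: "https://example.com/app.tgz", RelativeDest: "local/b"}
        //	fmt.Println(a.Hash() != b.Hash()) // true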
  7117  
  7118  // PathEscapesAllocDir returns whether the given path escapes the allocation
  7119  // directory. The prefix accounts for a path that will later be joined against
  7120  // it; for example, a "task/local" prefix may be provided if the path will be
  7121  // joined against that prefix.
  7122  func PathEscapesAllocDir(prefix, path string) (bool, error) {
  7123  	// Verify the destination doesn't escape the tasks directory
  7124  	alloc, err := filepath.Abs(filepath.Join("/", "alloc-dir/", "alloc-id/"))
  7125  	if err != nil {
  7126  		return false, err
  7127  	}
  7128  	abs, err := filepath.Abs(filepath.Join(alloc, prefix, path))
  7129  	if err != nil {
  7130  		return false, err
  7131  	}
  7132  	rel, err := filepath.Rel(alloc, abs)
  7133  	if err != nil {
  7134  		return false, err
  7135  	}
  7136  
  7137  	return strings.HasPrefix(rel, ".."), nil
  7138  }
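
        // Example (illustrative sketch, not part of the original source): the check
        // is purely lexical, so only the relative structure of the path matters.
        //
        //	escaped, _ := PathEscapesAllocDir("task", "local/out.txt") // escaped == false
        //	escaped, _ = PathEscapesAllocDir("task", "../../..")       // escaped == true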
  7139  
  7140  func (ta *TaskArtifact) Validate() error {
  7141  	// Verify the source
  7142  	var mErr multierror.Error
  7143  	if ta.GetterSource == "" {
  7144  		mErr.Errors = append(mErr.Errors, fmt.Errorf("source must be specified"))
  7145  	}
  7146  
  7147  	switch ta.GetterMode {
  7148  	case "":
  7149  		// Default to any
  7150  		ta.GetterMode = GetterModeAny
  7151  	case GetterModeAny, GetterModeFile, GetterModeDir:
  7152  		// Ok
  7153  	default:
  7154  		mErr.Errors = append(mErr.Errors, fmt.Errorf("invalid artifact mode %q; must be one of: %s, %s, %s",
  7155  			ta.GetterMode, GetterModeAny, GetterModeFile, GetterModeDir))
  7156  	}
  7157  
  7158  	escaped, err := PathEscapesAllocDir("task", ta.RelativeDest)
  7159  	if err != nil {
  7160  		mErr.Errors = append(mErr.Errors, fmt.Errorf("invalid destination path: %v", err))
  7161  	} else if escaped {
  7162  		mErr.Errors = append(mErr.Errors, fmt.Errorf("destination escapes allocation directory"))
  7163  	}
  7164  
  7165  	if err := ta.validateChecksum(); err != nil {
  7166  		mErr.Errors = append(mErr.Errors, err)
  7167  	}
  7168  
  7169  	return mErr.ErrorOrNil()
  7170  }
  7171  
  7172  func (ta *TaskArtifact) validateChecksum() error {
  7173  	check, ok := ta.GetterOptions["checksum"]
  7174  	if !ok {
  7175  		return nil
  7176  	}
  7177  
  7178  	// Job struct validation occurs before interpolation resolution can be effective.
  7179  	// Skip checking if the checksum contains a variable reference; artifact fetching
  7180  	// will eventually fail if the checksum is indeed invalid.
  7181  	if args.ContainsEnv(check) {
  7182  		return nil
  7183  	}
  7184  
  7185  	check = strings.TrimSpace(check)
  7186  	if check == "" {
  7187  		return fmt.Errorf("checksum value cannot be empty")
  7188  	}
  7189  
  7190  	parts := strings.Split(check, ":")
  7191  	if l := len(parts); l != 2 {
  7192  		return fmt.Errorf(`checksum must be given as "type:value"; got %q`, check)
  7193  	}
  7194  
  7195  	checksumVal := parts[1]
  7196  	checksumBytes, err := hex.DecodeString(checksumVal)
  7197  	if err != nil {
  7198  		return fmt.Errorf("invalid checksum: %v", err)
  7199  	}
  7200  
  7201  	checksumType := parts[0]
  7202  	expectedLength := 0
  7203  	switch checksumType {
  7204  	case "md5":
  7205  		expectedLength = md5.Size
  7206  	case "sha1":
  7207  		expectedLength = sha1.Size
  7208  	case "sha256":
  7209  		expectedLength = sha256.Size
  7210  	case "sha512":
  7211  		expectedLength = sha512.Size
  7212  	default:
  7213  		return fmt.Errorf("unsupported checksum type: %s", checksumType)
  7214  	}
  7215  
  7216  	if len(checksumBytes) != expectedLength {
  7217  		return fmt.Errorf("invalid %s checksum: %v", checksumType, checksumVal)
  7218  	}
  7219  
  7220  	return nil
  7221  }
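
        // Example (illustrative sketch, not part of the original source; the URL is
        // an assumption): a checksum option must be "type:value" with a hex value of
        // the right length. The sha256 below is the well-known digest of the empty
        // string.
        //
        //	ta := &TaskArtifact{
        //		GetterSource: "https://example.com/app.tgz",
        //		GetterOptions: map[string]string{
        //			"checksum": "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
        //		},
        //	}
        //	err := ta.Validate() // err == nil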
  7222  
  7223  const (
  7224  	ConstraintDistinctProperty  = "distinct_property"
  7225  	ConstraintDistinctHosts     = "distinct_hosts"
  7226  	ConstraintRegex             = "regexp"
  7227  	ConstraintVersion           = "version"
  7228  	ConstraintSemver            = "semver"
  7229  	ConstraintSetContains       = "set_contains"
  7230  	ConstraintSetContainsAll    = "set_contains_all"
  7231  	ConstraintSetContainsAny    = "set_contains_any"
  7232  	ConstraintAttributeIsSet    = "is_set"
  7233  	ConstraintAttributeIsNotSet = "is_not_set"
  7234  )
  7235  
  7236  // Constraints are used to restrict placement options.
  7237  type Constraint struct {
  7238  	LTarget string // Left-hand target
  7239  	RTarget string // Right-hand target
  7240  	Operand string // Constraint operand (<=, <, =, !=, >, >=), contains, near
  7241  	str     string // Memoized string
  7242  }
  7243  
  7244  // Equals checks whether two constraints are equal.
  7245  func (c *Constraint) Equals(o *Constraint) bool {
  7246  	return c == o ||
  7247  		c.LTarget == o.LTarget &&
  7248  			c.RTarget == o.RTarget &&
  7249  			c.Operand == o.Operand
  7250  }
  7251  
  7252  func (c *Constraint) Equal(o *Constraint) bool {
  7253  	return c.Equals(o)
  7254  }
  7255  
  7256  func (c *Constraint) Copy() *Constraint {
  7257  	if c == nil {
  7258  		return nil
  7259  	}
  7260  	nc := new(Constraint)
  7261  	*nc = *c
  7262  	return nc
  7263  }
  7264  
  7265  func (c *Constraint) String() string {
  7266  	if c.str != "" {
  7267  		return c.str
  7268  	}
  7269  	c.str = fmt.Sprintf("%s %s %s", c.LTarget, c.Operand, c.RTarget)
  7270  	return c.str
  7271  }
  7272  
  7273  func (c *Constraint) Validate() error {
  7274  	var mErr multierror.Error
  7275  	if c.Operand == "" {
  7276  		mErr.Errors = append(mErr.Errors, errors.New("Missing constraint operand"))
  7277  	}
  7278  
  7279  	// requireLtarget specifies whether the constraint requires an LTarget to be
  7280  	// provided.
  7281  	requireLtarget := true
  7282  
  7283  	// Perform additional validation based on operand
  7284  	switch c.Operand {
  7285  	case ConstraintDistinctHosts:
  7286  		requireLtarget = false
  7287  	case ConstraintSetContainsAll, ConstraintSetContainsAny, ConstraintSetContains:
  7288  		if c.RTarget == "" {
  7289  			mErr.Errors = append(mErr.Errors, fmt.Errorf("Set contains constraint requires an RTarget"))
  7290  		}
  7291  	case ConstraintRegex:
  7292  		if _, err := regexp.Compile(c.RTarget); err != nil {
  7293  			mErr.Errors = append(mErr.Errors, fmt.Errorf("Regular expression failed to compile: %v", err))
  7294  		}
  7295  	case ConstraintVersion:
  7296  		if _, err := version.NewConstraint(c.RTarget); err != nil {
  7297  			mErr.Errors = append(mErr.Errors, fmt.Errorf("Version constraint is invalid: %v", err))
  7298  		}
  7299  	case ConstraintSemver:
  7300  		if _, err := semver.NewConstraint(c.RTarget); err != nil {
  7301  			mErr.Errors = append(mErr.Errors, fmt.Errorf("Semver constraint is invalid: %v", err))
  7302  		}
  7303  	case ConstraintDistinctProperty:
  7304  		// If a count is set, make sure it is convertible to a uint64
  7305  		if c.RTarget != "" {
  7306  			count, err := strconv.ParseUint(c.RTarget, 10, 64)
  7307  			if err != nil {
  7308  				mErr.Errors = append(mErr.Errors, fmt.Errorf("Failed to convert RTarget %q to uint64: %v", c.RTarget, err))
  7309  			} else if count < 1 {
  7310  				mErr.Errors = append(mErr.Errors, fmt.Errorf("Distinct Property must have an allowed count of 1 or greater: %d < 1", count))
  7311  			}
  7312  		}
  7313  	case ConstraintAttributeIsSet, ConstraintAttributeIsNotSet:
  7314  		if c.RTarget != "" {
  7315  			mErr.Errors = append(mErr.Errors, fmt.Errorf("Operator %q does not support an RTarget", c.Operand))
  7316  		}
  7317  	case "=", "==", "is", "!=", "not", "<", "<=", ">", ">=":
  7318  		if c.RTarget == "" {
  7319  			mErr.Errors = append(mErr.Errors, fmt.Errorf("Operator %q requires an RTarget", c.Operand))
  7320  		}
  7321  	default:
  7322  		mErr.Errors = append(mErr.Errors, fmt.Errorf("Unknown constraint type %q", c.Operand))
  7323  	}
  7324  
  7325  	// Ensure we have an LTarget for the constraints that need one
  7326  	if requireLtarget && c.LTarget == "" {
  7327  		mErr.Errors = append(mErr.Errors, fmt.Errorf("No LTarget provided but is required by constraint"))
  7328  	}
  7329  
  7330  	return mErr.ErrorOrNil()
  7331  }
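
        // Example (illustrative sketch, not part of the original source): a typical
        // attribute equality constraint that passes Validate.
        //
        //	c := &Constraint{LTarget: "${attr.kernel.name}", RTarget: "linux", Operand: "="}
        //	err := c.Validate() // err == nil: "=" with both targets set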
  7332  
  7333  type Constraints []*Constraint
  7334  
  7335  // Equals compares Constraints as a set
  7336  func (xs *Constraints) Equals(ys *Constraints) bool {
  7337  	if xs == ys {
  7338  		return true
  7339  	}
  7340  	if xs == nil || ys == nil {
  7341  		return false
  7342  	}
  7343  	if len(*xs) != len(*ys) {
  7344  		return false
  7345  	}
  7346  SETEQUALS:
  7347  	for _, x := range *xs {
  7348  		for _, y := range *ys {
  7349  			if x.Equals(y) {
  7350  				continue SETEQUALS
  7351  			}
  7352  		}
  7353  		return false
  7354  	}
  7355  	return true
  7356  }
  7357  
  7358  // Affinity is used to score placement options based on a weight
  7359  type Affinity struct {
  7360  	LTarget string // Left-hand target
  7361  	RTarget string // Right-hand target
  7362  	Operand string // Affinity operand (<=, <, =, !=, >, >=), set_contains_all, set_contains_any
  7363  	Weight  int8   // Weight applied to nodes that match the affinity. Can be negative
  7364  	str     string // Memoized string
  7365  }
  7366  
  7367  // Equals checks whether two affinities are equal.
  7368  func (a *Affinity) Equals(o *Affinity) bool {
  7369  	return a == o ||
  7370  		a.LTarget == o.LTarget &&
  7371  			a.RTarget == o.RTarget &&
  7372  			a.Operand == o.Operand &&
  7373  			a.Weight == o.Weight
  7374  }
  7375  
  7376  func (a *Affinity) Equal(o *Affinity) bool {
  7377  	return a.Equals(o)
  7378  }
  7379  
  7380  func (a *Affinity) Copy() *Affinity {
  7381  	if a == nil {
  7382  		return nil
  7383  	}
  7384  	na := new(Affinity)
  7385  	*na = *a
  7386  	return na
  7387  }
  7388  
  7389  func (a *Affinity) String() string {
  7390  	if a.str != "" {
  7391  		return a.str
  7392  	}
  7393  	a.str = fmt.Sprintf("%s %s %s %v", a.LTarget, a.Operand, a.RTarget, a.Weight)
  7394  	return a.str
  7395  }
  7396  
  7397  func (a *Affinity) Validate() error {
  7398  	var mErr multierror.Error
  7399  	if a.Operand == "" {
  7400  		mErr.Errors = append(mErr.Errors, errors.New("Missing affinity operand"))
  7401  	}
  7402  
  7403  	// Perform additional validation based on operand
  7404  	switch a.Operand {
  7405  	case ConstraintSetContainsAll, ConstraintSetContainsAny, ConstraintSetContains:
  7406  		if a.RTarget == "" {
  7407  			mErr.Errors = append(mErr.Errors, fmt.Errorf("Set contains operators require an RTarget"))
  7408  		}
  7409  	case ConstraintRegex:
  7410  		if _, err := regexp.Compile(a.RTarget); err != nil {
  7411  			mErr.Errors = append(mErr.Errors, fmt.Errorf("Regular expression failed to compile: %v", err))
  7412  		}
  7413  	case ConstraintVersion:
  7414  		if _, err := version.NewConstraint(a.RTarget); err != nil {
  7415  			mErr.Errors = append(mErr.Errors, fmt.Errorf("Version affinity is invalid: %v", err))
  7416  		}
  7417  	case ConstraintSemver:
  7418  		if _, err := semver.NewConstraint(a.RTarget); err != nil {
  7419  			mErr.Errors = append(mErr.Errors, fmt.Errorf("Semver affinity is invalid: %v", err))
  7420  		}
  7421  	case "=", "==", "is", "!=", "not", "<", "<=", ">", ">=":
  7422  		if a.RTarget == "" {
  7423  			mErr.Errors = append(mErr.Errors, fmt.Errorf("Operator %q requires an RTarget", a.Operand))
  7424  		}
  7425  	default:
  7426  		mErr.Errors = append(mErr.Errors, fmt.Errorf("Unknown affinity operator %q", a.Operand))
  7427  	}
  7428  
  7429  	// Ensure we have an LTarget
  7430  	if a.LTarget == "" {
  7431  		mErr.Errors = append(mErr.Errors, fmt.Errorf("No LTarget provided but is required"))
  7432  	}
  7433  
  7434  	// Ensure that weight is between -100 and 100, and not zero
  7435  	if a.Weight == 0 {
  7436  		mErr.Errors = append(mErr.Errors, fmt.Errorf("Affinity weight cannot be zero"))
  7437  	}
  7438  
  7439  	if a.Weight > 100 || a.Weight < -100 {
  7440  		mErr.Errors = append(mErr.Errors, fmt.Errorf("Affinity weight must be within the range [-100,100]"))
  7441  	}
  7442  
  7443  	return mErr.ErrorOrNil()
  7444  }
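
        // Example (illustrative sketch, not part of the original source): an affinity
        // must carry a non-zero weight in [-100, 100].
        //
        //	a := &Affinity{LTarget: "${node.datacenter}", RTarget: "dc1", Operand: "=", Weight: 50}
        //	err := a.Validate() // err == nil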
  7445  
  7446  // Spread is used to specify desired distribution of allocations according to weight
  7447  type Spread struct {
  7448  	// Attribute is the node attribute used as the spread criteria
  7449  	Attribute string
  7450  
  7451  	// Weight is the relative weight of this spread, useful when there are multiple
  7452  	// spreads and affinities.
  7453  	Weight int8
  7454  
  7455  	// SpreadTarget is used to describe desired percentages for each attribute value
  7456  	SpreadTarget []*SpreadTarget
  7457  
  7458  	// Memoized string representation
  7459  	str string
  7460  }
  7461  
  7462  type Affinities []*Affinity
  7463  
  7464  // Equals compares Affinities as a set
  7465  func (xs *Affinities) Equals(ys *Affinities) bool {
  7466  	if xs == ys {
  7467  		return true
  7468  	}
  7469  	if xs == nil || ys == nil {
  7470  		return false
  7471  	}
  7472  	if len(*xs) != len(*ys) {
  7473  		return false
  7474  	}
  7475  SETEQUALS:
  7476  	for _, x := range *xs {
  7477  		for _, y := range *ys {
  7478  			if x.Equals(y) {
  7479  				continue SETEQUALS
  7480  			}
  7481  		}
  7482  		return false
  7483  	}
  7484  	return true
  7485  }
  7486  
  7487  func (s *Spread) Copy() *Spread {
  7488  	if s == nil {
  7489  		return nil
  7490  	}
  7491  	ns := new(Spread)
  7492  	*ns = *s
  7493  
  7494  	ns.SpreadTarget = CopySliceSpreadTarget(s.SpreadTarget)
  7495  	return ns
  7496  }
  7497  
  7498  func (s *Spread) String() string {
  7499  	if s.str != "" {
  7500  		return s.str
  7501  	}
  7502  	s.str = fmt.Sprintf("%s %s %v", s.Attribute, s.SpreadTarget, s.Weight)
  7503  	return s.str
  7504  }
  7505  
  7506  func (s *Spread) Validate() error {
  7507  	var mErr multierror.Error
  7508  	if s.Attribute == "" {
  7509  		mErr.Errors = append(mErr.Errors, errors.New("Missing spread attribute"))
  7510  	}
  7511  	if s.Weight <= 0 || s.Weight > 100 {
  7512  		mErr.Errors = append(mErr.Errors, errors.New("Spread stanza must have a positive weight from 1 to 100"))
  7513  	}
  7514  	seen := make(map[string]struct{})
  7515  	sumPercent := uint32(0)
  7516  
  7517  	for _, target := range s.SpreadTarget {
  7518  		// Make sure there are no duplicates
  7519  		_, ok := seen[target.Value]
  7520  		if !ok {
  7521  			seen[target.Value] = struct{}{}
  7522  		} else {
  7523  			mErr.Errors = append(mErr.Errors, fmt.Errorf("Spread target value %q already defined", target.Value))
  7524  		}
  7525  		if target.Percent < 0 || target.Percent > 100 {
  7526  			mErr.Errors = append(mErr.Errors, fmt.Errorf("Spread target percentage for value %q must be between 0 and 100", target.Value))
  7527  		}
  7528  		sumPercent += uint32(target.Percent)
  7529  	}
  7530  	if sumPercent > 100 {
  7531  		mErr.Errors = append(mErr.Errors, fmt.Errorf("Sum of spread target percentages must not be greater than 100%%; got %d%%", sumPercent))
  7532  	}
  7533  	return mErr.ErrorOrNil()
  7534  }
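
        // Example (illustrative sketch, not part of the original source; values are
        // assumptions): target values must be unique and their percentages may not
        // sum past 100.
        //
        //	s := &Spread{
        //		Attribute: "${node.datacenter}",
        //		Weight:    50,
        //		SpreadTarget: []*SpreadTarget{
        //			{Value: "dc1", Percent: 70},
        //			{Value: "dc2", Percent: 30},
        //		},
        //	}
        //	err := s.Validate() // err == nil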
  7535  
  7536  // SpreadTarget is used to specify desired percentages for each attribute value
  7537  type SpreadTarget struct {
  7538  	// Value is a single attribute value, like "dc1"
  7539  	Value string
  7540  
  7541  	// Percent is the desired percentage of allocs
  7542  	Percent uint8
  7543  
  7544  	// Memoized string representation
  7545  	str string
  7546  }
  7547  
  7548  func (s *SpreadTarget) Copy() *SpreadTarget {
  7549  	if s == nil {
  7550  		return nil
  7551  	}
  7552  
  7553  	ns := new(SpreadTarget)
  7554  	*ns = *s
  7555  	return ns
  7556  }
  7557  
  7558  func (s *SpreadTarget) String() string {
  7559  	if s.str != "" {
  7560  		return s.str
  7561  	}
  7562  	s.str = fmt.Sprintf("%q %v%%", s.Value, s.Percent)
  7563  	return s.str
  7564  }
  7565  
  7566  // EphemeralDisk is an ephemeral disk object
  7567  type EphemeralDisk struct {
  7568  	// Sticky indicates whether the allocation is sticky to a node
  7569  	Sticky bool
  7570  
  7571  	// SizeMB is the size of the local disk
  7572  	SizeMB int
  7573  
  7574  	// Migrate determines if Nomad client should migrate the allocation dir for
  7575  	// sticky allocations
  7576  	Migrate bool
  7577  }
  7578  
  7579  // DefaultEphemeralDisk returns an EphemeralDisk with default configuration
  7580  func DefaultEphemeralDisk() *EphemeralDisk {
  7581  	return &EphemeralDisk{
  7582  		SizeMB: 300,
  7583  	}
  7584  }
  7585  
  7586  // Validate validates EphemeralDisk
  7587  func (d *EphemeralDisk) Validate() error {
  7588  	if d.SizeMB < 10 {
  7589  		return fmt.Errorf("minimum DiskMB value is 10; got %d", d.SizeMB)
  7590  	}
  7591  	return nil
  7592  }
  7593  
  7594  // Copy copies the EphemeralDisk struct and returns a new one
  7595  func (d *EphemeralDisk) Copy() *EphemeralDisk {
  7596  	ld := new(EphemeralDisk)
  7597  	*ld = *d
  7598  	return ld
  7599  }
  7600  
  7601  var (
  7602  	// VaultUnrecoverableError matches unrecoverable errors returned by a Vault
  7603  	// server
  7604  	VaultUnrecoverableError = regexp.MustCompile(`Code:\s+40(0|3|4)`)
  7605  )
  7606  
  7607  const (
  7608  	// VaultChangeModeNoop takes no action when a new token is retrieved.
  7609  	VaultChangeModeNoop = "noop"
  7610  
  7611  	// VaultChangeModeSignal signals the task when a new token is retrieved.
  7612  	VaultChangeModeSignal = "signal"
  7613  
  7614  	// VaultChangeModeRestart restarts the task when a new token is retrieved.
  7615  	VaultChangeModeRestart = "restart"
  7616  )
  7617  
  7618  // Vault stores the set of permissions a task needs access to from Vault.
  7619  type Vault struct {
  7620  	// Policies is the set of policies that the task needs access to
  7621  	Policies []string
  7622  
  7623  	// Env marks whether the Vault Token should be exposed as an environment
  7624  	// variable
  7625  	Env bool
  7626  
  7627  	// ChangeMode is used to configure the task's behavior when the Vault
  7628  	// token changes because the original token could not be renewed in time.
  7629  	ChangeMode string
  7630  
  7631  	// ChangeSignal is the signal sent to the task when a new token is
  7632  	// retrieved. This is only valid when using the signal change mode.
  7633  	ChangeSignal string
  7634  }
  7635  
  7636  func DefaultVaultBlock() *Vault {
  7637  	return &Vault{
  7638  		Env:        true,
  7639  		ChangeMode: VaultChangeModeRestart,
  7640  	}
  7641  }
  7642  
  7643  // Copy returns a copy of this Vault block.
  7644  func (v *Vault) Copy() *Vault {
  7645  	if v == nil {
  7646  		return nil
  7647  	}
  7648  
  7649  	nv := new(Vault)
  7650  	*nv = *v
  7651  	return nv
  7652  }
  7653  
  7654  func (v *Vault) Canonicalize() {
  7655  	if v.ChangeSignal != "" {
  7656  		v.ChangeSignal = strings.ToUpper(v.ChangeSignal)
  7657  	}
  7658  }
  7659  
  7660  // Validate returns an error if the Vault block is invalid.
  7661  func (v *Vault) Validate() error {
  7662  	if v == nil {
  7663  		return nil
  7664  	}
  7665  
  7666  	var mErr multierror.Error
  7667  	if len(v.Policies) == 0 {
  7668  		multierror.Append(&mErr, fmt.Errorf("Policy list cannot be empty"))
  7669  	}
  7670  
  7671  	for _, p := range v.Policies {
  7672  		if p == "root" {
  7673  			multierror.Append(&mErr, fmt.Errorf("Can not specify \"root\" policy"))
  7674  		}
  7675  	}
  7676  
  7677  	switch v.ChangeMode {
  7678  	case VaultChangeModeSignal:
  7679  		if v.ChangeSignal == "" {
  7680  			multierror.Append(&mErr, fmt.Errorf("Signal must be specified when using change mode %q", VaultChangeModeSignal))
  7681  		}
  7682  	case VaultChangeModeNoop, VaultChangeModeRestart:
  7683  	default:
  7684  		multierror.Append(&mErr, fmt.Errorf("Unknown change mode %q", v.ChangeMode))
  7685  	}
  7686  
  7687  	return mErr.ErrorOrNil()
  7688  }
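
        // Example (illustrative sketch, not part of the original source; the policy
        // name is an assumption): any non-empty policy list without "root" passes.
        //
        //	v := DefaultVaultBlock()
        //	v.Policies = []string{"nomad-kv-read"}
        //	err := v.Validate() // err == nil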
  7689  
  7690  const (
  7691  	// DeploymentStatuses are the various states a deployment can be in
  7692  	DeploymentStatusRunning    = "running"
  7693  	DeploymentStatusPaused     = "paused"
  7694  	DeploymentStatusFailed     = "failed"
  7695  	DeploymentStatusSuccessful = "successful"
  7696  	DeploymentStatusCancelled  = "cancelled"
  7697  
  7698  	// TODO Statuses and Descriptions do not match 1:1 and we sometimes use the Description as a status flag
  7699  
  7700  	// DeploymentStatusDescriptions are the various descriptions of the states a
  7701  	// deployment can be in.
  7702  	DeploymentStatusDescriptionRunning               = "Deployment is running"
  7703  	DeploymentStatusDescriptionRunningNeedsPromotion = "Deployment is running but requires manual promotion"
  7704  	DeploymentStatusDescriptionRunningAutoPromotion  = "Deployment is running pending automatic promotion"
  7705  	DeploymentStatusDescriptionPaused                = "Deployment is paused"
  7706  	DeploymentStatusDescriptionSuccessful            = "Deployment completed successfully"
  7707  	DeploymentStatusDescriptionStoppedJob            = "Cancelled because job is stopped"
  7708  	DeploymentStatusDescriptionNewerJob              = "Cancelled due to newer version of job"
  7709  	DeploymentStatusDescriptionFailedAllocations     = "Failed due to unhealthy allocations"
  7710  	DeploymentStatusDescriptionProgressDeadline      = "Failed due to progress deadline"
  7711  	DeploymentStatusDescriptionFailedByUser          = "Deployment marked as failed"
  7712  )
  7713  
  7714  // DeploymentStatusDescriptionRollback is used to get the status description of
  7715  // a deployment when rolling back to an older job.
  7716  func DeploymentStatusDescriptionRollback(baseDescription string, jobVersion uint64) string {
  7717  	return fmt.Sprintf("%s - rolling back to job version %d", baseDescription, jobVersion)
  7718  }
  7719  
  7720  // DeploymentStatusDescriptionRollbackNoop is used to get the status description of
  7721  // a deployment when rolling back is not possible because the current job has the same specification
  7722  func DeploymentStatusDescriptionRollbackNoop(baseDescription string, jobVersion uint64) string {
  7723  	return fmt.Sprintf("%s - not rolling back to stable job version %d as current job has same specification", baseDescription, jobVersion)
  7724  }
  7725  
  7726  // DeploymentStatusDescriptionNoRollbackTarget is used to get the status description of
  7727  // a deployment when there is no target to rollback to but autorevert is desired.
  7728  func DeploymentStatusDescriptionNoRollbackTarget(baseDescription string) string {
  7729  	return fmt.Sprintf("%s - no stable job version to auto revert to", baseDescription)
  7730  }
  7731  
  7732  // Deployment is the object that represents a job deployment which is used to
  7733  // transition a job between versions.
  7734  type Deployment struct {
  7735  	// ID is a generated UUID for the deployment
  7736  	ID string
  7737  
  7738  	// Namespace is the namespace the deployment is created in
  7739  	Namespace string
  7740  
  7741  	// JobID is the job the deployment is created for
  7742  	JobID string
  7743  
  7744  	// JobVersion is the version of the job that the deployment is tracking
  7745  	JobVersion uint64
  7746  
  7747  	// JobModifyIndex is the ModifyIndex of the job which the deployment is
  7748  	// tracking.
  7749  	JobModifyIndex uint64
  7750  
  7751  	// JobSpecModifyIndex is the JobModifyIndex of the job which the
  7752  	// deployment is tracking.
  7753  	JobSpecModifyIndex uint64
  7754  
  7755  	// JobCreateIndex is the create index of the job which the deployment is
  7756  	// tracking. It is needed so that if the job gets stopped and re-run we can
  7757  	// present the correct list of deployments for the job and not old ones.
  7758  	JobCreateIndex uint64
  7759  
  7760  	// TaskGroups is the set of task groups affected by the deployment and their
  7761  	// current deployment status.
  7762  	TaskGroups map[string]*DeploymentState
  7763  
  7764  	// The status of the deployment
  7765  	Status string
  7766  
  7767  	// StatusDescription allows a human readable description of the deployment
  7768  	// status.
  7769  	StatusDescription string
  7770  
  7771  	CreateIndex uint64
  7772  	ModifyIndex uint64
  7773  }
  7774  
  7775  // NewDeployment creates a new deployment given the job.
  7776  func NewDeployment(job *Job) *Deployment {
  7777  	return &Deployment{
  7778  		ID:                 uuid.Generate(),
  7779  		Namespace:          job.Namespace,
  7780  		JobID:              job.ID,
  7781  		JobVersion:         job.Version,
  7782  		JobModifyIndex:     job.ModifyIndex,
  7783  		JobSpecModifyIndex: job.JobModifyIndex,
  7784  		JobCreateIndex:     job.CreateIndex,
  7785  		Status:             DeploymentStatusRunning,
  7786  		StatusDescription:  DeploymentStatusDescriptionRunning,
  7787  		TaskGroups:         make(map[string]*DeploymentState, len(job.TaskGroups)),
  7788  	}
  7789  }
  7790  
  7791  func (d *Deployment) Copy() *Deployment {
  7792  	if d == nil {
  7793  		return nil
  7794  	}
  7795  
  7796  	c := &Deployment{}
  7797  	*c = *d
  7798  
  7799  	c.TaskGroups = nil
  7800  	if l := len(d.TaskGroups); d.TaskGroups != nil {
  7801  		c.TaskGroups = make(map[string]*DeploymentState, l)
  7802  		for tg, s := range d.TaskGroups {
  7803  			c.TaskGroups[tg] = s.Copy()
  7804  		}
  7805  	}
  7806  
  7807  	return c
  7808  }
  7809  
  7810  // Active returns whether the deployment is active, as opposed to terminal.
  7811  func (d *Deployment) Active() bool {
  7812  	switch d.Status {
  7813  	case DeploymentStatusRunning, DeploymentStatusPaused:
  7814  		return true
  7815  	default:
  7816  		return false
  7817  	}
  7818  }
  7819  
  7820  // GetID is a helper for getting the ID when the object may be nil
  7821  func (d *Deployment) GetID() string {
  7822  	if d == nil {
  7823  		return ""
  7824  	}
  7825  	return d.ID
  7826  }
  7827  
  7828  // HasPlacedCanaries returns whether the deployment has placed canaries
  7829  func (d *Deployment) HasPlacedCanaries() bool {
  7830  	if d == nil || len(d.TaskGroups) == 0 {
  7831  		return false
  7832  	}
  7833  	for _, group := range d.TaskGroups {
  7834  		if len(group.PlacedCanaries) != 0 {
  7835  			return true
  7836  		}
  7837  	}
  7838  	return false
  7839  }
  7840  
  7841  // RequiresPromotion returns whether the deployment requires promotion to
  7842  // continue
  7843  func (d *Deployment) RequiresPromotion() bool {
  7844  	if d == nil || len(d.TaskGroups) == 0 || d.Status != DeploymentStatusRunning {
  7845  		return false
  7846  	}
  7847  	for _, group := range d.TaskGroups {
  7848  		if group.DesiredCanaries > 0 && !group.Promoted {
  7849  			return true
  7850  		}
  7851  	}
  7852  	return false
  7853  }
  7854  
  7855  // HasAutoPromote determines if all task groups are marked auto_promote
  7856  func (d *Deployment) HasAutoPromote() bool {
  7857  	if d == nil || len(d.TaskGroups) == 0 || d.Status != DeploymentStatusRunning {
  7858  		return false
  7859  	}
  7860  	for _, group := range d.TaskGroups {
  7861  		if !group.AutoPromote {
  7862  			return false
  7863  		}
  7864  	}
  7865  	return true
  7866  }
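
        // Example (illustrative sketch, not part of the original source; job is an
        // assumed, pre-built *Job): a running deployment with unpromoted canaries
        // requires promotion unless every task group sets auto_promote.
        //
        //	d := NewDeployment(job)
        //	d.TaskGroups["web"] = &DeploymentState{DesiredCanaries: 1}
        //	fmt.Println(d.RequiresPromotion()) // true
        //	fmt.Println(d.HasAutoPromote())    // false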
  7867  
  7868  func (d *Deployment) GoString() string {
  7869  	base := fmt.Sprintf("Deployment ID %q for job %q has status %q (%v):", d.ID, d.JobID, d.Status, d.StatusDescription)
  7870  	for group, state := range d.TaskGroups {
  7871  		base += fmt.Sprintf("\nTask Group %q has state:\n%#v", group, state)
  7872  	}
  7873  	return base
  7874  }
  7875  
  7876  // DeploymentState tracks the state of a deployment for a given task group.
  7877  type DeploymentState struct {
  7878  	// AutoRevert marks whether the task group has indicated the job should be
  7879  	// reverted on failure
  7880  	AutoRevert bool
  7881  
  7882  	// AutoPromote marks promotion triggered automatically by healthy canaries
  7883  	// copied from TaskGroup UpdateStrategy in scheduler.reconcile
  7884  	AutoPromote bool
  7885  
  7886  	// ProgressDeadline is the deadline by which an allocation must transition
  7887  	// to healthy before the deployment is considered failed.
  7888  	ProgressDeadline time.Duration
  7889  
  7890  	// RequireProgressBy is the time by which an allocation must transition
  7891  	// to healthy before the deployment is considered failed.
  7892  	RequireProgressBy time.Time
  7893  
  7894  	// Promoted marks whether the canaries have been promoted
  7895  	Promoted bool
  7896  
  7897  	// PlacedCanaries is the set of placed canary allocations
  7898  	PlacedCanaries []string
  7899  
  7900  	// DesiredCanaries is the number of canaries that should be created.
  7901  	DesiredCanaries int
  7902  
  7903  	// DesiredTotal is the total number of allocations that should be created as
  7904  	// part of the deployment.
  7905  	DesiredTotal int
  7906  
  7907  	// PlacedAllocs is the number of allocations that have been placed
  7908  	PlacedAllocs int
  7909  
  7910  	// HealthyAllocs is the number of allocations that have been marked healthy.
  7911  	HealthyAllocs int
  7912  
  7913  	// UnhealthyAllocs are allocations that have been marked as unhealthy.
  7914  	UnhealthyAllocs int
  7915  }
  7916  
  7917  func (d *DeploymentState) GoString() string {
  7918  	base := fmt.Sprintf("\tDesired Total: %d", d.DesiredTotal)
  7919  	base += fmt.Sprintf("\n\tDesired Canaries: %d", d.DesiredCanaries)
  7920  	base += fmt.Sprintf("\n\tPlaced Canaries: %#v", d.PlacedCanaries)
  7921  	base += fmt.Sprintf("\n\tPromoted: %v", d.Promoted)
  7922  	base += fmt.Sprintf("\n\tPlaced: %d", d.PlacedAllocs)
  7923  	base += fmt.Sprintf("\n\tHealthy: %d", d.HealthyAllocs)
  7924  	base += fmt.Sprintf("\n\tUnhealthy: %d", d.UnhealthyAllocs)
  7925  	base += fmt.Sprintf("\n\tAutoRevert: %v", d.AutoRevert)
  7926  	base += fmt.Sprintf("\n\tAutoPromote: %v", d.AutoPromote)
  7927  	return base
  7928  }
  7929  
  7930  func (d *DeploymentState) Copy() *DeploymentState {
  7931  	c := &DeploymentState{}
  7932  	*c = *d
  7933  	c.PlacedCanaries = helper.CopySliceString(d.PlacedCanaries)
  7934  	return c
  7935  }
  7936  
  7937  // DeploymentStatusUpdate is used to update the status of a given deployment
  7938  type DeploymentStatusUpdate struct {
  7939  	// DeploymentID is the ID of the deployment to update
  7940  	DeploymentID string
  7941  
  7942  	// Status is the new status of the deployment.
  7943  	Status string
  7944  
  7945  	// StatusDescription is the new status description of the deployment.
  7946  	StatusDescription string
  7947  }
  7948  
  7949  // RescheduleTracker encapsulates previous reschedule events
  7950  type RescheduleTracker struct {
  7951  	Events []*RescheduleEvent
  7952  }
  7953  
  7954  func (rt *RescheduleTracker) Copy() *RescheduleTracker {
  7955  	if rt == nil {
  7956  		return nil
  7957  	}
  7958  	nt := &RescheduleTracker{}
  7959  	*nt = *rt
  7960  	rescheduleEvents := make([]*RescheduleEvent, 0, len(rt.Events))
  7961  	for _, tracker := range rt.Events {
  7962  		rescheduleEvents = append(rescheduleEvents, tracker.Copy())
  7963  	}
  7964  	nt.Events = rescheduleEvents
  7965  	return nt
  7966  }
  7967  
  7968  // RescheduleEvent is used to keep track of previous attempts at rescheduling an allocation
  7969  type RescheduleEvent struct {
  7970  	// RescheduleTime is the timestamp of a reschedule attempt
  7971  	RescheduleTime int64
  7972  
  7973  	// PrevAllocID is the ID of the previous allocation being restarted
  7974  	PrevAllocID string
  7975  
  7976  	// PrevNodeID is the node ID of the previous allocation
  7977  	PrevNodeID string
  7978  
  7979  	// Delay is the reschedule delay associated with the attempt
  7980  	Delay time.Duration
  7981  }
  7982  
  7983  func NewRescheduleEvent(rescheduleTime int64, prevAllocID string, prevNodeID string, delay time.Duration) *RescheduleEvent {
  7984  	return &RescheduleEvent{RescheduleTime: rescheduleTime,
  7985  		PrevAllocID: prevAllocID,
  7986  		PrevNodeID:  prevNodeID,
  7987  		Delay:       delay}
  7988  }
  7989  
  7990  func (re *RescheduleEvent) Copy() *RescheduleEvent {
  7991  	if re == nil {
  7992  		return nil
  7993  	}
  7994  	copy := new(RescheduleEvent)
  7995  	*copy = *re
  7996  	return copy
  7997  }
  7998  
  7999  // DesiredTransition is used to mark an allocation as having a desired state
  8000  // transition. This information can be used by the scheduler to make the
  8001  // correct decision.
  8002  type DesiredTransition struct {
  8003  	// Migrate is used to indicate that this allocation should be stopped and
  8004  	// migrated to another node.
  8005  	Migrate *bool
  8006  
  8007  	// Reschedule is used to indicate that this allocation is eligible to be
  8008  	// rescheduled. Most allocations are automatically eligible for
  8009  	// rescheduling, so this field is only required when an allocation is not
  8010  	// automatically eligible. An example is an allocation that is part of a
  8011  	// deployment.
  8012  	Reschedule *bool
  8013  
  8014  	// ForceReschedule is used to indicate that this allocation must be rescheduled.
  8015  	// This field is only used when operators want to force a placement even if
  8016  	// a failed allocation is not eligible to be rescheduled
  8017  	ForceReschedule *bool
  8018  }
  8019  
  8020  // Merge merges the two desired transitions, preferring the values from the
  8021  // passed in object.
  8022  func (d *DesiredTransition) Merge(o *DesiredTransition) {
  8023  	if o.Migrate != nil {
  8024  		d.Migrate = o.Migrate
  8025  	}
  8026  
  8027  	if o.Reschedule != nil {
  8028  		d.Reschedule = o.Reschedule
  8029  	}
  8030  
  8031  	if o.ForceReschedule != nil {
  8032  		d.ForceReschedule = o.ForceReschedule
  8033  	}
  8034  }
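
        // Example (illustrative sketch, not part of the original source): Merge only
        // overwrites fields that are non-nil on the passed-in transition.
        //
        //	base := DesiredTransition{Migrate: helper.BoolToPtr(true)}
        //	base.Merge(&DesiredTransition{Reschedule: helper.BoolToPtr(true)})
        //	// base.ShouldMigrate() == true && base.ShouldReschedule() == true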
  8035  
  8036  // ShouldMigrate returns whether the transition object dictates a migration.
  8037  func (d *DesiredTransition) ShouldMigrate() bool {
  8038  	return d.Migrate != nil && *d.Migrate
  8039  }
  8040  
  8041  // ShouldReschedule returns whether the transition object dictates a
  8042  // rescheduling.
  8043  func (d *DesiredTransition) ShouldReschedule() bool {
  8044  	return d.Reschedule != nil && *d.Reschedule
  8045  }
  8046  
  8047  // ShouldForceReschedule returns whether the transition object dictates a
  8048  // forced rescheduling.
  8049  func (d *DesiredTransition) ShouldForceReschedule() bool {
  8050  	if d == nil {
  8051  		return false
  8052  	}
  8053  	return d.ForceReschedule != nil && *d.ForceReschedule
  8054  }
  8055  
  8056  const (
  8057  	AllocDesiredStatusRun   = "run"   // Allocation should run
  8058  	AllocDesiredStatusStop  = "stop"  // Allocation should stop
  8059  	AllocDesiredStatusEvict = "evict" // Allocation should stop, and was evicted
  8060  )
  8061  
  8062  const (
  8063  	AllocClientStatusPending  = "pending"
  8064  	AllocClientStatusRunning  = "running"
  8065  	AllocClientStatusComplete = "complete"
  8066  	AllocClientStatusFailed   = "failed"
  8067  	AllocClientStatusLost     = "lost"
  8068  )
  8069  
  8070  // Allocation is used to allocate the placement of a task group to a node.
  8071  type Allocation struct {
  8072  	// msgpack omits empty fields during serialization
  8073  	_struct bool `codec:",omitempty"` // nolint: structcheck
  8074  
  8075  	// ID of the allocation (UUID)
  8076  	ID string
  8077  
  8078  	// Namespace is the namespace the allocation is created in
  8079  	Namespace string
  8080  
  8081  	// ID of the evaluation that generated this allocation
  8082  	EvalID string
  8083  
  8084  	// Name is a logical name of the allocation.
  8085  	Name string
  8086  
  8087  	// NodeID is the node this is being placed on
  8088  	NodeID string
  8089  
  8090  	// NodeName is the name of the node this is being placed on.
  8091  	NodeName string
  8092  
  8093  	// Job is the parent job of the task group being allocated.
  8094  	// This is copied at allocation time to avoid issues if the job
  8095  	// definition is updated.
  8096  	JobID string
  8097  	Job   *Job
  8098  
  8099  	// TaskGroup is the name of the task group that should be run
  8100  	TaskGroup string
  8101  
  8102  	// COMPAT(0.11): Remove in 0.11
  8103  	// Resources is the total set of resources allocated as part
  8104  	// of this allocation of the task group. Dynamic ports will be set by
  8105  	// the scheduler.
  8106  	Resources *Resources
  8107  
  8108  	// SharedResources are the resources that are shared by all the tasks in an
  8109  	// allocation
  8110  	// Deprecated: use AllocatedResources.Shared instead.
  8111  	// Keep field to allow us to handle upgrade paths from old versions
  8112  	SharedResources *Resources
  8113  
  8114  	// TaskResources is the set of resources allocated to each
  8115  	// task. These should sum to the total Resources. Dynamic ports will be
  8116  	// set by the scheduler.
  8117  	// Deprecated: use AllocatedResources.Tasks instead.
  8118  	// Keep field to allow us to handle upgrade paths from old versions
  8119  	TaskResources map[string]*Resources
  8120  
  8121  	// AllocatedResources is the total resources allocated for the task group.
  8122  	AllocatedResources *AllocatedResources
  8123  
  8124  	// Metrics associated with this allocation
  8125  	Metrics *AllocMetric
  8126  
  8127  	// Desired Status of the allocation on the client
  8128  	DesiredStatus string
  8129  
  8130  	// DesiredDescription is meant to provide more human-friendly information
  8131  	DesiredDescription string
  8132  
  8133  	// DesiredTransition is used to indicate that a state transition
  8134  	// is desired for a given reason.
  8135  	DesiredTransition DesiredTransition
  8136  
  8137  	// Status of the allocation on the client
  8138  	ClientStatus string
  8139  
  8140  	// ClientDescription is meant to provide more human-friendly information
  8141  	ClientDescription string
  8142  
  8143  	// TaskStates stores the state of each task.
  8144  	TaskStates map[string]*TaskState
  8145  
  8146  	// PreviousAllocation is the allocation that this allocation is replacing
  8147  	PreviousAllocation string
  8148  
  8149  	// NextAllocation is the allocation that this allocation is being replaced by
  8150  	NextAllocation string
  8151  
  8152  	// DeploymentID identifies an allocation as being created from a
  8153  	// particular deployment
  8154  	DeploymentID string
  8155  
  8156  	// DeploymentStatus captures the status of the allocation as part of the
  8157  	// given deployment
  8158  	DeploymentStatus *AllocDeploymentStatus
  8159  
  8160  	// RescheduleTrackers captures details of previous reschedule attempts of the allocation
  8161  	RescheduleTracker *RescheduleTracker
  8162  
  8163  	// FollowupEvalID captures a follow up evaluation created to handle a failed allocation
  8164  	// that can be rescheduled in the future
  8165  	FollowupEvalID string
  8166  
  8167  	// PreemptedAllocations captures IDs of any allocations that were preempted
  8168  	// in order to place this allocation
  8169  	PreemptedAllocations []string
  8170  
  8171  	// PreemptedByAllocation tracks the alloc ID of the allocation that caused this allocation
  8172  	// to stop running because it got preempted
  8173  	PreemptedByAllocation string
  8174  
  8175  	// Raft Indexes
  8176  	CreateIndex uint64
  8177  	ModifyIndex uint64
  8178  
  8179  	// AllocModifyIndex is not updated when the client updates allocations. This
  8180  	// lets the client pull only the allocs updated by the server.
  8181  	AllocModifyIndex uint64
  8182  
  8183  	// CreateTime is the time at which the allocation finished scheduling
  8184  	// and was verified by the plan applier.
  8185  	CreateTime int64
  8186  
  8187  	// ModifyTime is the time the allocation was last updated.
  8188  	ModifyTime int64
  8189  }
  8190  
  8191  // Index returns the index of the allocation. If the allocation is from a task
  8192  // group with count greater than 1, there will be multiple allocations for it.
  8193  func (a *Allocation) Index() uint {
  8194  	l := len(a.Name)
  8195  	prefix := len(a.JobID) + len(a.TaskGroup) + 2
  8196  	if l <= 3 || l <= prefix {
  8197  		return uint(0)
  8198  	}
  8199  
  8200  	strNum := a.Name[prefix : len(a.Name)-1]
  8201  	num, _ := strconv.Atoi(strNum)
  8202  	return uint(num)
  8203  }
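
// Illustrative sketch (editor's addition, not part of the original source):
// allocation names follow the "<job>.<group>[<index>]" convention, which is
// the layout Index relies on when extracting the trailing number.
func exampleAllocationIndex() {
	alloc := &Allocation{
		JobID:     "example",
		TaskGroup: "cache",
		Name:      "example.cache[3]",
	}
	fmt.Println(alloc.Index()) // 3
}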
  8204  
  8205  // Copy provides a copy of the allocation and deep copies the job
  8206  func (a *Allocation) Copy() *Allocation {
  8207  	return a.copyImpl(true)
  8208  }
  8209  
  8210  // CopySkipJob provides a copy of the allocation but doesn't deep copy the job
  8211  func (a *Allocation) CopySkipJob() *Allocation {
  8212  	return a.copyImpl(false)
  8213  }
  8214  
  8215  // Canonicalize Allocation to ensure fields are initialized to the expectations
  8216  // of this version of Nomad. Should be called when restoring persisted
  8217  // Allocations or receiving Allocations from Nomad agents potentially on an
  8218  // older version of Nomad.
  8219  func (a *Allocation) Canonicalize() {
  8220  	if a.AllocatedResources == nil && a.TaskResources != nil {
  8221  		ar := AllocatedResources{}
  8222  
  8223  		tasks := make(map[string]*AllocatedTaskResources, len(a.TaskResources))
  8224  		for name, tr := range a.TaskResources {
  8225  			atr := AllocatedTaskResources{}
  8226  			atr.Cpu.CpuShares = int64(tr.CPU)
  8227  			atr.Memory.MemoryMB = int64(tr.MemoryMB)
  8228  			atr.Networks = tr.Networks.Copy()
  8229  
  8230  			tasks[name] = &atr
  8231  		}
  8232  		ar.Tasks = tasks
  8233  
  8234  		if a.SharedResources != nil {
  8235  			ar.Shared.DiskMB = int64(a.SharedResources.DiskMB)
  8236  			ar.Shared.Networks = a.SharedResources.Networks.Copy()
  8237  		}
  8238  
  8239  		a.AllocatedResources = &ar
  8240  	}
  8241  
  8242  	a.Job.Canonicalize()
  8243  }
  8244  
  8245  func (a *Allocation) copyImpl(job bool) *Allocation {
  8246  	if a == nil {
  8247  		return nil
  8248  	}
  8249  	na := new(Allocation)
  8250  	*na = *a
  8251  
  8252  	if job {
  8253  		na.Job = na.Job.Copy()
  8254  	}
  8255  
  8256  	na.AllocatedResources = na.AllocatedResources.Copy()
  8257  	na.Resources = na.Resources.Copy()
  8258  	na.SharedResources = na.SharedResources.Copy()
  8259  
  8260  	if a.TaskResources != nil {
  8261  		tr := make(map[string]*Resources, len(na.TaskResources))
  8262  		for task, resource := range na.TaskResources {
  8263  			tr[task] = resource.Copy()
  8264  		}
  8265  		na.TaskResources = tr
  8266  	}
  8267  
  8268  	na.Metrics = na.Metrics.Copy()
  8269  	na.DeploymentStatus = na.DeploymentStatus.Copy()
  8270  
  8271  	if a.TaskStates != nil {
  8272  		ts := make(map[string]*TaskState, len(na.TaskStates))
  8273  		for task, state := range na.TaskStates {
  8274  			ts[task] = state.Copy()
  8275  		}
  8276  		na.TaskStates = ts
  8277  	}
  8278  
  8279  	na.RescheduleTracker = a.RescheduleTracker.Copy()
  8280  	na.PreemptedAllocations = helper.CopySliceString(a.PreemptedAllocations)
  8281  	return na
  8282  }
  8283  
  8284  // TerminalStatus returns if the desired or actual status is terminal and
  8285  // will no longer transition.
  8286  func (a *Allocation) TerminalStatus() bool {
  8287  	// First check the desired state and if that isn't terminal, check client
  8288  	// state.
  8289  	return a.ServerTerminalStatus() || a.ClientTerminalStatus()
  8290  }
  8291  
  8292  // ServerTerminalStatus returns true if the desired state of the allocation is terminal
  8293  func (a *Allocation) ServerTerminalStatus() bool {
  8294  	switch a.DesiredStatus {
  8295  	case AllocDesiredStatusStop, AllocDesiredStatusEvict:
  8296  		return true
  8297  	default:
  8298  		return false
  8299  	}
  8300  }
  8301  
  8302  // ClientTerminalStatus returns if the client status is terminal and will no longer transition
  8303  func (a *Allocation) ClientTerminalStatus() bool {
  8304  	switch a.ClientStatus {
  8305  	case AllocClientStatusComplete, AllocClientStatusFailed, AllocClientStatusLost:
  8306  		return true
  8307  	default:
  8308  		return false
  8309  	}
  8310  }
  8311  
  8312  // ShouldReschedule returns if the allocation is eligible to be rescheduled according
  8313  // to its status and ReschedulePolicy given its failure time
  8314  func (a *Allocation) ShouldReschedule(reschedulePolicy *ReschedulePolicy, failTime time.Time) bool {
  8315  	// First check the desired state
  8316  	switch a.DesiredStatus {
  8317  	case AllocDesiredStatusStop, AllocDesiredStatusEvict:
  8318  		return false
  8319  	default:
  8320  	}
  8321  	switch a.ClientStatus {
  8322  	case AllocClientStatusFailed:
  8323  		return a.RescheduleEligible(reschedulePolicy, failTime)
  8324  	default:
  8325  		return false
  8326  	}
  8327  }
  8328  
  8329  // RescheduleEligible returns if the allocation is eligible to be rescheduled according
  8330  // to its ReschedulePolicy and the current state of its reschedule trackers
  8331  func (a *Allocation) RescheduleEligible(reschedulePolicy *ReschedulePolicy, failTime time.Time) bool {
  8332  	if reschedulePolicy == nil {
  8333  		return false
  8334  	}
  8335  	attempts := reschedulePolicy.Attempts
  8336  	interval := reschedulePolicy.Interval
  8337  	enabled := attempts > 0 || reschedulePolicy.Unlimited
  8338  	if !enabled {
  8339  		return false
  8340  	}
  8341  	if reschedulePolicy.Unlimited {
  8342  		return true
  8343  	}
  8344  	// Early return true if there are no attempts yet and the number of allowed attempts is > 0
  8345  	if (a.RescheduleTracker == nil || len(a.RescheduleTracker.Events) == 0) && attempts > 0 {
  8346  		return true
  8347  	}
  8348  	attempted := 0
  8349  	for j := len(a.RescheduleTracker.Events) - 1; j >= 0; j-- {
  8350  		lastAttempt := a.RescheduleTracker.Events[j].RescheduleTime
  8351  		timeDiff := failTime.UTC().UnixNano() - lastAttempt
  8352  		if timeDiff < interval.Nanoseconds() {
  8353  			attempted += 1
  8354  		}
  8355  	}
  8356  	return attempted < attempts
  8357  }
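
// Illustrative sketch (editor's addition, not part of the original source):
// with two allowed attempts per one-hour interval, a third failure inside
// the same interval is no longer eligible for rescheduling.
func exampleRescheduleEligible() {
	now := time.Now()
	policy := &ReschedulePolicy{Attempts: 2, Interval: time.Hour}

	alloc := &Allocation{
		RescheduleTracker: &RescheduleTracker{
			Events: []*RescheduleEvent{
				{RescheduleTime: now.Add(-30 * time.Minute).UTC().UnixNano()},
				{RescheduleTime: now.Add(-10 * time.Minute).UTC().UnixNano()},
			},
		},
	}

	// Both prior attempts fall within the interval, so the budget is spent.
	fmt.Println(alloc.RescheduleEligible(policy, now)) // false
}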
  8358  
  8359  // LastEventTime is the time of the last task event in the allocation.
  8360  // It is used to determine allocation failure time. If the FinishedAt field
  8361  // is not set, the alloc's modify time is used
  8362  func (a *Allocation) LastEventTime() time.Time {
  8363  	var lastEventTime time.Time
  8364  	if a.TaskStates != nil {
  8365  		for _, s := range a.TaskStates {
  8366  			if lastEventTime.IsZero() || s.FinishedAt.After(lastEventTime) {
  8367  				lastEventTime = s.FinishedAt
  8368  			}
  8369  		}
  8370  	}
  8371  
  8372  	if lastEventTime.IsZero() {
  8373  		return time.Unix(0, a.ModifyTime).UTC()
  8374  	}
  8375  	return lastEventTime
  8376  }
  8377  
  8378  // ReschedulePolicy returns the reschedule policy based on the task group
  8379  func (a *Allocation) ReschedulePolicy() *ReschedulePolicy {
  8380  	tg := a.Job.LookupTaskGroup(a.TaskGroup)
  8381  	if tg == nil {
  8382  		return nil
  8383  	}
  8384  	return tg.ReschedulePolicy
  8385  }
  8386  
  8387  // NextRescheduleTime returns a time on or after which the allocation is eligible to be rescheduled,
  8388  // and whether the next reschedule time is within the policy's interval if the policy doesn't allow unlimited reschedules.
  8389  func (a *Allocation) NextRescheduleTime() (time.Time, bool) {
  8390  	failTime := a.LastEventTime()
  8391  	reschedulePolicy := a.ReschedulePolicy()
  8392  	if a.DesiredStatus == AllocDesiredStatusStop || a.ClientStatus != AllocClientStatusFailed || failTime.IsZero() || reschedulePolicy == nil {
  8393  		return time.Time{}, false
  8394  	}
  8395  
  8396  	nextDelay := a.NextDelay()
  8397  	nextRescheduleTime := failTime.Add(nextDelay)
  8398  	rescheduleEligible := reschedulePolicy.Unlimited || (reschedulePolicy.Attempts > 0 && a.RescheduleTracker == nil)
  8399  	if reschedulePolicy.Attempts > 0 && a.RescheduleTracker != nil && a.RescheduleTracker.Events != nil {
  8400  		// Check for eligibility based on the interval if max attempts is set
  8401  		attempted := 0
  8402  		for j := len(a.RescheduleTracker.Events) - 1; j >= 0; j-- {
  8403  			lastAttempt := a.RescheduleTracker.Events[j].RescheduleTime
  8404  			timeDiff := failTime.UTC().UnixNano() - lastAttempt
  8405  			if timeDiff < reschedulePolicy.Interval.Nanoseconds() {
  8406  				attempted += 1
  8407  			}
  8408  		}
  8409  		rescheduleEligible = attempted < reschedulePolicy.Attempts && nextDelay < reschedulePolicy.Interval
  8410  	}
  8411  	return nextRescheduleTime, rescheduleEligible
  8412  }
  8413  
  8414  // NextDelay returns a duration after which the allocation can be rescheduled.
  8415  // It is calculated according to the delay function and previous reschedule attempts.
  8416  func (a *Allocation) NextDelay() time.Duration {
  8417  	policy := a.ReschedulePolicy()
  8418  	// Can be nil if the task group was updated to remove its reschedule policy
  8419  	if policy == nil {
  8420  		return 0
  8421  	}
  8422  	delayDur := policy.Delay
  8423  	if a.RescheduleTracker == nil || a.RescheduleTracker.Events == nil || len(a.RescheduleTracker.Events) == 0 {
  8424  		return delayDur
  8425  	}
  8426  	events := a.RescheduleTracker.Events
  8427  	switch policy.DelayFunction {
  8428  	case "exponential":
  8429  		delayDur = a.RescheduleTracker.Events[len(a.RescheduleTracker.Events)-1].Delay * 2
  8430  	case "fibonacci":
  8431  		if len(events) >= 2 {
  8432  			fibN1Delay := events[len(events)-1].Delay
  8433  			fibN2Delay := events[len(events)-2].Delay
  8434  			// Handle reset of delay ceiling which should cause
  8435  			// a new series to start
  8436  			if fibN2Delay == policy.MaxDelay && fibN1Delay == policy.Delay {
  8437  				delayDur = fibN1Delay
  8438  			} else {
  8439  				delayDur = fibN1Delay + fibN2Delay
  8440  			}
  8441  		}
  8442  	default:
  8443  		return delayDur
  8444  	}
  8445  	if policy.MaxDelay > 0 && delayDur > policy.MaxDelay {
  8446  		delayDur = policy.MaxDelay
  8447  		// check if delay needs to be reset
  8448  
  8449  		lastRescheduleEvent := a.RescheduleTracker.Events[len(a.RescheduleTracker.Events)-1]
  8450  		timeDiff := a.LastEventTime().UTC().UnixNano() - lastRescheduleEvent.RescheduleTime
  8451  		if timeDiff > delayDur.Nanoseconds() {
  8452  			delayDur = policy.Delay
  8453  		}
  8454  
  8455  	}
  8456  
  8457  	return delayDur
  8458  }
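
// Illustrative sketch (editor's addition, not part of the original source),
// assuming the Job and TaskGroup definitions elsewhere in this file: with an
// exponential delay function, the next delay doubles the delay recorded by
// the most recent reschedule event, capped at the policy's MaxDelay.
func exampleNextDelay() {
	tg := &TaskGroup{
		Name: "cache",
		ReschedulePolicy: &ReschedulePolicy{
			DelayFunction: "exponential",
			Delay:         30 * time.Second,
			MaxDelay:      5 * time.Minute,
		},
	}
	alloc := &Allocation{
		Job:       &Job{TaskGroups: []*TaskGroup{tg}},
		TaskGroup: "cache",
		RescheduleTracker: &RescheduleTracker{
			Events: []*RescheduleEvent{{Delay: 30 * time.Second}},
		},
	}
	// The previous attempt waited 30s, so the next delay is 60s.
	fmt.Println(alloc.NextDelay()) // 1m0s
}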
  8459  
  8460  // Terminated returns if the allocation is in a terminal state on a client.
  8461  func (a *Allocation) Terminated() bool {
  8462  	if a.ClientStatus == AllocClientStatusFailed ||
  8463  		a.ClientStatus == AllocClientStatusComplete ||
  8464  		a.ClientStatus == AllocClientStatusLost {
  8465  		return true
  8466  	}
  8467  	return false
  8468  }
  8469  
  8470  // RanSuccessfully returns whether the client has run the allocation and all
  8471  // tasks finished successfully. Critically, this function returns whether the
  8472  // allocation has run to completion and not just that the alloc has converged to
  8473  // its desired state. That is to say that a batch allocation must have finished
  8474  // with exit code 0 on all task groups. This doesn't really have meaning on a
  8475  // non-batch allocation because a service and system allocation should not
  8476  // finish.
  8477  func (a *Allocation) RanSuccessfully() bool {
  8478  	// Handle the case the client hasn't started the allocation.
  8479  	if len(a.TaskStates) == 0 {
  8480  		return false
  8481  	}
  8482  
  8483  	// Check to see if all the tasks finished successfully in the allocation
  8484  	allSuccess := true
  8485  	for _, state := range a.TaskStates {
  8486  		allSuccess = allSuccess && state.Successful()
  8487  	}
  8488  
  8489  	return allSuccess
  8490  }
  8491  
  8492  // ShouldMigrate returns if the allocation needs data migration
  8493  func (a *Allocation) ShouldMigrate() bool {
  8494  	if a.PreviousAllocation == "" {
  8495  		return false
  8496  	}
  8497  
  8498  	if a.DesiredStatus == AllocDesiredStatusStop || a.DesiredStatus == AllocDesiredStatusEvict {
  8499  		return false
  8500  	}
  8501  
  8502  	tg := a.Job.LookupTaskGroup(a.TaskGroup)
  8503  
  8504  	// if the task group is nil or the ephemeral disk block isn't present then
  8505  	// we won't migrate
  8506  	if tg == nil || tg.EphemeralDisk == nil {
  8507  		return false
  8508  	}
  8509  
  8510  	// We won't migrate any data if the user hasn't enabled migration or the
  8511  	// disk is not marked as sticky
  8512  	if !tg.EphemeralDisk.Migrate || !tg.EphemeralDisk.Sticky {
  8513  		return false
  8514  	}
  8515  
  8516  	return true
  8517  }
  8518  
  8519  // SetEventDisplayMessages populates the display message if it's not already set,
  8520  // a temporary fix to handle old allocations that don't have it.
  8521  // This method will be removed in a future release.
  8522  func (a *Allocation) SetEventDisplayMessages() {
  8523  	setDisplayMsg(a.TaskStates)
  8524  }
  8525  
  8526  // COMPAT(0.11): Remove in 0.11
  8527  // ComparableResources returns the resources on the allocation
  8528  // handling upgrade paths. After 0.11 calls to this should be replaced with:
  8529  // alloc.AllocatedResources.Comparable()
  8530  func (a *Allocation) ComparableResources() *ComparableResources {
  8531  	// Alloc already has 0.9+ behavior
  8532  	if a.AllocatedResources != nil {
  8533  		return a.AllocatedResources.Comparable()
  8534  	}
  8535  
  8536  	var resources *Resources
  8537  	if a.Resources != nil {
  8538  		resources = a.Resources
  8539  	} else if a.TaskResources != nil {
  8540  		resources = new(Resources)
  8541  		resources.Add(a.SharedResources)
  8542  		for _, taskResource := range a.TaskResources {
  8543  			resources.Add(taskResource)
  8544  		}
  8545  	}
  8546  
  8547  	// Upgrade path
  8548  	return &ComparableResources{
  8549  		Flattened: AllocatedTaskResources{
  8550  			Cpu: AllocatedCpuResources{
  8551  				CpuShares: int64(resources.CPU),
  8552  			},
  8553  			Memory: AllocatedMemoryResources{
  8554  				MemoryMB: int64(resources.MemoryMB),
  8555  			},
  8556  			Networks: resources.Networks,
  8557  		},
  8558  		Shared: AllocatedSharedResources{
  8559  			DiskMB: int64(resources.DiskMB),
  8560  		},
  8561  	}
  8562  }
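
// Illustrative sketch (editor's addition, not part of the original source):
// on the pre-0.9 upgrade path, the flattened comparable resources are built
// from the deprecated Resources field.
func exampleComparableResources() {
	alloc := &Allocation{
		Resources: &Resources{CPU: 500, MemoryMB: 256, DiskMB: 1024},
	}
	c := alloc.ComparableResources()
	fmt.Println(c.Flattened.Cpu.CpuShares) // 500
	fmt.Println(c.Shared.DiskMB)           // 1024
}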
  8563  
  8564  // LookupTask by name from the Allocation. Returns nil if the Job is not set, the
  8565  // TaskGroup does not exist, or the task name cannot be found.
  8566  func (a *Allocation) LookupTask(name string) *Task {
  8567  	if a.Job == nil {
  8568  		return nil
  8569  	}
  8570  
  8571  	tg := a.Job.LookupTaskGroup(a.TaskGroup)
  8572  	if tg == nil {
  8573  		return nil
  8574  	}
  8575  
  8576  	return tg.LookupTask(name)
  8577  }
  8578  
  8579  // Stub returns a list stub for the allocation
  8580  func (a *Allocation) Stub() *AllocListStub {
  8581  	return &AllocListStub{
  8582  		ID:                    a.ID,
  8583  		EvalID:                a.EvalID,
  8584  		Name:                  a.Name,
  8585  		Namespace:             a.Namespace,
  8586  		NodeID:                a.NodeID,
  8587  		NodeName:              a.NodeName,
  8588  		JobID:                 a.JobID,
  8589  		JobType:               a.Job.Type,
  8590  		JobVersion:            a.Job.Version,
  8591  		TaskGroup:             a.TaskGroup,
  8592  		DesiredStatus:         a.DesiredStatus,
  8593  		DesiredDescription:    a.DesiredDescription,
  8594  		ClientStatus:          a.ClientStatus,
  8595  		ClientDescription:     a.ClientDescription,
  8596  		DesiredTransition:     a.DesiredTransition,
  8597  		TaskStates:            a.TaskStates,
  8598  		DeploymentStatus:      a.DeploymentStatus,
  8599  		FollowupEvalID:        a.FollowupEvalID,
  8600  		RescheduleTracker:     a.RescheduleTracker,
  8601  		PreemptedAllocations:  a.PreemptedAllocations,
  8602  		PreemptedByAllocation: a.PreemptedByAllocation,
  8603  		CreateIndex:           a.CreateIndex,
  8604  		ModifyIndex:           a.ModifyIndex,
  8605  		CreateTime:            a.CreateTime,
  8606  		ModifyTime:            a.ModifyTime,
  8607  	}
  8608  }
  8609  
  8610  // AllocationDiff converts an Allocation type to an AllocationDiff type.
  8611  // If, at any time, modifications are made to AllocationDiff so that an
  8612  // Allocation can no longer be safely converted to AllocationDiff,
  8613  // this method should be changed accordingly.
  8614  func (a *Allocation) AllocationDiff() *AllocationDiff {
  8615  	return (*AllocationDiff)(a)
  8616  }
  8617  
  8618  // AllocationDiff is another named type for Allocation (to use the same fields),
  8619  // which is used to represent the delta for an Allocation. Note that methods
  8620  // defined on Allocation are not inherited by this named type.
  8621  type AllocationDiff Allocation
  8622  
  8623  // AllocListStub is used to return a subset of alloc information
  8624  type AllocListStub struct {
  8625  	ID                    string
  8626  	EvalID                string
  8627  	Name                  string
  8628  	Namespace             string
  8629  	NodeID                string
  8630  	NodeName              string
  8631  	JobID                 string
  8632  	JobType               string
  8633  	JobVersion            uint64
  8634  	TaskGroup             string
  8635  	DesiredStatus         string
  8636  	DesiredDescription    string
  8637  	ClientStatus          string
  8638  	ClientDescription     string
  8639  	DesiredTransition     DesiredTransition
  8640  	TaskStates            map[string]*TaskState
  8641  	DeploymentStatus      *AllocDeploymentStatus
  8642  	FollowupEvalID        string
  8643  	RescheduleTracker     *RescheduleTracker
  8644  	PreemptedAllocations  []string
  8645  	PreemptedByAllocation string
  8646  	CreateIndex           uint64
  8647  	ModifyIndex           uint64
  8648  	CreateTime            int64
  8649  	ModifyTime            int64
  8650  }
  8651  
  8652  // SetEventDisplayMessages populates the display message if it's not already set,
  8653  // a temporary fix to handle old allocations that don't have it.
  8654  // This method will be removed in a future release.
  8655  func (a *AllocListStub) SetEventDisplayMessages() {
  8656  	setDisplayMsg(a.TaskStates)
  8657  }
  8658  
  8659  func setDisplayMsg(taskStates map[string]*TaskState) {
  8660  	if taskStates != nil {
  8661  		for _, taskState := range taskStates {
  8662  			for _, event := range taskState.Events {
  8663  				event.PopulateEventDisplayMessage()
  8664  			}
  8665  		}
  8666  	}
  8667  }
  8668  
  8669  // AllocMetric is used to track various metrics while attempting
  8670  // to make an allocation. These are used to debug a job, or to better
  8671  // understand the pressure within the system.
  8672  type AllocMetric struct {
  8673  	// NodesEvaluated is the number of nodes that were evaluated
  8674  	NodesEvaluated int
  8675  
  8676  	// NodesFiltered is the number of nodes filtered due to a constraint
  8677  	NodesFiltered int
  8678  
  8679  	// NodesAvailable is the number of nodes available for evaluation per DC.
  8680  	NodesAvailable map[string]int
  8681  
  8682  	// ClassFiltered is the number of nodes filtered by class
  8683  	ClassFiltered map[string]int
  8684  
  8685  	// ConstraintFiltered is the number of failures caused by constraint
  8686  	ConstraintFiltered map[string]int
  8687  
  8688  	// NodesExhausted is the number of nodes skipped due to being
  8689  	// exhausted of at least one resource
  8690  	NodesExhausted int
  8691  
  8692  	// ClassExhausted is the number of nodes exhausted by class
  8693  	ClassExhausted map[string]int
  8694  
  8695  	// DimensionExhausted provides the count by dimension or reason
  8696  	DimensionExhausted map[string]int
  8697  
  8698  	// QuotaExhausted provides the exhausted dimensions
  8699  	QuotaExhausted []string
  8700  
  8701  	// Scores is the scores of the final few nodes remaining
  8702  	// for placement. The top score is typically selected.
  8703  	// Deprecated: Replaced by ScoreMetaData in Nomad 0.9
  8704  	Scores map[string]float64
  8705  
  8706  	// ScoreMetaData is a slice of top scoring nodes displayed in the CLI
  8707  	ScoreMetaData []*NodeScoreMeta
  8708  
  8709  	// nodeScoreMeta is used to keep scores for a single node ID. It is cleared
  8710  	// out after we receive the normalized score during the last step of the scoring stack.
  8711  	nodeScoreMeta *NodeScoreMeta
  8712  
  8713  	// topScores is used to maintain a heap of the top K nodes with
  8714  	// the highest normalized score
  8715  	topScores *kheap.ScoreHeap
  8716  
  8717  	// AllocationTime is a measure of how long the allocation
  8718  	// attempt took. This can affect performance and SLAs.
  8719  	AllocationTime time.Duration
  8720  
  8721  	// CoalescedFailures indicates the number of other
  8722  	// allocations that were coalesced into this failed allocation.
  8723  	// This is to prevent creating many failed allocations for a
  8724  	// single task group.
  8725  	CoalescedFailures int
  8726  }
  8727  
  8728  func (a *AllocMetric) Copy() *AllocMetric {
  8729  	if a == nil {
  8730  		return nil
  8731  	}
  8732  	na := new(AllocMetric)
  8733  	*na = *a
  8734  	na.NodesAvailable = helper.CopyMapStringInt(na.NodesAvailable)
  8735  	na.ClassFiltered = helper.CopyMapStringInt(na.ClassFiltered)
  8736  	na.ConstraintFiltered = helper.CopyMapStringInt(na.ConstraintFiltered)
  8737  	na.ClassExhausted = helper.CopyMapStringInt(na.ClassExhausted)
  8738  	na.DimensionExhausted = helper.CopyMapStringInt(na.DimensionExhausted)
  8739  	na.QuotaExhausted = helper.CopySliceString(na.QuotaExhausted)
  8740  	na.Scores = helper.CopyMapStringFloat64(na.Scores)
  8741  	na.ScoreMetaData = CopySliceNodeScoreMeta(na.ScoreMetaData)
  8742  	return na
  8743  }
  8744  
  8745  func (a *AllocMetric) EvaluateNode() {
  8746  	a.NodesEvaluated += 1
  8747  }
  8748  
  8749  func (a *AllocMetric) FilterNode(node *Node, constraint string) {
  8750  	a.NodesFiltered += 1
  8751  	if node != nil && node.NodeClass != "" {
  8752  		if a.ClassFiltered == nil {
  8753  			a.ClassFiltered = make(map[string]int)
  8754  		}
  8755  		a.ClassFiltered[node.NodeClass] += 1
  8756  	}
  8757  	if constraint != "" {
  8758  		if a.ConstraintFiltered == nil {
  8759  			a.ConstraintFiltered = make(map[string]int)
  8760  		}
  8761  		a.ConstraintFiltered[constraint] += 1
  8762  	}
  8763  }
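
// Illustrative sketch (editor's addition, not part of the original source):
// filtering a node increments the per-class and per-constraint counters used
// later when explaining placement failures.
func exampleAllocMetricFilterNode() {
	m := &AllocMetric{}
	m.EvaluateNode()
	m.FilterNode(&Node{NodeClass: "batch"}, "missing driver")

	fmt.Println(m.NodesEvaluated)                       // 1
	fmt.Println(m.ClassFiltered["batch"])               // 1
	fmt.Println(m.ConstraintFiltered["missing driver"]) // 1
}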
  8764  
  8765  func (a *AllocMetric) ExhaustedNode(node *Node, dimension string) {
  8766  	a.NodesExhausted += 1
  8767  	if node != nil && node.NodeClass != "" {
  8768  		if a.ClassExhausted == nil {
  8769  			a.ClassExhausted = make(map[string]int)
  8770  		}
  8771  		a.ClassExhausted[node.NodeClass] += 1
  8772  	}
  8773  	if dimension != "" {
  8774  		if a.DimensionExhausted == nil {
  8775  			a.DimensionExhausted = make(map[string]int)
  8776  		}
  8777  		a.DimensionExhausted[dimension] += 1
  8778  	}
  8779  }
  8780  
  8781  func (a *AllocMetric) ExhaustQuota(dimensions []string) {
  8782  	if a.QuotaExhausted == nil {
  8783  		a.QuotaExhausted = make([]string, 0, len(dimensions))
  8784  	}
  8785  
  8786  	a.QuotaExhausted = append(a.QuotaExhausted, dimensions...)
  8787  }
  8788  
  8789  // ScoreNode is used to gather top K scoring nodes in a heap
  8790  func (a *AllocMetric) ScoreNode(node *Node, name string, score float64) {
  8791  	// Create nodeScoreMeta lazily if it's the first time or if it's a new node
  8792  	if a.nodeScoreMeta == nil || a.nodeScoreMeta.NodeID != node.ID {
  8793  		a.nodeScoreMeta = &NodeScoreMeta{
  8794  			NodeID: node.ID,
  8795  			Scores: make(map[string]float64),
  8796  		}
  8797  	}
  8798  	if name == NormScorerName {
  8799  		a.nodeScoreMeta.NormScore = score
  8800  		// Once we have the normalized score we can push to the heap
  8801  		// that tracks top K by normalized score
  8802  
  8803  		// Create the heap if it's not there already
  8804  		if a.topScores == nil {
  8805  			a.topScores = kheap.NewScoreHeap(MaxRetainedNodeScores)
  8806  		}
  8807  		heap.Push(a.topScores, a.nodeScoreMeta)
  8808  
  8809  		// Clear out this entry because it's now in the heap
  8810  		a.nodeScoreMeta = nil
  8811  	} else {
  8812  		a.nodeScoreMeta.Scores[name] = score
  8813  	}
  8814  }
  8815  
  8816  // PopulateScoreMetaData populates the ScoreMetaData slice of top scoring
  8817  // nodes. It is populated by popping elements from the heap of the top K
  8818  // nodes, ordered by normalized score.
  8819  func (a *AllocMetric) PopulateScoreMetaData() {
  8820  	if a.topScores == nil {
  8821  		return
  8822  	}
  8823  
  8824  	if a.ScoreMetaData == nil {
  8825  		a.ScoreMetaData = make([]*NodeScoreMeta, a.topScores.Len())
  8826  	}
  8827  	heapItems := a.topScores.GetItemsReverse()
  8828  	for i, item := range heapItems {
  8829  		a.ScoreMetaData[i] = item.(*NodeScoreMeta)
  8830  	}
  8831  }
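
// Illustrative sketch (editor's addition, not part of the original source),
// relying on NormScorerName and MaxRetainedNodeScores defined elsewhere in
// this file: intermediate scores accumulate per node, and the normalized
// score pushes the node onto the top-K heap read by PopulateScoreMetaData.
func exampleScoreMetaData() {
	m := &AllocMetric{}
	node := &Node{ID: uuid.Generate()}

	m.ScoreNode(node, "binpack", 0.82)      // intermediate scorer result
	m.ScoreNode(node, NormScorerName, 0.82) // final normalized score

	m.PopulateScoreMetaData()
	fmt.Println(m.ScoreMetaData[0].NormScore) // 0.82
}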
  8832  
  8833  // NodeScoreMeta captures scoring metadata derived from
  8834  // different scoring factors.
  8835  type NodeScoreMeta struct {
  8836  	NodeID    string
  8837  	Scores    map[string]float64
  8838  	NormScore float64
  8839  }
  8840  
  8841  func (s *NodeScoreMeta) Copy() *NodeScoreMeta {
  8842  	if s == nil {
  8843  		return nil
  8844  	}
  8845  	ns := new(NodeScoreMeta)
  8846  	*ns = *s
  8847  	return ns
  8848  }
  8849  
  8850  func (s *NodeScoreMeta) String() string {
  8851  	return fmt.Sprintf("%s %f %v", s.NodeID, s.NormScore, s.Scores)
  8852  }
  8853  
  8854  func (s *NodeScoreMeta) Score() float64 {
  8855  	return s.NormScore
  8856  }
  8857  
  8858  func (s *NodeScoreMeta) Data() interface{} {
  8859  	return s
  8860  }
  8861  
  8862  // AllocDeploymentStatus captures the status of the allocation as part of the
  8863  // deployment. This can include things like if the allocation has been marked as
  8864  // healthy.
  8865  type AllocDeploymentStatus struct {
  8866  	// Healthy marks whether the allocation has been marked healthy or unhealthy
  8867  	// as part of a deployment. It can be unset if it has neither been marked
  8868  	// healthy nor unhealthy.
  8869  	Healthy *bool
  8870  
  8871  	// Timestamp is the time at which the health status was set.
  8872  	Timestamp time.Time
  8873  
  8874  	// Canary marks whether the allocation is a canary or not. A canary that has
  8875  	// been promoted will have this field set to false.
  8876  	Canary bool
  8877  
  8878  	// ModifyIndex is the raft index in which the deployment status was last
  8879  	// changed.
  8880  	ModifyIndex uint64
  8881  }
  8882  
  8883  // HasHealth returns true if the allocation has its health set.
  8884  func (a *AllocDeploymentStatus) HasHealth() bool {
  8885  	return a != nil && a.Healthy != nil
  8886  }
  8887  
  8888  // IsHealthy returns if the allocation is marked as healthy as part of a
  8889  // deployment
  8890  func (a *AllocDeploymentStatus) IsHealthy() bool {
  8891  	if a == nil {
  8892  		return false
  8893  	}
  8894  
  8895  	return a.Healthy != nil && *a.Healthy
  8896  }
  8897  
  8898  // IsUnhealthy returns if the allocation is marked as unhealthy as part of a
  8899  // deployment
  8900  func (a *AllocDeploymentStatus) IsUnhealthy() bool {
  8901  	if a == nil {
  8902  		return false
  8903  	}
  8904  
  8905  	return a.Healthy != nil && !*a.Healthy
  8906  }
  8907  
  8908  // IsCanary returns if the allocation is marked as a canary
  8909  func (a *AllocDeploymentStatus) IsCanary() bool {
  8910  	if a == nil {
  8911  		return false
  8912  	}
  8913  
  8914  	return a.Canary
  8915  }
  8916  
  8917  func (a *AllocDeploymentStatus) Copy() *AllocDeploymentStatus {
  8918  	if a == nil {
  8919  		return nil
  8920  	}
  8921  
  8922  	c := new(AllocDeploymentStatus)
  8923  	*c = *a
  8924  
  8925  	if a.Healthy != nil {
  8926  		c.Healthy = helper.BoolToPtr(*a.Healthy)
  8927  	}
  8928  
  8929  	return c
  8930  }
  8931  
  8932  const (
  8933  	EvalStatusBlocked   = "blocked"
  8934  	EvalStatusPending   = "pending"
  8935  	EvalStatusComplete  = "complete"
  8936  	EvalStatusFailed    = "failed"
  8937  	EvalStatusCancelled = "canceled"
  8938  )
  8939  
  8940  const (
  8941  	EvalTriggerJobRegister       = "job-register"
  8942  	EvalTriggerJobDeregister     = "job-deregister"
  8943  	EvalTriggerPeriodicJob       = "periodic-job"
  8944  	EvalTriggerNodeDrain         = "node-drain"
  8945  	EvalTriggerNodeUpdate        = "node-update"
  8946  	EvalTriggerAllocStop         = "alloc-stop"
  8947  	EvalTriggerScheduled         = "scheduled"
  8948  	EvalTriggerRollingUpdate     = "rolling-update"
  8949  	EvalTriggerDeploymentWatcher = "deployment-watcher"
  8950  	EvalTriggerFailedFollowUp    = "failed-follow-up"
  8951  	EvalTriggerMaxPlans          = "max-plan-attempts"
  8952  	EvalTriggerRetryFailedAlloc  = "alloc-failure"
  8953  	EvalTriggerQueuedAllocs      = "queued-allocs"
  8954  	EvalTriggerPreemption        = "preemption"
  8955  	EvalTriggerScaling           = "job-scaling"
  8956  )
  8957  
  8958  const (
  8959  	// CoreJobEvalGC is used for the garbage collection of evaluations
  8960  	// and allocations. We periodically scan evaluations in a terminal state,
  8961  	// in which all the corresponding allocations are also terminal. We
  8962  	// delete these out of the system to bound the state.
  8963  	CoreJobEvalGC = "eval-gc"
  8964  
  8965  	// CoreJobNodeGC is used for the garbage collection of failed nodes.
  8966  	// We periodically scan nodes in a terminal state, and if they have no
  8967  	// corresponding allocations we delete these out of the system.
  8968  	CoreJobNodeGC = "node-gc"
  8969  
  8970  	// CoreJobJobGC is used for the garbage collection of eligible jobs. We
  8971  	// periodically scan garbage collectible jobs and check if both their
  8972  	// evaluations and allocations are terminal. If so, we delete these out of
  8973  	// the system.
  8974  	CoreJobJobGC = "job-gc"
  8975  
  8976  	// CoreJobDeploymentGC is used for the garbage collection of eligible
  8977  	// deployments. We periodically scan garbage collectible deployments and
  8978  	// check if they are terminal. If so, we delete these out of the system.
  8979  	CoreJobDeploymentGC = "deployment-gc"
  8980  
  8981  	// CoreJobCSIVolumeClaimGC is used for the garbage collection of CSI
  8982  	// volume claims. We periodically scan volumes to see if no allocs are
  8983  	// claiming them. If so, we unclaim the volume.
  8984  	CoreJobCSIVolumeClaimGC = "csi-volume-claim-gc"
  8985  
  8986  	// CoreJobForceGC is used to force garbage collection of all GCable objects.
  8987  	CoreJobForceGC = "force-gc"
  8988  )
  8989  
  8990  // Evaluation is used anytime we need to apply business logic as a result
  8991  // of a change to our desired state (job specification) or the emergent state
  8992  // (registered nodes). When the inputs change, we need to "evaluate" them,
  8993  // potentially taking action (allocation of work) or doing nothing if the state
  8994  // of the world does not require it.
  8995  type Evaluation struct {
  8996  	// msgpack omits empty fields during serialization
  8997  	_struct bool `codec:",omitempty"` // nolint: structcheck
  8998  
  8999  	// ID is a randomly generated UUID used for this evaluation. This
  9000  	// is assigned upon the creation of the evaluation.
  9001  	ID string
  9002  
  9003  	// Namespace is the namespace the evaluation is created in
  9004  	Namespace string
  9005  
  9006  	// Priority is used to control scheduling importance and if this job
  9007  	// can preempt other jobs.
  9008  	Priority int
  9009  
  9010  	// Type is used to control which schedulers are available to handle
  9011  	// this evaluation.
  9012  	Type string
  9013  
  9014  	// TriggeredBy is used to give some insight into why this Eval
  9015  	// was created. (Job change, node failure, alloc failure, etc).
  9016  	TriggeredBy string
  9017  
  9018  	// JobID is the job this evaluation is scoped to. Evaluations cannot
  9019  	// be run in parallel for a given JobID, so we serialize on this.
  9020  	JobID string
  9021  
  9022  	// JobModifyIndex is the modify index of the job at the time
  9023  	// the evaluation was created
  9024  	JobModifyIndex uint64
  9025  
  9026  	// NodeID is the node that was affected triggering the evaluation.
  9027  	NodeID string
  9028  
  9029  	// NodeModifyIndex is the modify index of the node at the time
  9030  	// the evaluation was created
  9031  	NodeModifyIndex uint64
  9032  
  9033  	// DeploymentID is the ID of the deployment that triggered the evaluation.
  9034  	DeploymentID string
  9035  
  9036  	// Status of the evaluation
  9037  	Status string
  9038  
  9039  	// StatusDescription is meant to provide more human-friendly information
  9040  	StatusDescription string
  9041  
  9042  	// Wait is a minimum wait time for running the eval. This is used to
  9043  	// support a rolling upgrade in versions prior to 0.7.0
  9044  	// Deprecated
  9045  	Wait time.Duration
  9046  
  9047  	// WaitUntil is the time when this eval should be run. This is used to
  9048  	// support delayed rescheduling of failed allocations
  9049  	WaitUntil time.Time
  9050  
  9051  	// NextEval is the evaluation ID for the eval created to do a followup.
  9052  	// This is used to support rolling upgrades and failed-follow-up evals, where
  9053  	// we need a chain of evaluations.
  9054  	NextEval string
  9055  
  9056  	// PreviousEval is the evaluation ID for the eval creating this one to do a followup.
  9057  	// This is used to support rolling upgrades and failed-follow-up evals, where
  9058  	// we need a chain of evaluations.
  9059  	PreviousEval string
  9060  
  9061  	// BlockedEval is the evaluation ID for a created blocked eval. A
  9062  	// blocked eval will be created if all allocations could not be placed due
  9063  	// to constraints or lacking resources.
  9064  	BlockedEval string
  9065  
  9066  	// FailedTGAllocs are task groups which have allocations that could not be
  9067  	// made, but the metrics are persisted so that the user can use the feedback
  9068  	// to determine the cause.
  9069  	FailedTGAllocs map[string]*AllocMetric
  9070  
  9071  	// ClassEligibility tracks computed node classes that have been explicitly
  9072  	// marked as eligible or ineligible.
  9073  	ClassEligibility map[string]bool
  9074  
  9075  	// QuotaLimitReached marks whether a quota limit was reached for the
  9076  	// evaluation.
  9077  	QuotaLimitReached string
  9078  
  9079  	// EscapedComputedClass marks whether the job has constraints that are not
  9080  	// captured by computed node classes.
  9081  	EscapedComputedClass bool
  9082  
  9083  	// AnnotatePlan triggers the scheduler to provide additional annotations
  9084  	// during the evaluation. This should not be set during normal operations.
  9085  	AnnotatePlan bool
  9086  
  9087  	// QueuedAllocations is the number of unplaced allocations at the time the
  9088  	// evaluation was processed. The map is keyed by Task Group names.
  9089  	QueuedAllocations map[string]int
  9090  
  9091  	// LeaderACL provides the ACL token to use when issuing RPCs back to the
  9092  	// leader. This will be a valid management token as long as the leader is
  9093  	// active. This should not ever be exposed via the API.
  9094  	LeaderACL string
  9095  
  9096  	// SnapshotIndex is the Raft index of the snapshot used to process the
  9097  	// evaluation. The index will either be set when it has gone through the
  9098  	// scheduler or if a blocked evaluation is being created. The index is set
  9099  	// in this case so we can determine if an early unblocking is required since
  9100  	// capacity has changed since the evaluation was created. This can result in
  9101  	// the SnapshotIndex being less than the CreateIndex.
  9102  	SnapshotIndex uint64
  9103  
  9104  	// Raft Indexes
  9105  	CreateIndex uint64
  9106  	ModifyIndex uint64
  9107  
  9108  	CreateTime int64
  9109  	ModifyTime int64
  9110  }
  9111  
  9112  // TerminalStatus returns if the current status is terminal and
  9113  // will no longer transition.
  9114  func (e *Evaluation) TerminalStatus() bool {
  9115  	switch e.Status {
  9116  	case EvalStatusComplete, EvalStatusFailed, EvalStatusCancelled:
  9117  		return true
  9118  	default:
  9119  		return false
  9120  	}
  9121  }
  9122  
  9123  func (e *Evaluation) GoString() string {
  9124  	return fmt.Sprintf("<Eval %q JobID: %q Namespace: %q>", e.ID, e.JobID, e.Namespace)
  9125  }
  9126  
  9127  func (e *Evaluation) Copy() *Evaluation {
  9128  	if e == nil {
  9129  		return nil
  9130  	}
  9131  	ne := new(Evaluation)
  9132  	*ne = *e
  9133  
  9134  	// Copy ClassEligibility
  9135  	if e.ClassEligibility != nil {
  9136  		classes := make(map[string]bool, len(e.ClassEligibility))
  9137  		for class, elig := range e.ClassEligibility {
  9138  			classes[class] = elig
  9139  		}
  9140  		ne.ClassEligibility = classes
  9141  	}
  9142  
  9143  	// Copy FailedTGAllocs
  9144  	if e.FailedTGAllocs != nil {
  9145  		failedTGs := make(map[string]*AllocMetric, len(e.FailedTGAllocs))
  9146  		for tg, metric := range e.FailedTGAllocs {
  9147  			failedTGs[tg] = metric.Copy()
  9148  		}
  9149  		ne.FailedTGAllocs = failedTGs
  9150  	}
  9151  
  9152  	// Copy queued allocations
  9153  	if e.QueuedAllocations != nil {
  9154  		queuedAllocations := make(map[string]int, len(e.QueuedAllocations))
  9155  		for tg, num := range e.QueuedAllocations {
  9156  			queuedAllocations[tg] = num
  9157  		}
  9158  		ne.QueuedAllocations = queuedAllocations
  9159  	}
  9160  
  9161  	return ne
  9162  }
  9163  
  9164  // ShouldEnqueue checks if a given evaluation should be enqueued into the
  9165  // eval_broker
  9166  func (e *Evaluation) ShouldEnqueue() bool {
  9167  	switch e.Status {
  9168  	case EvalStatusPending:
  9169  		return true
  9170  	case EvalStatusComplete, EvalStatusFailed, EvalStatusBlocked, EvalStatusCancelled:
  9171  		return false
  9172  	default:
  9173  		panic(fmt.Sprintf("unhandled evaluation (%s) status %s", e.ID, e.Status))
  9174  	}
  9175  }
  9176  
  9177  // ShouldBlock checks if a given evaluation should be entered into the blocked
  9178  // eval tracker.
  9179  func (e *Evaluation) ShouldBlock() bool {
  9180  	switch e.Status {
  9181  	case EvalStatusBlocked:
  9182  		return true
  9183  	case EvalStatusComplete, EvalStatusFailed, EvalStatusPending, EvalStatusCancelled:
  9184  		return false
  9185  	default:
  9186  		panic(fmt.Sprintf("unhandled evaluation (%s) status %s", e.ID, e.Status))
  9187  	}
  9188  }
  9189  
  9190  // MakePlan is used to make a plan from the given evaluation
  9191  // for a given Job
  9192  func (e *Evaluation) MakePlan(j *Job) *Plan {
  9193  	p := &Plan{
  9194  		EvalID:          e.ID,
  9195  		Priority:        e.Priority,
  9196  		Job:             j,
  9197  		NodeUpdate:      make(map[string][]*Allocation),
  9198  		NodeAllocation:  make(map[string][]*Allocation),
  9199  		NodePreemptions: make(map[string][]*Allocation),
  9200  	}
  9201  	if j != nil {
  9202  		p.AllAtOnce = j.AllAtOnce
  9203  	}
  9204  	return p
  9205  }
  9206  
  9207  // NextRollingEval creates an evaluation to follow up this eval for rolling updates
  9208  func (e *Evaluation) NextRollingEval(wait time.Duration) *Evaluation {
  9209  	now := time.Now().UTC().UnixNano()
  9210  	return &Evaluation{
  9211  		ID:             uuid.Generate(),
  9212  		Namespace:      e.Namespace,
  9213  		Priority:       e.Priority,
  9214  		Type:           e.Type,
  9215  		TriggeredBy:    EvalTriggerRollingUpdate,
  9216  		JobID:          e.JobID,
  9217  		JobModifyIndex: e.JobModifyIndex,
  9218  		Status:         EvalStatusPending,
  9219  		Wait:           wait,
  9220  		PreviousEval:   e.ID,
  9221  		CreateTime:     now,
  9222  		ModifyTime:     now,
  9223  	}
  9224  }
  9225  
  9226  // CreateBlockedEval creates a blocked evaluation to follow up this eval and place any
  9227  // failed allocations. It takes the classes marked explicitly eligible or
  9228  // ineligible, whether the job has escaped computed node classes and whether the
  9229  // quota limit was reached.
  9230  func (e *Evaluation) CreateBlockedEval(classEligibility map[string]bool,
  9231  	escaped bool, quotaReached string) *Evaluation {
  9232  	now := time.Now().UTC().UnixNano()
  9233  	return &Evaluation{
  9234  		ID:                   uuid.Generate(),
  9235  		Namespace:            e.Namespace,
  9236  		Priority:             e.Priority,
  9237  		Type:                 e.Type,
  9238  		TriggeredBy:          EvalTriggerQueuedAllocs,
  9239  		JobID:                e.JobID,
  9240  		JobModifyIndex:       e.JobModifyIndex,
  9241  		Status:               EvalStatusBlocked,
  9242  		PreviousEval:         e.ID,
  9243  		ClassEligibility:     classEligibility,
  9244  		EscapedComputedClass: escaped,
  9245  		QuotaLimitReached:    quotaReached,
  9246  		CreateTime:           now,
  9247  		ModifyTime:           now,
  9248  	}
  9249  }
  9250  
  9251  // CreateFailedFollowUpEval creates a follow up evaluation when the current one
  9252  // has been marked as failed because it has hit the delivery limit and will not
  9253  // be retried by the eval_broker. Callers should copy the created eval's ID
  9254  // into the old eval's NextEval field.
  9255  func (e *Evaluation) CreateFailedFollowUpEval(wait time.Duration) *Evaluation {
  9256  	now := time.Now().UTC().UnixNano()
  9257  	return &Evaluation{
  9258  		ID:             uuid.Generate(),
  9259  		Namespace:      e.Namespace,
  9260  		Priority:       e.Priority,
  9261  		Type:           e.Type,
  9262  		TriggeredBy:    EvalTriggerFailedFollowUp,
  9263  		JobID:          e.JobID,
  9264  		JobModifyIndex: e.JobModifyIndex,
  9265  		Status:         EvalStatusPending,
  9266  		Wait:           wait,
  9267  		PreviousEval:   e.ID,
  9268  		CreateTime:     now,
  9269  		ModifyTime:     now,
  9270  	}
  9271  }
  9272  
  9273  // UpdateModifyTime takes into account that clocks on different servers may be
  9274  // slightly out of sync. Even in case of a leader change, this method will
  9275  // guarantee that ModifyTime will always be after CreateTime.
  9276  func (e *Evaluation) UpdateModifyTime() {
  9277  	now := time.Now().UTC().UnixNano()
  9278  	if now <= e.CreateTime {
  9279  		e.ModifyTime = e.CreateTime + 1
  9280  	} else {
  9281  		e.ModifyTime = now
  9282  	}
  9283  }
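
// Illustrative sketch (editor's addition, not part of the original source):
// even if the local clock is behind the server that set CreateTime,
// UpdateModifyTime still yields a ModifyTime strictly after CreateTime.
func exampleUpdateModifyTime() {
	e := &Evaluation{CreateTime: time.Now().UTC().UnixNano() + int64(time.Hour)}
	e.UpdateModifyTime()
	fmt.Println(e.ModifyTime > e.CreateTime) // true
}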
  9284  
  9285  // Plan is used to submit a commit plan for task allocations. These
  9286  // are submitted to the leader which verifies that resources have
  9287  // not been overcommitted before admitting the plan.
  9288  type Plan struct {
  9289  	// msgpack omits empty fields during serialization
  9290  	_struct bool `codec:",omitempty"` // nolint: structcheck
  9291  
  9292  	// EvalID is the evaluation ID this plan is associated with
  9293  	EvalID string
  9294  
  9295  	// EvalToken is used to prevent a split-brain processing of
  9296  	// an evaluation. There should only be a single scheduler running
  9297  	// an Eval at a time, but this could be violated after a leadership
  9298  	// transition. This unique token is used to reject plans that are
  9299  	// being submitted from a different leader.
  9300  	EvalToken string
  9301  
  9302  	// Priority is the priority of the upstream job
  9303  	Priority int
  9304  
  9305  	// AllAtOnce is used to control if incremental scheduling of task groups
  9306  	// is allowed or if we must do a gang scheduling of the entire job.
  9307  	// If this is false, a plan may be partially applied. Otherwise, the
  9308  	// entire plan must be able to make progress.
  9309  	AllAtOnce bool
  9310  
  9311  	// Job is the parent job of all the allocations in the Plan.
  9312  	// Since a Plan only involves a single Job, we can reduce the size
  9313  	// of the plan by only including it once.
  9314  	Job *Job
  9315  
  9316  	// NodeUpdate contains all the allocations for each node. For each node,
  9317  	// this is a list of the allocations to update to either stop or evict.
  9318  	NodeUpdate map[string][]*Allocation
  9319  
  9320  	// NodeAllocation contains all the allocations for each node.
  9321  	// The evicts must be considered prior to the allocations.
  9322  	NodeAllocation map[string][]*Allocation
  9323  
  9324  	// Annotations contains annotations by the scheduler to be used by operators
  9325  	// to understand the decisions made by the scheduler.
  9326  	Annotations *PlanAnnotations
  9327  
  9328  	// Deployment is the deployment created or updated by the scheduler that
  9329  	// should be applied by the planner.
  9330  	Deployment *Deployment
  9331  
  9332  	// DeploymentUpdates is a set of status updates to apply to the given
  9333  	// deployments. This allows the scheduler to cancel any unneeded deployment
  9334  	// because the job is stopped or the update block is removed.
  9335  	DeploymentUpdates []*DeploymentStatusUpdate
  9336  
  9337  	// NodePreemptions is a map from node id to a set of allocations from other
  9338  	// lower priority jobs that are preempted. Preempted allocations are marked
  9339  	// as evicted.
  9340  	NodePreemptions map[string][]*Allocation
  9341  
  9342  	// SnapshotIndex is the Raft index of the snapshot used to create the
  9343  	// Plan. The leader will wait to evaluate the plan until its StateStore
  9344  	// has reached at least this index.
  9345  	SnapshotIndex uint64
  9346  }
  9347  
  9348  // AppendStoppedAlloc marks an allocation to be stopped. The clientStatus of the
  9349  // allocation may be optionally set by passing in a non-empty value.
  9350  func (p *Plan) AppendStoppedAlloc(alloc *Allocation, desiredDesc, clientStatus string) {
  9351  	newAlloc := new(Allocation)
  9352  	*newAlloc = *alloc
  9353  
  9354  	// If the job is not set in the plan we are deregistering a job so we
  9355  	// extract the job from the allocation.
  9356  	if p.Job == nil && newAlloc.Job != nil {
  9357  		p.Job = newAlloc.Job
  9358  	}
  9359  
  9360  	// Normalize the job
  9361  	newAlloc.Job = nil
  9362  
  9363  	// Strip the resources as it can be rebuilt.
  9364  	newAlloc.Resources = nil
  9365  
  9366  	newAlloc.DesiredStatus = AllocDesiredStatusStop
  9367  	newAlloc.DesiredDescription = desiredDesc
  9368  
  9369  	if clientStatus != "" {
  9370  		newAlloc.ClientStatus = clientStatus
  9371  	}
  9372  
  9373  	node := alloc.NodeID
  9374  	existing := p.NodeUpdate[node]
  9375  	p.NodeUpdate[node] = append(existing, newAlloc)
  9376  }
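
// Illustrative sketch (editor's addition, not part of the original source):
// a plan created from an evaluation starts empty, and appending a stopped
// allocation registers it under its node.
func examplePlanAppendStoppedAlloc() {
	eval := &Evaluation{ID: uuid.Generate(), Priority: 50}
	plan := eval.MakePlan(nil)

	alloc := &Allocation{ID: uuid.Generate(), NodeID: "node-1"}
	plan.AppendStoppedAlloc(alloc, "node is draining", AllocClientStatusLost)

	fmt.Println(len(plan.NodeUpdate["node-1"])) // 1
	fmt.Println(plan.IsNoOp())                  // false
}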
  9377  
  9378  // AppendPreemptedAlloc is used to append an allocation that's being preempted to the plan.
  9379  // To minimize the size of the plan, this only sets a minimal set of fields in the allocation
  9380  func (p *Plan) AppendPreemptedAlloc(alloc *Allocation, preemptingAllocID string) {
  9381  	newAlloc := &Allocation{}
  9382  	newAlloc.ID = alloc.ID
  9383  	newAlloc.JobID = alloc.JobID
  9384  	newAlloc.Namespace = alloc.Namespace
  9385  	newAlloc.DesiredStatus = AllocDesiredStatusEvict
  9386  	newAlloc.PreemptedByAllocation = preemptingAllocID
  9387  
  9388  	desiredDesc := fmt.Sprintf("Preempted by alloc ID %v", preemptingAllocID)
  9389  	newAlloc.DesiredDescription = desiredDesc
  9390  
  9391  	// TaskResources are needed by the plan applier to check if allocations fit
  9392  	// after removing preempted allocations
  9393  	if alloc.AllocatedResources != nil {
  9394  		newAlloc.AllocatedResources = alloc.AllocatedResources
  9395  	} else {
  9396  		// COMPAT Remove in version 0.11
  9397  		newAlloc.TaskResources = alloc.TaskResources
  9398  		newAlloc.SharedResources = alloc.SharedResources
  9399  	}
  9400  
  9401  	// Append this alloc to slice for this node
  9402  	node := alloc.NodeID
  9403  	existing := p.NodePreemptions[node]
  9404  	p.NodePreemptions[node] = append(existing, newAlloc)
  9405  }
  9406  
  9407  func (p *Plan) PopUpdate(alloc *Allocation) {
  9408  	existing := p.NodeUpdate[alloc.NodeID]
  9409  	n := len(existing)
  9410  	if n > 0 && existing[n-1].ID == alloc.ID {
  9411  		existing = existing[:n-1]
  9412  		if len(existing) > 0 {
  9413  			p.NodeUpdate[alloc.NodeID] = existing
  9414  		} else {
  9415  			delete(p.NodeUpdate, alloc.NodeID)
  9416  		}
  9417  	}
  9418  }
  9419  
  9420  func (p *Plan) AppendAlloc(alloc *Allocation) {
  9421  	node := alloc.NodeID
  9422  	existing := p.NodeAllocation[node]
  9423  
  9424  	// Normalize the job
  9425  	alloc.Job = nil
  9426  
  9427  	p.NodeAllocation[node] = append(existing, alloc)
  9428  }
  9429  
  9430  // IsNoOp checks if this plan would do nothing
  9431  func (p *Plan) IsNoOp() bool {
  9432  	return len(p.NodeUpdate) == 0 &&
  9433  		len(p.NodeAllocation) == 0 &&
  9434  		p.Deployment == nil &&
  9435  		len(p.DeploymentUpdates) == 0
  9436  }
  9437  
  9438  // NormalizeAllocations normalizes allocations to remove fields that can
  9439  // be fetched from the MemDB instead of sending over the wire
  9440  func (p *Plan) NormalizeAllocations() {
  9441  	for _, allocs := range p.NodeUpdate {
  9442  		for i, alloc := range allocs {
  9443  			allocs[i] = &Allocation{
  9444  				ID:                 alloc.ID,
  9445  				DesiredDescription: alloc.DesiredDescription,
  9446  				ClientStatus:       alloc.ClientStatus,
  9447  			}
  9448  		}
  9449  	}
  9450  
  9451  	for _, allocs := range p.NodePreemptions {
  9452  		for i, alloc := range allocs {
  9453  			allocs[i] = &Allocation{
  9454  				ID:                    alloc.ID,
  9455  				PreemptedByAllocation: alloc.PreemptedByAllocation,
  9456  			}
  9457  		}
  9458  	}
  9459  }
  9460  
  9461  // PlanResult is the result of a plan submitted to the leader.
  9462  type PlanResult struct {
  9463  	// NodeUpdate contains all the updates that were committed.
  9464  	NodeUpdate map[string][]*Allocation
  9465  
  9466  	// NodeAllocation contains all the allocations that were committed.
  9467  	NodeAllocation map[string][]*Allocation
  9468  
  9469  	// Deployment is the deployment that was committed.
  9470  	Deployment *Deployment
  9471  
  9472  	// DeploymentUpdates is the set of deployment updates that were committed.
  9473  	DeploymentUpdates []*DeploymentStatusUpdate
  9474  
  9475  	// NodePreemptions is a map from node id to a set of allocations from other
  9476  	// lower priority jobs that are preempted. Preempted allocations are marked
  9477  	// as stopped.
  9478  	NodePreemptions map[string][]*Allocation
  9479  
  9480  	// RefreshIndex is the index the worker should refresh state up to.
  9481  	// This allows all evictions and allocations to be materialized.
  9482  	// If any allocations were rejected due to stale data (node state,
  9483  	// over committed) this can be used to force a worker refresh.
  9484  	RefreshIndex uint64
  9485  
  9486  	// AllocIndex is the Raft index in which the evictions and
  9487  	// allocations took place. This is used for the write index.
  9488  	AllocIndex uint64
  9489  }
  9490  
  9491  // IsNoOp checks if this plan result would do nothing
  9492  func (p *PlanResult) IsNoOp() bool {
  9493  	return len(p.NodeUpdate) == 0 && len(p.NodeAllocation) == 0 &&
  9494  		len(p.DeploymentUpdates) == 0 && p.Deployment == nil
  9495  }
  9496  
  9497  // FullCommit is used to check if all the allocations in a plan
  9498  // were committed as part of the result. Returns if there was
  9499  // a match, and the number of expected and actual allocations.
  9500  func (p *PlanResult) FullCommit(plan *Plan) (bool, int, int) {
  9501  	expected := 0
  9502  	actual := 0
  9503  	for name, allocList := range plan.NodeAllocation {
  9504  		didAlloc := p.NodeAllocation[name]
  9505  		expected += len(allocList)
  9506  		actual += len(didAlloc)
  9507  	}
  9508  	return actual == expected, expected, actual
  9509  }
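
        // Illustrative sketch, not part of the original file: how a scheduler
        // worker might react to a partial commit. Both arguments are assumed to
        // come from a prior plan submission.
        func examplePartialCommit(plan *Plan, result *PlanResult) {
        	if full, expected, actual := result.FullCommit(plan); !full {
        		// Some placements were rejected, typically due to stale node
        		// state; a worker would refresh its snapshot up to
        		// result.RefreshIndex and plan the remainder again.
        		fmt.Printf("partial commit: %d of %d allocations applied\n", actual, expected)
        	}
        }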
  9510  
  9511  // PlanAnnotations holds annotations made by the scheduler to give further debug
  9512  // information to operators.
  9513  type PlanAnnotations struct {
  9514  	// DesiredTGUpdates is the set of desired updates per task group.
  9515  	DesiredTGUpdates map[string]*DesiredUpdates
  9516  
  9517  	// PreemptedAllocs is the set of allocations to be preempted to make the placement successful.
  9518  	PreemptedAllocs []*AllocListStub
  9519  }
  9520  
  9521  // DesiredUpdates is the set of changes the scheduler would like to make given
  9522  // sufficient resources and cluster capacity.
  9523  type DesiredUpdates struct {
  9524  	Ignore            uint64
  9525  	Place             uint64
  9526  	Migrate           uint64
  9527  	Stop              uint64
  9528  	InPlaceUpdate     uint64
  9529  	DestructiveUpdate uint64
  9530  	Canary            uint64
  9531  	Preemptions       uint64
  9532  }
  9533  
  9534  func (d *DesiredUpdates) GoString() string {
  9535  	return fmt.Sprintf("(place %d) (inplace %d) (destructive %d) (stop %d) (migrate %d) (ignore %d) (canary %d)",
  9536  		d.Place, d.InPlaceUpdate, d.DestructiveUpdate, d.Stop, d.Migrate, d.Ignore, d.Canary)
  9537  }
  9538  
  9539  // MsgpackHandle is a shared handle for msgpack encoding/decoding of structs
  9540  var MsgpackHandle = func() *codec.MsgpackHandle {
  9541  	h := &codec.MsgpackHandle{}
  9542  	h.RawToString = true
  9543  
  9544  	// Maintain the binary format used prior to upgrading to the latest ugorji codec
  9545  	h.BasicHandle.TimeNotBuiltin = true
  9546  
  9547  	// Sets the default type for decoding a map into a nil interface{}.
  9548  	// This is necessary in particular because we store the driver configs as a
  9549  	// nil interface{}.
  9550  	h.MapType = reflect.TypeOf(map[string]interface{}(nil))
  9551  
  9552  	return h
  9553  }()
  9554  
  9555  var (
  9556  	// JsonHandle and JsonHandlePretty are the codec handles to JSON encode
  9557  	// structs. The pretty handle will add indents for easier human consumption.
  9558  	JsonHandle = &codec.JsonHandle{
  9559  		HTMLCharsAsIs: true,
  9560  	}
  9561  	JsonHandlePretty = &codec.JsonHandle{
  9562  		HTMLCharsAsIs: true,
  9563  		Indent:        4,
  9564  	}
  9565  )
  9566  
  9567  // TODO: Figure out if we can remove this. This is our fork, which is far
  9568  // behind upstream. Its original purpose was likely to pin a stable version,
  9569  // but vendoring now accomplishes the same thing.
  9570  var HashiMsgpackHandle = func() *hcodec.MsgpackHandle {
  9571  	h := &hcodec.MsgpackHandle{}
  9572  	h.RawToString = true
  9573  
  9574  	// Maintain the binary format used prior to upgrading to the latest ugorji codec
  9575  	h.BasicHandle.TimeNotBuiltin = true
  9576  
  9577  	// Sets the default type for decoding a map into a nil interface{}.
  9578  	// This is necessary in particular because we store the driver configs as a
  9579  	// nil interface{}.
  9580  	h.MapType = reflect.TypeOf(map[string]interface{}(nil))
  9581  	return h
  9582  }()
  9583  
  9584  // Decode is used to decode a MsgPack encoded object
  9585  func Decode(buf []byte, out interface{}) error {
  9586  	return codec.NewDecoder(bytes.NewReader(buf), MsgpackHandle).Decode(out)
  9587  }
  9588  
  9589  // Encode is used to encode a MsgPack object with type prefix
  9590  func Encode(t MessageType, msg interface{}) ([]byte, error) {
  9591  	var buf bytes.Buffer
  9592  	buf.WriteByte(uint8(t))
  9593  	err := codec.NewEncoder(&buf, MsgpackHandle).Encode(msg)
  9594  	return buf.Bytes(), err
  9595  }
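
        // Illustrative sketch, not part of the original file: a round trip through
        // Encode and Decode. Encode prefixes the msgpack payload with the
        // MessageType byte, so a consumer (such as the FSM) dispatches on buf[0]
        // and decodes the remainder. The payload choice here is arbitrary.
        func exampleEncodeDecode() error {
        	in := KeyringRequest{Key: "example"} // hypothetical payload
        	buf, err := Encode(ClusterMetadataRequestType, in)
        	if err != nil {
        		return err
        	}
        	msgType := MessageType(buf[0]) // type prefix written by Encode
        	_ = msgType                    // a real consumer would switch on this
        	var out KeyringRequest
        	return Decode(buf[1:], &out) // payload starts after the prefix byte
        }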
  9596  
  9597  // KeyringResponse is a unified key response and can be used for install,
  9598  // remove, and use operations, as well as for listing key queries.
  9599  type KeyringResponse struct {
  9600  	Messages map[string]string
  9601  	Keys     map[string]int
  9602  	NumNodes int
  9603  }
  9604  
  9605  // KeyringRequest is the request object for serf key operations.
  9606  type KeyringRequest struct {
  9607  	Key string
  9608  }
  9609  
  9610  // RecoverableError wraps an error and marks whether it is recoverable (and
  9611  // may be retried) or fatal.
  9612  type RecoverableError struct {
  9613  	Err         string
  9614  	Recoverable bool
  9615  }
  9616  
  9617  // NewRecoverableError is used to wrap an error and mark it as recoverable or
  9618  // not.
  9619  func NewRecoverableError(e error, recoverable bool) error {
  9620  	if e == nil {
  9621  		return nil
  9622  	}
  9623  
  9624  	return &RecoverableError{
  9625  		Err:         e.Error(),
  9626  		Recoverable: recoverable,
  9627  	}
  9628  }
  9629  
  9630  // WrapRecoverable wraps an existing error in a new RecoverableError with a new
  9631  // message. If the wrapped error was recoverable, the returned error is as well;
  9632  // otherwise it is unrecoverable.
  9633  func WrapRecoverable(msg string, err error) error {
  9634  	return &RecoverableError{Err: msg, Recoverable: IsRecoverable(err)}
  9635  }
  9636  
  9637  func (r *RecoverableError) Error() string {
  9638  	return r.Err
  9639  }
  9640  
  9641  func (r *RecoverableError) IsRecoverable() bool {
  9642  	return r.Recoverable
  9643  }
  9644  
  9645  func (r *RecoverableError) IsUnrecoverable() bool {
  9646  	return !r.Recoverable
  9647  }
  9648  
  9649  // Recoverable is an interface errors can implement to indicate whether they
  9650  // are fatal or recoverable.
  9651  type Recoverable interface {
  9652  	error
  9653  	IsRecoverable() bool
  9654  }
  9655  
  9656  // IsRecoverable returns true if the error implements the Recoverable
  9657  // interface and reports itself as recoverable; otherwise it returns false.
  9658  func IsRecoverable(e error) bool {
  9659  	if re, ok := e.(Recoverable); ok {
  9660  		return re.IsRecoverable()
  9661  	}
  9662  	return false
  9663  }
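
        // Illustrative sketch, not part of the original file: a retry loop gated
        // on IsRecoverable. The operation and the retry budget are hypothetical.
        func exampleRetry(op func() error) error {
        	var err error
        	for attempt := 0; attempt < 3; attempt++ {
        		if err = op(); err == nil || !IsRecoverable(err) {
        			return err // success, or a fatal error retries cannot fix
        		}
        		time.Sleep(time.Second << attempt) // crude exponential backoff
        	}
        	return WrapRecoverable("retry budget exhausted: "+err.Error(), err)
        }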
  9664  
  9665  // WrappedServerError wraps an error and satisfies
  9666  // both the Recoverable and the ServerSideError interfaces
  9667  type WrappedServerError struct {
  9668  	Err error
  9669  }
  9670  
  9671  // NewWrappedServerError is used to create a wrapped server side error
  9672  func NewWrappedServerError(e error) error {
  9673  	return &WrappedServerError{
  9674  		Err: e,
  9675  	}
  9676  }
  9677  
  9678  func (r *WrappedServerError) IsRecoverable() bool {
  9679  	return IsRecoverable(r.Err)
  9680  }
  9681  
  9682  func (r *WrappedServerError) Error() string {
  9683  	return r.Err.Error()
  9684  }
  9685  
  9686  func (r *WrappedServerError) IsServerSide() bool {
  9687  	return true
  9688  }
  9689  
  9690  // ServerSideError is an interface for errors to implement to indicate
  9691  // errors occurring after the request makes it to a server
  9692  type ServerSideError interface {
  9693  	error
  9694  	IsServerSide() bool
  9695  }
  9696  
  9697  // IsServerSide returns true if the error implements the ServerSideError
  9698  // interface and reports itself as server-side.
  9699  func IsServerSide(e error) bool {
  9700  	if se, ok := e.(ServerSideError); ok {
  9701  		return se.IsServerSide()
  9702  	}
  9703  	return false
  9704  }
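
        // Illustrative sketch, not part of the original file: classifying an RPC
        // error using the two interfaces above. The label names are hypothetical.
        func exampleClassifyError(err error) string {
        	switch {
        	case err == nil:
        		return "ok"
        	case IsServerSide(err):
        		return "server_error"
        	case IsRecoverable(err):
        		return "retryable_error"
        	default:
        		return "client_error"
        	}
        }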
  9705  
  9706  // ACLPolicy is used to represent an ACL policy
  9707  type ACLPolicy struct {
  9708  	Name        string      // Unique name
  9709  	Description string      // Human readable
  9710  	Rules       string      // HCL or JSON format
  9711  	RulesJSON   *acl.Policy // Generated from Rules on read
  9712  	Hash        []byte
  9713  	CreateIndex uint64
  9714  	ModifyIndex uint64
  9715  }
  9716  
  9717  // SetHash is used to compute and set the hash of the ACL policy
  9718  func (a *ACLPolicy) SetHash() []byte {
  9719  	// Initialize a 256-bit BLAKE2b hash (32 bytes)
  9720  	hash, err := blake2b.New256(nil)
  9721  	if err != nil {
  9722  		panic(err)
  9723  	}
  9724  
  9725  	// Write all the user-set fields
  9726  	hash.Write([]byte(a.Name))
  9727  	hash.Write([]byte(a.Description))
  9728  	hash.Write([]byte(a.Rules))
  9729  
  9730  	// Finalize the hash
  9731  	hashVal := hash.Sum(nil)
  9732  
  9733  	// Set and return the hash
  9734  	a.Hash = hashVal
  9735  	return hashVal
  9736  }
  9737  
  9738  func (a *ACLPolicy) Stub() *ACLPolicyListStub {
  9739  	return &ACLPolicyListStub{
  9740  		Name:        a.Name,
  9741  		Description: a.Description,
  9742  		Hash:        a.Hash,
  9743  		CreateIndex: a.CreateIndex,
  9744  		ModifyIndex: a.ModifyIndex,
  9745  	}
  9746  }
  9747  
  9748  func (a *ACLPolicy) Validate() error {
  9749  	var mErr multierror.Error
  9750  	if !validPolicyName.MatchString(a.Name) {
  9751  		err := fmt.Errorf("invalid name '%s'", a.Name)
  9752  		mErr.Errors = append(mErr.Errors, err)
  9753  	}
  9754  	if _, err := acl.Parse(a.Rules); err != nil {
  9755  		err = fmt.Errorf("failed to parse rules: %v", err)
  9756  		mErr.Errors = append(mErr.Errors, err)
  9757  	}
  9758  	if len(a.Description) > maxPolicyDescriptionLength {
  9759  		err := fmt.Errorf("description longer than %d", maxPolicyDescriptionLength)
  9760  		mErr.Errors = append(mErr.Errors, err)
  9761  	}
  9762  	return mErr.ErrorOrNil()
  9763  }
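
        // Illustrative sketch, not part of the original file: building a policy,
        // validating it, and computing its content hash. The name and rules are
        // hypothetical; the rules use standard Nomad ACL HCL.
        func exampleNewPolicy() (*ACLPolicy, error) {
        	p := &ACLPolicy{
        		Name:        "readonly",
        		Description: "Read-only access to all namespaces",
        		Rules:       `namespace "*" { policy = "read" }`,
        	}
        	if err := p.Validate(); err != nil {
        		return nil, err
        	}
        	// The hash covers Name, Description and Rules, so an upsert can be
        	// skipped when the stored hash already matches.
        	p.SetHash()
        	return p, nil
        }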
  9764  
  9765  // ACLPolicyListStub is used for listing ACL policies
  9766  type ACLPolicyListStub struct {
  9767  	Name        string
  9768  	Description string
  9769  	Hash        []byte
  9770  	CreateIndex uint64
  9771  	ModifyIndex uint64
  9772  }
  9773  
  9774  // ACLPolicyListRequest is used to request a list of policies
  9775  type ACLPolicyListRequest struct {
  9776  	QueryOptions
  9777  }
  9778  
  9779  // ACLPolicySpecificRequest is used to query a specific policy
  9780  type ACLPolicySpecificRequest struct {
  9781  	Name string
  9782  	QueryOptions
  9783  }
  9784  
  9785  // ACLPolicySetRequest is used to query a set of policies
  9786  type ACLPolicySetRequest struct {
  9787  	Names []string
  9788  	QueryOptions
  9789  }
  9790  
  9791  // ACLPolicyListResponse is used for a list request
  9792  type ACLPolicyListResponse struct {
  9793  	Policies []*ACLPolicyListStub
  9794  	QueryMeta
  9795  }
  9796  
  9797  // SingleACLPolicyResponse is used to return a single policy
  9798  type SingleACLPolicyResponse struct {
  9799  	Policy *ACLPolicy
  9800  	QueryMeta
  9801  }
  9802  
  9803  // ACLPolicySetResponse is used to return a set of policies
  9804  type ACLPolicySetResponse struct {
  9805  	Policies map[string]*ACLPolicy
  9806  	QueryMeta
  9807  }
  9808  
  9809  // ACLPolicyDeleteRequest is used to delete a set of policies
  9810  type ACLPolicyDeleteRequest struct {
  9811  	Names []string
  9812  	WriteRequest
  9813  }
  9814  
  9815  // ACLPolicyUpsertRequest is used to upsert a set of policies
  9816  type ACLPolicyUpsertRequest struct {
  9817  	Policies []*ACLPolicy
  9818  	WriteRequest
  9819  }
  9820  
  9821  // ACLToken represents a client token that is used to authenticate
  9822  type ACLToken struct {
  9823  	AccessorID  string   // Public Accessor ID (UUID)
  9824  	SecretID    string   // Secret ID, private (UUID)
  9825  	Name        string   // Human-friendly name
  9826  	Type        string   // Client or Management
  9827  	Policies    []string // Policies this token is associated with
  9828  	Global      bool     // Global or Region local
  9829  	Hash        []byte
  9830  	CreateTime  time.Time // Time of creation
  9831  	CreateIndex uint64
  9832  	ModifyIndex uint64
  9833  }
  9834  
  9835  var (
  9836  	// AnonymousACLToken is used when no SecretID is provided and the
  9837  	// request is made anonymously.
  9838  	AnonymousACLToken = &ACLToken{
  9839  		AccessorID: "anonymous",
  9840  		Name:       "Anonymous Token",
  9841  		Type:       ACLClientToken,
  9842  		Policies:   []string{"anonymous"},
  9843  		Global:     false,
  9844  	}
  9845  )
  9846  
  9847  type ACLTokenListStub struct {
  9848  	AccessorID  string
  9849  	Name        string
  9850  	Type        string
  9851  	Policies    []string
  9852  	Global      bool
  9853  	Hash        []byte
  9854  	CreateTime  time.Time
  9855  	CreateIndex uint64
  9856  	ModifyIndex uint64
  9857  }
  9858  
  9859  // SetHash is used to compute and set the hash of the ACL token
  9860  func (a *ACLToken) SetHash() []byte {
  9861  	// Initialize a 256-bit BLAKE2b hash (32 bytes)
  9862  	hash, err := blake2b.New256(nil)
  9863  	if err != nil {
  9864  		panic(err)
  9865  	}
  9866  
  9867  	// Write all the user-set fields
  9868  	hash.Write([]byte(a.Name))
  9869  	hash.Write([]byte(a.Type))
  9870  	for _, policyName := range a.Policies {
  9871  		hash.Write([]byte(policyName))
  9872  	}
  9873  	if a.Global {
  9874  		hash.Write([]byte("global"))
  9875  	} else {
  9876  		hash.Write([]byte("local"))
  9877  	}
  9878  
  9879  	// Finalize the hash
  9880  	hashVal := hash.Sum(nil)
  9881  
  9882  	// Set and return the hash
  9883  	a.Hash = hashVal
  9884  	return hashVal
  9885  }
  9886  
  9887  func (a *ACLToken) Stub() *ACLTokenListStub {
  9888  	return &ACLTokenListStub{
  9889  		AccessorID:  a.AccessorID,
  9890  		Name:        a.Name,
  9891  		Type:        a.Type,
  9892  		Policies:    a.Policies,
  9893  		Global:      a.Global,
  9894  		Hash:        a.Hash,
  9895  		CreateTime:  a.CreateTime,
  9896  		CreateIndex: a.CreateIndex,
  9897  		ModifyIndex: a.ModifyIndex,
  9898  	}
  9899  }
  9900  
  9901  // Validate is used to sanity-check a token
  9902  func (a *ACLToken) Validate() error {
  9903  	var mErr multierror.Error
  9904  	if len(a.Name) > maxTokenNameLength {
  9905  		mErr.Errors = append(mErr.Errors, fmt.Errorf("token name too long"))
  9906  	}
  9907  	switch a.Type {
  9908  	case ACLClientToken:
  9909  		if len(a.Policies) == 0 {
  9910  			mErr.Errors = append(mErr.Errors, fmt.Errorf("client token missing policies"))
  9911  		}
  9912  	case ACLManagementToken:
  9913  		if len(a.Policies) != 0 {
  9914  			mErr.Errors = append(mErr.Errors, fmt.Errorf("management token cannot be associated with policies"))
  9915  		}
  9916  	default:
  9917  		mErr.Errors = append(mErr.Errors, fmt.Errorf("token type must be client or management"))
  9918  	}
  9919  	return mErr.ErrorOrNil()
  9920  }
  9921  
  9922  // PolicySubset checks if the given set of policies is a subset of the token's policies
  9923  func (a *ACLToken) PolicySubset(policies []string) bool {
  9924  	// Hot-path management tokens, which are a superset of all policies.
  9925  	if a.Type == ACLManagementToken {
  9926  		return true
  9927  	}
  9928  	associatedPolicies := make(map[string]struct{}, len(a.Policies))
  9929  	for _, policy := range a.Policies {
  9930  		associatedPolicies[policy] = struct{}{}
  9931  	}
  9932  	for _, policy := range policies {
  9933  		if _, ok := associatedPolicies[policy]; !ok {
  9934  			return false
  9935  		}
  9936  	}
  9937  	return true
  9938  }
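
        // Illustrative sketch, not part of the original file: PolicySubset in use.
        // The token and policy names are hypothetical.
        func examplePolicySubset() {
        	token := &ACLToken{
        		Type:     ACLClientToken,
        		Policies: []string{"readonly", "metrics"},
        	}
        	fmt.Println(token.PolicySubset([]string{"readonly"}))           // true
        	fmt.Println(token.PolicySubset([]string{"readonly", "deploy"})) // false
        }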
  9939  
  9940  // ACLTokenListRequest is used to request a list of tokens
  9941  type ACLTokenListRequest struct {
  9942  	GlobalOnly bool
  9943  	QueryOptions
  9944  }
  9945  
  9946  // ACLTokenSpecificRequest is used to query a specific token
  9947  type ACLTokenSpecificRequest struct {
  9948  	AccessorID string
  9949  	QueryOptions
  9950  }
  9951  
  9952  // ACLTokenSetRequest is used to query a set of tokens
  9953  type ACLTokenSetRequest struct {
  9954  	AccessorIDS []string
  9955  	QueryOptions
  9956  }
  9957  
  9958  // ACLTokenListResponse is used for a list request
  9959  type ACLTokenListResponse struct {
  9960  	Tokens []*ACLTokenListStub
  9961  	QueryMeta
  9962  }
  9963  
  9964  // SingleACLTokenResponse is used to return a single token
  9965  type SingleACLTokenResponse struct {
  9966  	Token *ACLToken
  9967  	QueryMeta
  9968  }
  9969  
  9970  // ACLTokenSetResponse is used to return a set of tokens
  9971  type ACLTokenSetResponse struct {
  9972  	Tokens map[string]*ACLToken // Keyed by Accessor ID
  9973  	QueryMeta
  9974  }
  9975  
  9976  // ResolveACLTokenRequest is used to resolve a specific token
  9977  type ResolveACLTokenRequest struct {
  9978  	SecretID string
  9979  	QueryOptions
  9980  }
  9981  
  9982  // ResolveACLTokenResponse is used to resolve a single token
  9983  type ResolveACLTokenResponse struct {
  9984  	Token *ACLToken
  9985  	QueryMeta
  9986  }
  9987  
  9988  // ACLTokenDeleteRequest is used to delete a set of tokens
  9989  type ACLTokenDeleteRequest struct {
  9990  	AccessorIDs []string
  9991  	WriteRequest
  9992  }
  9993  
  9994  // ACLTokenBootstrapRequest is used to bootstrap ACLs
  9995  type ACLTokenBootstrapRequest struct {
  9996  	Token      *ACLToken // Not client specifiable
  9997  	ResetIndex uint64    // Reset index is used to clear the bootstrap token
  9998  	WriteRequest
  9999  }
 10000  
 10001  // ACLTokenUpsertRequest is used to upsert a set of tokens
 10002  type ACLTokenUpsertRequest struct {
 10003  	Tokens []*ACLToken
 10004  	WriteRequest
 10005  }
 10006  
 10007  // ACLTokenUpsertResponse is used to return from an ACLTokenUpsertRequest
 10008  type ACLTokenUpsertResponse struct {
 10009  	Tokens []*ACLToken
 10010  	WriteMeta
 10011  }