// github.com/ferranbt/nomad@v0.9.3-0.20190607002617-85c449b7667c/nomad/config.go

package nomad

import (
	"fmt"
	"io"
	"net"
	"os"
	"runtime"
	"time"

	log "github.com/hashicorp/go-hclog"

	"github.com/hashicorp/memberlist"
	"github.com/hashicorp/nomad/helper/pluginutils/loader"
	"github.com/hashicorp/nomad/helper/uuid"
	"github.com/hashicorp/nomad/nomad/structs"
	"github.com/hashicorp/nomad/nomad/structs/config"
	"github.com/hashicorp/nomad/scheduler"
	"github.com/hashicorp/raft"
	"github.com/hashicorp/serf/serf"
)

const (
	DefaultRegion   = "global"
	DefaultDC       = "dc1"
	DefaultSerfPort = 4648
)

// These are the protocol versions that Nomad can understand
const (
	ProtocolVersionMin uint8 = 1
	ProtocolVersionMax       = 1
)

// protocolVersionMap is the mapping of Nomad protocol versions
// to Serf protocol versions. We mask the Serf protocols using
// our own protocol version.
var protocolVersionMap map[uint8]uint8

func init() {
	protocolVersionMap = map[uint8]uint8{
		1: 4,
	}
}
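
// The function below is an illustrative sketch and not part of the original
// file: it shows how a server might translate its configured Nomad protocol
// version into the Serf protocol version it gossips with, using
// protocolVersionMap. The function name is an assumption for demonstration only.
func exampleSerfProtocolVersion(c *Config) (uint8, bool) {
	// Look up the Serf protocol version masked by our own protocol version.
	serfVersion, ok := protocolVersionMap[c.ProtocolVersion]
	return serfVersion, ok
}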

var (
	DefaultRPCAddr = &net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: 4647}
)

// Config is used to parameterize the server
type Config struct {
	// Bootstrap mode is used to bring up the first Nomad server. It is
	// required so that it can elect a leader without any other nodes
	// being present
	Bootstrap bool

	// BootstrapExpect is used to automatically bring up a collection of
	// Nomad servers: bootstrapping is deferred until this many servers
	// have joined. All operations on BootstrapExpect must be handled via
	// `atomic.*Int32()` calls.
	BootstrapExpect int32

	// DataDir is the directory to store our state in
	DataDir string

	// DevMode is used for development purposes only and limits the
	// use of persistence or state.
	DevMode bool

	// DevDisableBootstrap is used to disable bootstrap mode while
	// in DevMode. This is largely used for testing.
	DevDisableBootstrap bool

	// LogOutput is the location to write logs to. If this is not set,
	// logs will go to stderr.
	LogOutput io.Writer

	// Logger is the logger used by the server.
	Logger log.Logger

	// ProtocolVersion is the protocol version to speak. This must be between
	// ProtocolVersionMin and ProtocolVersionMax.
	ProtocolVersion uint8

	// RPCAddr is the RPC address used by Nomad. This should be reachable
	// by the other servers and clients
	RPCAddr *net.TCPAddr

	// ClientRPCAdvertise is the address advertised to client nodes for the
	// RPC endpoint. It can differ from RPCAddr when, for example, RPCAddr
	// is bound to an unspecified address such as "0.0.0.0:4647"; unlike
	// RPCAddr, this address must be reachable by clients. See the
	// advertise-address sketch after this struct for an illustration.
	ClientRPCAdvertise *net.TCPAddr

	// ServerRPCAdvertise is the address advertised to other servers for the
	// RPC endpoint. It can differ from RPCAddr when, for example, RPCAddr
	// is bound to an unspecified address such as "0.0.0.0:4647"; unlike
	// RPCAddr, this address must be reachable by the other servers.
	ServerRPCAdvertise *net.TCPAddr

	// RaftConfig is the configuration used for Raft in the local DC
	RaftConfig *raft.Config

	// RaftTimeout is applied to any network traffic for raft. Defaults to 10s.
	RaftTimeout time.Duration

	// (Enterprise-only) NonVoter is used to prevent this server from being added
	// as a voting member of the Raft cluster.
	NonVoter bool

	// (Enterprise-only) RedundancyZone is the redundancy zone to use for this server.
	RedundancyZone string

	// (Enterprise-only) UpgradeVersion is the custom upgrade version to use when
	// performing upgrade migrations.
	UpgradeVersion string

	// SerfConfig is the configuration for the serf cluster
	SerfConfig *serf.Config

	// NodeName is the name we use to advertise. Defaults to hostname.
	NodeName string

	// NodeID is the UUID of this server.
	NodeID string

	// Region is the region this Nomad server belongs to.
	Region string

	// AuthoritativeRegion is the region which is treated as the authoritative source
	// for ACLs and Policies. This provides a single source of truth to resolve conflicts.
	AuthoritativeRegion string

	// Datacenter is the datacenter this Nomad server belongs to.
	Datacenter string

	// Build is a string that is gossiped around, and can be used to help
	// operators track which versions are actively deployed
	Build string

	// NumSchedulers is the number of scheduler threads that are run.
	// This can be as many as one per core, or zero to disable this server
	// from doing any scheduling work.
	NumSchedulers int

	// EnabledSchedulers controls the set of sub-schedulers that are
	// enabled for this server to handle. This will restrict the evaluations
	// that the workers dequeue for processing.
	EnabledSchedulers []string

	// ReconcileInterval controls how often we reconcile the strongly
	// consistent store with the Serf info. This is used to handle nodes
	// that are force removed, as well as intermittent unavailability during
	// leader election.
	ReconcileInterval time.Duration

	// EvalGCInterval is how often we dispatch a job to GC evaluations
	EvalGCInterval time.Duration

	// EvalGCThreshold is how "old" an evaluation must be to be eligible
	// for GC. This gives users some time to debug a failed evaluation.
	EvalGCThreshold time.Duration

	// JobGCInterval is how often we dispatch a job to GC jobs that are
	// available for garbage collection.
	JobGCInterval time.Duration

	// JobGCThreshold is how old a job must be before it is eligible for GC.
	// This gives the user time to inspect the job.
	JobGCThreshold time.Duration

	// NodeGCInterval is how often we dispatch a job to GC failed nodes.
	NodeGCInterval time.Duration

	// NodeGCThreshold is how "old" a node must be to be eligible
	// for GC. This gives users some time to view and debug failed nodes.
	NodeGCThreshold time.Duration

	// DeploymentGCInterval is how often we dispatch a job to GC terminal
	// deployments.
	DeploymentGCInterval time.Duration

	// DeploymentGCThreshold is how "old" a deployment must be to be eligible
	// for GC. This gives users some time to view terminal deployments.
	DeploymentGCThreshold time.Duration

	// EvalNackTimeout controls how long we allow a sub-scheduler to
	// work on an evaluation before we consider it failed and Nack it.
	// This allows that evaluation to be handed to another sub-scheduler
	// to work on. Defaults to 60 seconds. This should be long enough that
	// no evaluation hits it unless the sub-scheduler has failed.
	EvalNackTimeout time.Duration

	// EvalDeliveryLimit is the limit of attempts we make to deliver and
	// process an evaluation. This is used so that an eval that will never
	// complete eventually fails out of the system.
	EvalDeliveryLimit int

	// EvalNackInitialReenqueueDelay is the delay applied before reenqueuing a
	// Nacked evaluation for the first time. This value should be small as the
	// initial Nack can be due to a down machine and the eval should be retried
	// quickly for liveness.
	EvalNackInitialReenqueueDelay time.Duration

	// EvalNackSubsequentReenqueueDelay is the delay applied before reenqueuing
	// an evaluation that has been Nacked more than once. This delay is
	// compounding after the first Nack. This value should be significantly
	// longer than the initial delay as the purpose it serves is to apply
	// back-pressure as evaluations are being Nacked either due to scheduler
	// failures or because they are hitting their Nack timeout, both of which
	// are signs of high server resource usage.
	EvalNackSubsequentReenqueueDelay time.Duration

	// EvalFailedFollowupBaselineDelay is the minimum time waited before
	// retrying a failed evaluation.
	EvalFailedFollowupBaselineDelay time.Duration

	// EvalFailedFollowupDelayRange defines the range of additional time from
	// the baseline in which to wait before retrying a failed evaluation. The
	// additional delay is selected from this range randomly.
	EvalFailedFollowupDelayRange time.Duration

	// MinHeartbeatTTL is the minimum time between heartbeats.
	// This is used as a floor to prevent excessive updates.
	MinHeartbeatTTL time.Duration

	// MaxHeartbeatsPerSecond is the maximum target rate of heartbeats
	// being processed per second. This allows the TTL to be increased
	// to meet the target rate.
	MaxHeartbeatsPerSecond float64

	// HeartbeatGrace is the additional time given as a grace period
	// beyond the TTL to account for network and processing delays
	// as well as clock skew.
	HeartbeatGrace time.Duration

	// FailoverHeartbeatTTL is the TTL applied to heartbeats after
	// a new leader is elected, since we no longer know the status
	// of all the heartbeats.
	FailoverHeartbeatTTL time.Duration

	// ConsulConfig is this Agent's Consul configuration
	ConsulConfig *config.ConsulConfig

	// VaultConfig is this Agent's Vault configuration
	VaultConfig *config.VaultConfig

	// RPCHoldTimeout is how long an RPC can be "held" before it is errored.
	// This is used to paper over a loss of leadership by instead holding RPCs,
	// so that the caller experiences a slow response rather than an error.
	// This period is meant to be long enough for a leader election to take
	// place, and a small jitter is applied to avoid a thundering herd.
	RPCHoldTimeout time.Duration

	// TLSConfig holds various TLS related configurations
	TLSConfig *config.TLSConfig

	// ACLEnabled controls if ACL enforcement and management is enabled.
	ACLEnabled bool

	// ReplicationBackoff is how much we backoff when replication errors.
	// This is a tunable knob for testing primarily.
	ReplicationBackoff time.Duration

	// ReplicationToken is the ACL Token Secret ID used to fetch from
	// the Authoritative Region.
	ReplicationToken string

	// SentinelGCInterval is the interval that we GC unused policies.
	SentinelGCInterval time.Duration

	// SentinelConfig is this Agent's Sentinel configuration
	SentinelConfig *config.SentinelConfig

	// StatsCollectionInterval is the interval at which the Nomad server
	// publishes metrics which are periodic in nature like updating gauges
	StatsCollectionInterval time.Duration

	// DisableTaggedMetrics determines whether metrics will be displayed via a
	// key/value/tag format, or simply a key/value format
	DisableTaggedMetrics bool

	// DisableDispatchedJobSummaryMetrics allows ignoring dispatched jobs when
	// publishing job summary metrics
	DisableDispatchedJobSummaryMetrics bool

	// BackwardsCompatibleMetrics determines whether metrics are also emitted in
	// the format used by older versions, or only in the new format
	BackwardsCompatibleMetrics bool

	// AutopilotConfig is used to apply the initial autopilot config when
	// bootstrapping.
	AutopilotConfig *structs.AutopilotConfig

	// ServerHealthInterval is the frequency with which the health of the
	// servers in the cluster will be updated.
	ServerHealthInterval time.Duration

	// AutopilotInterval is the frequency with which the leader will perform
	// autopilot tasks, such as promoting eligible non-voters and removing
	// dead servers.
	AutopilotInterval time.Duration

	// PluginLoader is used to load plugins.
	PluginLoader loader.PluginCatalog

	// PluginSingletonLoader is a plugin loader that returns singleton
	// instances of the plugins.
	PluginSingletonLoader loader.PluginCatalog
}
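
// The function below is an illustrative sketch and not part of the original
// file: it shows why the advertise fields exist alongside RPCAddr. A server may
// bind to an unspecified address (0.0.0.0) that peers cannot dial, so a
// routable address is advertised instead. The function name and the example
// addresses are assumptions for demonstration only.
func exampleAdvertiseAddrs(c *Config) {
	// Listen on all interfaces on the default RPC port.
	c.RPCAddr = &net.TCPAddr{IP: net.IPv4zero, Port: 4647}

	// Tell clients and other servers to dial a reachable address instead.
	advertise := &net.TCPAddr{IP: net.ParseIP("10.0.0.5"), Port: 4647}
	c.ClientRPCAdvertise = advertise
	c.ServerRPCAdvertise = advertise
}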

// CheckVersion is used to check if the ProtocolVersion is valid
func (c *Config) CheckVersion() error {
	if c.ProtocolVersion < ProtocolVersionMin {
		return fmt.Errorf("Protocol version '%d' too low. Must be in range: [%d, %d]",
			c.ProtocolVersion, ProtocolVersionMin, ProtocolVersionMax)
	} else if c.ProtocolVersion > ProtocolVersionMax {
		return fmt.Errorf("Protocol version '%d' too high. Must be in range: [%d, %d]",
			c.ProtocolVersion, ProtocolVersionMin, ProtocolVersionMax)
	}
	return nil
}
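
// The function below is an illustrative sketch and not part of the original
// file: it shows how a caller might validate the protocol version before
// wiring a Config into a server. The function name and error wrapping are
// assumptions for demonstration only.
func exampleValidateConfig(c *Config) error {
	if err := c.CheckVersion(); err != nil {
		return fmt.Errorf("refusing to start server: %v", err)
	}
	return nil
}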

// DefaultConfig returns the default configuration
func DefaultConfig() *Config {
	hostname, err := os.Hostname()
	if err != nil {
		panic(err)
	}

	c := &Config{
		Region:                           DefaultRegion,
		AuthoritativeRegion:              DefaultRegion,
		Datacenter:                       DefaultDC,
		NodeName:                         hostname,
		NodeID:                           uuid.Generate(),
		ProtocolVersion:                  ProtocolVersionMax,
		RaftConfig:                       raft.DefaultConfig(),
		RaftTimeout:                      10 * time.Second,
		LogOutput:                        os.Stderr,
		RPCAddr:                          DefaultRPCAddr,
		SerfConfig:                       serf.DefaultConfig(),
		NumSchedulers:                    1,
		ReconcileInterval:                60 * time.Second,
		EvalGCInterval:                   5 * time.Minute,
		EvalGCThreshold:                  1 * time.Hour,
		JobGCInterval:                    5 * time.Minute,
		JobGCThreshold:                   4 * time.Hour,
		NodeGCInterval:                   5 * time.Minute,
		NodeGCThreshold:                  24 * time.Hour,
		DeploymentGCInterval:             5 * time.Minute,
		DeploymentGCThreshold:            1 * time.Hour,
		EvalNackTimeout:                  60 * time.Second,
		EvalDeliveryLimit:                3,
		EvalNackInitialReenqueueDelay:    1 * time.Second,
		EvalNackSubsequentReenqueueDelay: 20 * time.Second,
		EvalFailedFollowupBaselineDelay:  1 * time.Minute,
		EvalFailedFollowupDelayRange:     5 * time.Minute,
		MinHeartbeatTTL:                  10 * time.Second,
		MaxHeartbeatsPerSecond:           50.0,
		HeartbeatGrace:                   10 * time.Second,
		FailoverHeartbeatTTL:             300 * time.Second,
		ConsulConfig:                     config.DefaultConsulConfig(),
		VaultConfig:                      config.DefaultVaultConfig(),
		RPCHoldTimeout:                   5 * time.Second,
		StatsCollectionInterval:          1 * time.Minute,
		TLSConfig:                        &config.TLSConfig{},
		ReplicationBackoff:               30 * time.Second,
		SentinelGCInterval:               30 * time.Second,
		AutopilotConfig: &structs.AutopilotConfig{
			CleanupDeadServers:      true,
			LastContactThreshold:    200 * time.Millisecond,
			MaxTrailingLogs:         250,
			ServerStabilizationTime: 10 * time.Second,
		},
		ServerHealthInterval: 2 * time.Second,
		AutopilotInterval:    10 * time.Second,
	}

	// Enable all known schedulers by default
	c.EnabledSchedulers = make([]string, 0, len(scheduler.BuiltinSchedulers))
	for name := range scheduler.BuiltinSchedulers {
		c.EnabledSchedulers = append(c.EnabledSchedulers, name)
	}
	c.EnabledSchedulers = append(c.EnabledSchedulers, structs.JobTypeCore)

	// Default the number of schedulers to match the cores
	c.NumSchedulers = runtime.NumCPU()

	// Increase our reap interval to 3 days instead of 24h.
	c.SerfConfig.ReconnectTimeout = 3 * 24 * time.Hour

	// Serf should use the WAN timing, since we are using it
	// to communicate between DCs
	c.SerfConfig.MemberlistConfig = memberlist.DefaultWANConfig()
	c.SerfConfig.MemberlistConfig.BindPort = DefaultSerfPort

	// Disable shutdown on removal
	c.RaftConfig.ShutdownOnRemove = false

	// Enable interoperability with new raft APIs; this requires all servers
	// to be on raft v1 or higher.
	c.RaftConfig.ProtocolVersion = 2

	return c
}
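
// The helper below is an illustrative sketch and not part of the original
// file: it shows one way a caller might start from DefaultConfig and override
// a few fields for a small, single-server setup. The function name and the
// specific overrides are assumptions for demonstration only.
func exampleSingleServerConfig(region, datacenter string) *Config {
	c := DefaultConfig()

	// A lone server must bootstrap itself so it can elect a leader.
	c.Bootstrap = true

	// Identify where this server lives in the topology.
	c.Region = region
	c.Datacenter = datacenter

	// Keep scheduling work modest on small machines.
	c.NumSchedulers = 2

	// Bind RPC and Serf gossip to loopback only.
	c.RPCAddr = &net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: 4647}
	c.SerfConfig.MemberlistConfig.BindAddr = "127.0.0.1"

	return c
}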