github.com/diptanu/nomad@v0.5.7-0.20170516172507-d72e86cbe3d9/nomad/config.go (about)

     1  package nomad
     2  
     3  import (
     4  	"fmt"
     5  	"io"
     6  	"net"
     7  	"os"
     8  	"runtime"
     9  	"time"
    10  
    11  	"github.com/hashicorp/memberlist"
    12  	"github.com/hashicorp/nomad/helper/tlsutil"
    13  	"github.com/hashicorp/nomad/nomad/structs"
    14  	"github.com/hashicorp/nomad/nomad/structs/config"
    15  	"github.com/hashicorp/nomad/scheduler"
    16  	"github.com/hashicorp/raft"
    17  	"github.com/hashicorp/serf/serf"
    18  )
    19  
// Defaults applied when the operator does not explicitly configure the
// region, datacenter, or Serf gossip port.
const (
	DefaultRegion   = "global"
	DefaultDC       = "dc1"
	DefaultSerfPort = 4648
)
    25  
// These are the protocol versions that Nomad can understand. They bound
// the Config.ProtocolVersion value accepted by CheckVersion.
const (
	ProtocolVersionMin uint8 = 1
	ProtocolVersionMax       = 1
)
    31  
    32  // ProtocolVersionMap is the mapping of Nomad protocol versions
    33  // to Serf protocol versions. We mask the Serf protocols using
    34  // our own protocol version.
    35  var protocolVersionMap map[uint8]uint8
    36  
    37  func init() {
    38  	protocolVersionMap = map[uint8]uint8{
    39  		1: 4,
    40  	}
    41  }
    42  
var (
	// DefaultRPCAddr is the default bind address for the Nomad RPC
	// endpoint: loopback only, on port 4647.
	DefaultRPCAddr = &net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: 4647}
)
    46  
// Config is used to parameterize the server.
type Config struct {
	// Bootstrap mode is used to bring up the first Nomad server.  It is
	// required so that it can elect a leader without any other nodes
	// being present
	Bootstrap bool

	// BootstrapExpect mode is used to automatically bring up a
	// collection of Nomad servers. This can be used to automatically
	// bring up a collection of nodes.  All operations on BootstrapExpect
	// must be handled via `atomic.*Int32()` calls.
	BootstrapExpect int32

	// DataDir is the directory to store our state in
	DataDir string

	// DevMode is used for development purposes only and limits the
	// use of persistence or state.
	DevMode bool

	// DevDisableBootstrap is used to disable bootstrap mode while
	// in DevMode. This is largely used for testing.
	DevDisableBootstrap bool

	// LogOutput is the location to write logs to. If this is not set,
	// logs will go to stderr.
	LogOutput io.Writer

	// ProtocolVersion is the protocol version to speak. This must be between
	// ProtocolVersionMin and ProtocolVersionMax.
	ProtocolVersion uint8

	// RPCAddr is the RPC address used by Nomad. This should be reachable
	// by the other servers and clients
	RPCAddr *net.TCPAddr

	// RPCAdvertise is the address that is advertised to other nodes for
	// the RPC endpoint. This can differ from the RPC address, if for example
	// the RPCAddr is unspecified "0.0.0.0:4647", but this address must be
	// reachable
	RPCAdvertise *net.TCPAddr

	// RaftConfig is the configuration used for Raft in the local DC
	RaftConfig *raft.Config

	// RaftTimeout is applied to any network traffic for raft. Defaults to 10s.
	RaftTimeout time.Duration

	// SerfConfig is the configuration for the serf cluster
	SerfConfig *serf.Config

	// NodeName is the name we use to advertise. Defaults to hostname.
	NodeName string

	// Region is the region this Nomad server belongs to.
	Region string

	// Datacenter is the datacenter this Nomad server belongs to.
	Datacenter string

	// Build is a string that is gossiped around, and can be used to help
	// operators track which versions are actively deployed
	Build string

	// NumSchedulers is the number of scheduler thread that are run.
	// This can be as many as one per core, or zero to disable this server
	// from doing any scheduling work.
	NumSchedulers int

	// EnabledSchedulers controls the set of sub-schedulers that are
	// enabled for this server to handle. This will restrict the evaluations
	// that the workers dequeue for processing.
	EnabledSchedulers []string

	// ReconcileInterval controls how often we reconcile the strongly
	// consistent store with the Serf info. This is used to handle nodes
	// that are force removed, as well as intermittent unavailability during
	// leader election.
	ReconcileInterval time.Duration

	// EvalGCInterval is how often we dispatch a job to GC evaluations
	EvalGCInterval time.Duration

	// EvalGCThreshold is how "old" an evaluation must be to be eligible
	// for GC. This gives users some time to debug a failed evaluation.
	EvalGCThreshold time.Duration

	// JobGCInterval is how often we dispatch a job to GC jobs that are
	// available for garbage collection.
	JobGCInterval time.Duration

	// JobGCThreshold is how old a job must be before it eligible for GC. This gives
	// the user time to inspect the job.
	JobGCThreshold time.Duration

	// NodeGCInterval is how often we dispatch a job to GC failed nodes.
	NodeGCInterval time.Duration

	// NodeGCThreshold is how "old" a node must be to be eligible
	// for GC. This gives users some time to view and debug a failed nodes.
	NodeGCThreshold time.Duration

	// EvalNackTimeout controls how long we allow a sub-scheduler to
	// work on an evaluation before we consider it failed and Nack it.
	// This allows that evaluation to be handed to another sub-scheduler
	// to work on. Defaults to 60 seconds. This should be long enough that
	// no evaluation hits it unless the sub-scheduler has failed.
	EvalNackTimeout time.Duration

	// EvalDeliveryLimit is the limit of attempts we make to deliver and
	// process an evaluation. This is used so that an eval that will never
	// complete eventually fails out of the system.
	EvalDeliveryLimit int

	// EvalNackInitialReenqueueDelay is the delay applied before reenqueuing a
	// Nacked evaluation for the first time. This value should be small as the
	// initial Nack can be due to a down machine and the eval should be retried
	// quickly for liveliness.
	EvalNackInitialReenqueueDelay time.Duration

	// EvalNackSubsequentReenqueueDelay is the delay applied before reenqueuing
	// an evaluation that has been Nacked more than once. This delay is
	// compounding after the first Nack. This value should be significantly
	// longer than the initial delay as the purpose it serves is to apply
	// back-pressure as evaluations are being Nacked either due to scheduler
	// failures or because they are hitting their Nack timeout, both of which
	// are signs of high server resource usage.
	EvalNackSubsequentReenqueueDelay time.Duration

	// EvalFailedFollowupBaselineDelay is the minimum time waited before
	// retrying a failed evaluation.
	EvalFailedFollowupBaselineDelay time.Duration

	// EvalFailedFollowupDelayRange defines the range of additional time from
	// the baseline in which to wait before retrying a failed evaluation. The
	// additional delay is selected from this range randomly.
	EvalFailedFollowupDelayRange time.Duration

	// MinHeartbeatTTL is the minimum time between heartbeats.
	// This is used as a floor to prevent excessive updates.
	MinHeartbeatTTL time.Duration

	// MaxHeartbeatsPerSecond is the maximum target rate of heartbeats
	// being processed per second. This allows the TTL to be increased
	// to meet the target rate.
	MaxHeartbeatsPerSecond float64

	// HeartbeatGrace is the additional time given as a grace period
	// beyond the TTL to account for network and processing delays
	// as well as clock skew.
	HeartbeatGrace time.Duration

	// FailoverHeartbeatTTL is the TTL applied to heartbeats after
	// a new leader is elected, since we no longer know the status
	// of all the heartbeats.
	FailoverHeartbeatTTL time.Duration

	// ConsulConfig is this Agent's Consul configuration
	ConsulConfig *config.ConsulConfig

	// VaultConfig is this Agent's Vault configuration
	VaultConfig *config.VaultConfig

	// RPCHoldTimeout is how long an RPC can be "held" before it is errored.
	// This is used to paper over a loss of leadership by instead holding RPCs,
	// so that the caller experiences a slow response rather than an error.
	// This period is meant to be long enough for a leader election to take
	// place, and a small jitter is applied to avoid a thundering herd.
	RPCHoldTimeout time.Duration

	// TLSConfig holds various TLS related configurations
	TLSConfig *config.TLSConfig
}
   220  
   221  // CheckVersion is used to check if the ProtocolVersion is valid
   222  func (c *Config) CheckVersion() error {
   223  	if c.ProtocolVersion < ProtocolVersionMin {
   224  		return fmt.Errorf("Protocol version '%d' too low. Must be in range: [%d, %d]",
   225  			c.ProtocolVersion, ProtocolVersionMin, ProtocolVersionMax)
   226  	} else if c.ProtocolVersion > ProtocolVersionMax {
   227  		return fmt.Errorf("Protocol version '%d' too high. Must be in range: [%d, %d]",
   228  			c.ProtocolVersion, ProtocolVersionMin, ProtocolVersionMax)
   229  	}
   230  	return nil
   231  }
   232  
   233  // DefaultConfig returns the default configuration
   234  func DefaultConfig() *Config {
   235  	hostname, err := os.Hostname()
   236  	if err != nil {
   237  		panic(err)
   238  	}
   239  
   240  	c := &Config{
   241  		Region:                           DefaultRegion,
   242  		Datacenter:                       DefaultDC,
   243  		NodeName:                         hostname,
   244  		ProtocolVersion:                  ProtocolVersionMax,
   245  		RaftConfig:                       raft.DefaultConfig(),
   246  		RaftTimeout:                      10 * time.Second,
   247  		LogOutput:                        os.Stderr,
   248  		RPCAddr:                          DefaultRPCAddr,
   249  		SerfConfig:                       serf.DefaultConfig(),
   250  		NumSchedulers:                    1,
   251  		ReconcileInterval:                60 * time.Second,
   252  		EvalGCInterval:                   5 * time.Minute,
   253  		EvalGCThreshold:                  1 * time.Hour,
   254  		JobGCInterval:                    5 * time.Minute,
   255  		JobGCThreshold:                   4 * time.Hour,
   256  		NodeGCInterval:                   5 * time.Minute,
   257  		NodeGCThreshold:                  24 * time.Hour,
   258  		EvalNackTimeout:                  60 * time.Second,
   259  		EvalDeliveryLimit:                3,
   260  		EvalNackInitialReenqueueDelay:    1 * time.Second,
   261  		EvalNackSubsequentReenqueueDelay: 20 * time.Second,
   262  		EvalFailedFollowupBaselineDelay:  1 * time.Minute,
   263  		EvalFailedFollowupDelayRange:     5 * time.Minute,
   264  		MinHeartbeatTTL:                  10 * time.Second,
   265  		MaxHeartbeatsPerSecond:           50.0,
   266  		HeartbeatGrace:                   10 * time.Second,
   267  		FailoverHeartbeatTTL:             300 * time.Second,
   268  		ConsulConfig:                     config.DefaultConsulConfig(),
   269  		VaultConfig:                      config.DefaultVaultConfig(),
   270  		RPCHoldTimeout:                   5 * time.Second,
   271  		TLSConfig:                        &config.TLSConfig{},
   272  	}
   273  
   274  	// Enable all known schedulers by default
   275  	c.EnabledSchedulers = make([]string, 0, len(scheduler.BuiltinSchedulers))
   276  	for name := range scheduler.BuiltinSchedulers {
   277  		c.EnabledSchedulers = append(c.EnabledSchedulers, name)
   278  	}
   279  	c.EnabledSchedulers = append(c.EnabledSchedulers, structs.JobTypeCore)
   280  
   281  	// Default the number of schedulers to match the coores
   282  	c.NumSchedulers = runtime.NumCPU()
   283  
   284  	// Increase our reap interval to 3 days instead of 24h.
   285  	c.SerfConfig.ReconnectTimeout = 3 * 24 * time.Hour
   286  
   287  	// Serf should use the WAN timing, since we are using it
   288  	// to communicate between DC's
   289  	c.SerfConfig.MemberlistConfig = memberlist.DefaultWANConfig()
   290  	c.SerfConfig.MemberlistConfig.BindPort = DefaultSerfPort
   291  
   292  	// Disable shutdown on removal
   293  	c.RaftConfig.ShutdownOnRemove = false
   294  
   295  	// Enable interoperability with unversioned Raft library, and don't
   296  	// start using new ID-based features yet.
   297  	c.RaftConfig.ProtocolVersion = 1
   298  
   299  	return c
   300  }
   301  
   302  // tlsConfig returns a TLSUtil Config based on the server configuration
   303  func (c *Config) tlsConfig() *tlsutil.Config {
   304  	tlsConf := &tlsutil.Config{
   305  		VerifyIncoming:       true,
   306  		VerifyOutgoing:       true,
   307  		VerifyServerHostname: c.TLSConfig.VerifyServerHostname,
   308  		CAFile:               c.TLSConfig.CAFile,
   309  		CertFile:             c.TLSConfig.CertFile,
   310  		KeyFile:              c.TLSConfig.KeyFile,
   311  	}
   312  	return tlsConf
   313  }