github.com/hernad/nomad@v1.6.112/command/agent/command.go

// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package agent

import (
	"flag"
	"fmt"
	"io"
	"log"
	"os"
	"os/signal"
	"path/filepath"
	"reflect"
	"sort"
	"strconv"
	"strings"
	"syscall"
	"time"

	metrics "github.com/armon/go-metrics"
	"github.com/armon/go-metrics/circonus"
	"github.com/armon/go-metrics/datadog"
	"github.com/armon/go-metrics/prometheus"
	checkpoint "github.com/hashicorp/go-checkpoint"
	discover "github.com/hashicorp/go-discover"
	hclog "github.com/hashicorp/go-hclog"
	gsyslog "github.com/hashicorp/go-syslog"
	"github.com/hashicorp/logutils"
	"github.com/hernad/nomad/helper"
	flaghelper "github.com/hernad/nomad/helper/flags"
	gatedwriter "github.com/hernad/nomad/helper/gated-writer"
	"github.com/hernad/nomad/helper/logging"
	"github.com/hernad/nomad/helper/winsvc"
	"github.com/hernad/nomad/nomad/structs"
	"github.com/hernad/nomad/nomad/structs/config"
	"github.com/hernad/nomad/version"
	"github.com/mitchellh/cli"
	"github.com/posener/complete"
)

// gracefulTimeout controls how long we wait before forcefully terminating
const gracefulTimeout = 5 * time.Second

// Command is a Command implementation that runs a Nomad agent.
// The command will not end unless a shutdown message is sent on the
// ShutdownCh. If two messages are sent on the ShutdownCh it will forcibly
// exit.
type Command struct {
	Version    *version.VersionInfo
	Ui         cli.Ui
	ShutdownCh <-chan struct{}

	args           []string
	agent          *Agent
	httpServers    []*HTTPServer
	logFilter      *logutils.LevelFilter
	logOutput      io.Writer
	retryJoinErrCh chan struct{}
}
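
// A minimal sketch of how this Command is typically wired up and executed.
// The surrounding CLI plumbing is simplified and the values are illustrative
// only; the real wiring lives elsewhere in the repository.
//
//	shutdownCh := make(chan struct{})
//	cmd := &Command{
//		Version:    version.GetVersion(),
//		Ui:         &cli.BasicUi{Reader: os.Stdin, Writer: os.Stdout, ErrorWriter: os.Stderr},
//		ShutdownCh: shutdownCh,
//	}
//	exitCode := cmd.Run([]string{"-dev"})

// readConfig parses the agent's CLI flags and configuration files, merges
// them (CLI flags over config files, config files over defaults), and returns
// the resulting Config. It returns nil if parsing or validation fails.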
func (c *Command) readConfig() *Config {
	var dev *devModeConfig
	var configPath []string
	var servers string
	var meta []string

	// Make a new, empty config.
	cmdConfig := &Config{
		Client: &ClientConfig{},
		Consul: &config.ConsulConfig{},
		Ports:  &Ports{},
		Server: &ServerConfig{
			ServerJoin: &ServerJoin{},
		},
		Vault: &config.VaultConfig{},
		ACL:   &ACLConfig{},
		Audit: &config.AuditConfig{},
	}

	flags := flag.NewFlagSet("agent", flag.ContinueOnError)
	flags.Usage = func() { c.Ui.Error(c.Help()) }

	// Role options
	var devMode bool
	var devConnectMode bool
	flags.BoolVar(&devMode, "dev", false, "")
	flags.BoolVar(&devConnectMode, "dev-connect", false, "")
	flags.BoolVar(&cmdConfig.Server.Enabled, "server", false, "")
	flags.BoolVar(&cmdConfig.Client.Enabled, "client", false, "")

	// Server-only options
	flags.IntVar(&cmdConfig.Server.BootstrapExpect, "bootstrap-expect", 0, "")
	flags.StringVar(&cmdConfig.Server.EncryptKey, "encrypt", "", "gossip encryption key")
	flags.IntVar(&cmdConfig.Server.RaftProtocol, "raft-protocol", 0, "")
	flags.BoolVar(&cmdConfig.Server.RejoinAfterLeave, "rejoin", false, "")
	flags.Var((*flaghelper.StringFlag)(&cmdConfig.Server.ServerJoin.StartJoin), "join", "")
	flags.Var((*flaghelper.StringFlag)(&cmdConfig.Server.ServerJoin.RetryJoin), "retry-join", "")
	flags.IntVar(&cmdConfig.Server.ServerJoin.RetryMaxAttempts, "retry-max", 0, "")
	flags.Var((flaghelper.FuncDurationVar)(func(d time.Duration) error {
		cmdConfig.Server.ServerJoin.RetryInterval = d
		return nil
	}), "retry-interval", "")

	// Client-only options
	flags.StringVar(&cmdConfig.Client.StateDir, "state-dir", "", "")
	flags.StringVar(&cmdConfig.Client.AllocDir, "alloc-dir", "", "")
	flags.StringVar(&cmdConfig.Client.NodeClass, "node-class", "", "")
	flags.StringVar(&cmdConfig.Client.NodePool, "node-pool", "", "")
	flags.StringVar(&servers, "servers", "", "")
	flags.Var((*flaghelper.StringFlag)(&meta), "meta", "")
	flags.StringVar(&cmdConfig.Client.NetworkInterface, "network-interface", "", "")
	flags.IntVar(&cmdConfig.Client.NetworkSpeed, "network-speed", 0, "")

	// General options
	flags.Var((*flaghelper.StringFlag)(&configPath), "config", "config")
	flags.StringVar(&cmdConfig.BindAddr, "bind", "", "")
	flags.StringVar(&cmdConfig.Region, "region", "", "")
	flags.StringVar(&cmdConfig.DataDir, "data-dir", "", "")
	flags.StringVar(&cmdConfig.PluginDir, "plugin-dir", "", "")
	flags.StringVar(&cmdConfig.Datacenter, "dc", "", "")
	flags.StringVar(&cmdConfig.LogLevel, "log-level", "", "")
	flags.BoolVar(&cmdConfig.LogJson, "log-json", false, "")
	flags.StringVar(&cmdConfig.NodeName, "node", "", "")

	// Consul options
	flags.StringVar(&cmdConfig.Consul.Auth, "consul-auth", "", "")
	flags.Var((flaghelper.FuncBoolVar)(func(b bool) error {
		cmdConfig.Consul.AutoAdvertise = &b
		return nil
	}), "consul-auto-advertise", "")
	flags.StringVar(&cmdConfig.Consul.CAFile, "consul-ca-file", "", "")
	flags.StringVar(&cmdConfig.Consul.CertFile, "consul-cert-file", "", "")
	flags.StringVar(&cmdConfig.Consul.KeyFile, "consul-key-file", "", "")
	flags.Var((flaghelper.FuncBoolVar)(func(b bool) error {
		cmdConfig.Consul.ChecksUseAdvertise = &b
		return nil
	}), "consul-checks-use-advertise", "")
	flags.Var((flaghelper.FuncBoolVar)(func(b bool) error {
		cmdConfig.Consul.ClientAutoJoin = &b
		return nil
	}), "consul-client-auto-join", "")
	flags.StringVar(&cmdConfig.Consul.ClientServiceName, "consul-client-service-name", "", "")
	flags.StringVar(&cmdConfig.Consul.ClientHTTPCheckName, "consul-client-http-check-name", "", "")
	flags.StringVar(&cmdConfig.Consul.ServerServiceName, "consul-server-service-name", "", "")
	flags.StringVar(&cmdConfig.Consul.ServerHTTPCheckName, "consul-server-http-check-name", "", "")
	flags.StringVar(&cmdConfig.Consul.ServerSerfCheckName, "consul-server-serf-check-name", "", "")
	flags.StringVar(&cmdConfig.Consul.ServerRPCCheckName, "consul-server-rpc-check-name", "", "")
	flags.Var((flaghelper.FuncBoolVar)(func(b bool) error {
		cmdConfig.Consul.ServerAutoJoin = &b
		return nil
	}), "consul-server-auto-join", "")
	flags.Var((flaghelper.FuncBoolVar)(func(b bool) error {
		cmdConfig.Consul.EnableSSL = &b
		return nil
	}), "consul-ssl", "")
	flags.StringVar(&cmdConfig.Consul.Token, "consul-token", "", "")
	flags.Var((flaghelper.FuncBoolVar)(func(b bool) error {
		cmdConfig.Consul.VerifySSL = &b
		return nil
	}), "consul-verify-ssl", "")
	flags.StringVar(&cmdConfig.Consul.Addr, "consul-address", "", "")
	flags.Var((flaghelper.FuncBoolVar)(func(b bool) error {
		cmdConfig.Consul.AllowUnauthenticated = &b
		return nil
	}), "consul-allow-unauthenticated", "")

	// Vault options
	flags.Var((flaghelper.FuncBoolVar)(func(b bool) error {
		cmdConfig.Vault.Enabled = &b
		return nil
	}), "vault-enabled", "")
	flags.Var((flaghelper.FuncBoolVar)(func(b bool) error {
		cmdConfig.Vault.AllowUnauthenticated = &b
		return nil
	}), "vault-allow-unauthenticated", "")
	flags.StringVar(&cmdConfig.Vault.Token, "vault-token", "", "")
	flags.StringVar(&cmdConfig.Vault.Addr, "vault-address", "", "")
	flags.StringVar(&cmdConfig.Vault.Namespace, "vault-namespace", "", "")
	flags.StringVar(&cmdConfig.Vault.Role, "vault-create-from-role", "", "")
	flags.StringVar(&cmdConfig.Vault.TLSCaFile, "vault-ca-file", "", "")
	flags.StringVar(&cmdConfig.Vault.TLSCaPath, "vault-ca-path", "", "")
	flags.StringVar(&cmdConfig.Vault.TLSCertFile, "vault-cert-file", "", "")
	flags.StringVar(&cmdConfig.Vault.TLSKeyFile, "vault-key-file", "", "")
	flags.Var((flaghelper.FuncBoolVar)(func(b bool) error {
		cmdConfig.Vault.TLSSkipVerify = &b
		return nil
	}), "vault-tls-skip-verify", "")
	flags.StringVar(&cmdConfig.Vault.TLSServerName, "vault-tls-server-name", "", "")

	// ACL options
	flags.BoolVar(&cmdConfig.ACL.Enabled, "acl-enabled", false, "")
	flags.StringVar(&cmdConfig.ACL.ReplicationToken, "acl-replication-token", "", "")

	if err := flags.Parse(c.args); err != nil {
		return nil
	}

	// Split the servers.
	if servers != "" {
		cmdConfig.Client.Servers = strings.Split(servers, ",")
	}

	// Parse the meta flags.
	metaLength := len(meta)
	if metaLength != 0 {
		cmdConfig.Client.Meta = make(map[string]string, metaLength)
		for _, kv := range meta {
			parts := strings.SplitN(kv, "=", 2)
			if len(parts) != 2 {
				c.Ui.Error(fmt.Sprintf("Error parsing Client.Meta value: %v", kv))
				return nil
			}
			cmdConfig.Client.Meta[parts[0]] = parts[1]
		}
	}

	// Load the configuration
	dev, err := newDevModeConfig(devMode, devConnectMode)
	if err != nil {
		c.Ui.Error(err.Error())
		return nil
	}
	var config *Config
	if dev != nil {
		config = DevConfig(dev)
	} else {
		config = DefaultConfig()
	}

	// Merge in the enterprise overlay
	config = config.Merge(DefaultEntConfig())

	for _, path := range configPath {
		current, err := LoadConfig(path)
		if err != nil {
			c.Ui.Error(fmt.Sprintf(
				"Error loading configuration from %s: %s", path, err))
			return nil
		}

		// The user asked us to load some config here but we didn't find any,
		// so we'll complain but continue.
		if current == nil || reflect.DeepEqual(current, &Config{}) {
			c.Ui.Warn(fmt.Sprintf("No configuration loaded from %s", path))
		}

		if config == nil {
			config = current
		} else {
			config = config.Merge(current)
		}
	}

	// Ensure the sub-structs at least exist
	if config.Client == nil {
		config.Client = &ClientConfig{}
	}

	if config.Server == nil {
		config.Server = &ServerConfig{}
	}

	// Merge any CLI options over config file options
	config = config.Merge(cmdConfig)

	// Set the version info
	config.Version = c.Version

	// Normalize binds, ports, addresses, and advertise
	if err := config.normalizeAddrs(); err != nil {
		c.Ui.Error(err.Error())
		return nil
	}

	// Check to see if we should read the Vault token from the environment
	if config.Vault.Token == "" {
		config.Vault.Token = os.Getenv("VAULT_TOKEN")
	}

	// Check to see if we should read the Vault namespace from the environment
	if config.Vault.Namespace == "" {
		config.Vault.Namespace = os.Getenv("VAULT_NAMESPACE")
	}

	// Default the plugin directory to be under that of the data directory if it
	// isn't explicitly specified.
	if config.PluginDir == "" && config.DataDir != "" {
		config.PluginDir = filepath.Join(config.DataDir, "plugins")
	}

	// License configuration options
	config.Server.LicenseEnv = os.Getenv("NOMAD_LICENSE")
	if config.Server.LicensePath == "" {
		config.Server.LicensePath = os.Getenv("NOMAD_LICENSE_PATH")
	}

	config.Server.DefaultSchedulerConfig.Canonicalize()

	if !c.IsValidConfig(config, cmdConfig) {
		return nil
	}

	return config
}
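
// IsValidConfig validates the merged agent configuration, reporting any
// problems to the UI. It returns false when the agent should refuse to start.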
func (c *Command) IsValidConfig(config, cmdConfig *Config) bool {

	// Check that the server is running in at least one mode.
	if !(config.Server.Enabled || config.Client.Enabled) {
		c.Ui.Error("Must specify either server, client or dev mode for the agent.")
		return false
	}

	// Check that the region does not contain invalid characters
	if strings.ContainsAny(config.Region, "\000") {
		c.Ui.Error("Region contains invalid characters")
		return false
	}

	// Check that the datacenter name does not contain invalid characters
	if strings.ContainsAny(config.Datacenter, "\000*") {
		c.Ui.Error("Datacenter contains invalid characters (null or '*')")
		return false
	}

	// Set up the TLS configuration properly if we have one.
	// XXX chelseakomlo: set up a TLSConfig New method which would wrap
	// constructor-type actions like this.
	if config.TLSConfig != nil && !config.TLSConfig.IsEmpty() {
		if err := config.TLSConfig.SetChecksum(); err != nil {
			c.Ui.Error(fmt.Sprintf("WARNING: Error when parsing TLS configuration: %v", err))
		}
	}
	if !config.DevMode && (config.TLSConfig == nil ||
		!config.TLSConfig.EnableHTTP || !config.TLSConfig.EnableRPC) {
		c.Ui.Error("WARNING: mTLS is not configured - Nomad is not secure without mTLS!")
	}

	if config.Server.EncryptKey != "" {
		if _, err := config.Server.EncryptBytes(); err != nil {
			c.Ui.Error(fmt.Sprintf("Invalid encryption key: %s", err))
			return false
		}
		keyfile := filepath.Join(config.DataDir, serfKeyring)
		if _, err := os.Stat(keyfile); err == nil {
			c.Ui.Warn("WARNING: keyring exists but -encrypt given, using keyring")
		}
	}

	// Verify the paths are absolute.
	dirs := map[string]string{
		"data-dir":   config.DataDir,
		"plugin-dir": config.PluginDir,
		"alloc-dir":  config.Client.AllocDir,
		"state-dir":  config.Client.StateDir,
	}
	for k, dir := range dirs {
		if dir == "" {
			continue
		}

		if !filepath.IsAbs(dir) {
			c.Ui.Error(fmt.Sprintf("%s must be given as an absolute path: got %v", k, dir))
			return false
		}
	}

	if config.Client.Enabled {
		for k := range config.Client.Meta {
			if !helper.IsValidInterpVariable(k) {
				c.Ui.Error(fmt.Sprintf("Invalid Client.Meta key: %v", k))
				return false
			}
		}
	}

	if err := config.Server.DefaultSchedulerConfig.Validate(); err != nil {
		c.Ui.Error(err.Error())
		return false
	}

	// Validate node pool name early to prevent agent from starting but the
	// client failing to register.
	if pool := config.Client.NodePool; pool != "" {
		if err := structs.ValidateNodePoolName(pool); err != nil {
			c.Ui.Error(fmt.Sprintf("Invalid node pool: %v", err))
			return false
		}
		if pool == structs.NodePoolAll {
			c.Ui.Error(fmt.Sprintf("Invalid node pool: node is not allowed to register in node pool %q", structs.NodePoolAll))
			return false
		}
	}

	for _, volumeConfig := range config.Client.HostVolumes {
		if volumeConfig.Path == "" {
			c.Ui.Error("Missing path in host_volume config")
			return false
		}
	}

	if config.Client.MinDynamicPort < 0 || config.Client.MinDynamicPort > structs.MaxValidPort {
		c.Ui.Error(fmt.Sprintf("Invalid dynamic port range: min_dynamic_port=%d", config.Client.MinDynamicPort))
		return false
	}
	if config.Client.MaxDynamicPort < 0 || config.Client.MaxDynamicPort > structs.MaxValidPort {
		c.Ui.Error(fmt.Sprintf("Invalid dynamic port range: max_dynamic_port=%d", config.Client.MaxDynamicPort))
		return false
	}
	if config.Client.MinDynamicPort > config.Client.MaxDynamicPort {
		c.Ui.Error(fmt.Sprintf("Invalid dynamic port range: min_dynamic_port=%d and max_dynamic_port=%d", config.Client.MinDynamicPort, config.Client.MaxDynamicPort))
		return false
	}

	if config.Client.Reserved == nil {
		// Coding error; should always be set by DefaultConfig()
		c.Ui.Error("client.reserved must be initialized. Please report a bug.")
		return false
	}

	if ports := config.Client.Reserved.ReservedPorts; ports != "" {
		if _, err := structs.ParsePortRanges(ports); err != nil {
			c.Ui.Error(fmt.Sprintf("reserved.reserved_ports %q invalid: %v", ports, err))
			return false
		}
	}

	for _, hn := range config.Client.HostNetworks {
		// Ensure port range is valid
		if _, err := structs.ParsePortRanges(hn.ReservedPorts); err != nil {
			c.Ui.Error(fmt.Sprintf("host_network[%q].reserved_ports %q invalid: %v",
				hn.Name, hn.ReservedPorts, err))
			return false
		}
	}

	if err := config.Client.Artifact.Validate(); err != nil {
		c.Ui.Error(fmt.Sprintf("client.artifact block invalid: %v", err))
		return false
	}

	if !config.DevMode {
		// Ensure that we have the directories we need to run.
		if config.Server.Enabled && config.DataDir == "" {
			c.Ui.Error(`Must specify "data_dir" config option or "data-dir" CLI flag`)
			return false
		}

		// The config is valid if the top-level data-dir is set or if both
		// alloc-dir and state-dir are set.
		if config.Client.Enabled && config.DataDir == "" {
			if config.Client.AllocDir == "" || config.Client.StateDir == "" || config.PluginDir == "" {
				c.Ui.Error("Must specify the state, alloc dir, and plugin dir if data-dir is omitted.")
				return false
			}
		}

		// Check the bootstrap flags
		if !config.Server.Enabled && cmdConfig.Server.BootstrapExpect > 0 {
			// report an error if BootstrapExpect is set in CLI but server is disabled
			c.Ui.Error("Bootstrap requires server mode to be enabled")
			return false
		}
		if config.Server.Enabled && config.Server.BootstrapExpect == 1 {
			c.Ui.Error("WARNING: Bootstrap mode enabled! Potentially unsafe operation.")
		}
		if config.Server.Enabled && config.Server.BootstrapExpect%2 == 0 {
			c.Ui.Error("WARNING: Number of bootstrap servers should ideally be set to an odd number.")
		}
	}

	// ProtocolVersion has never been used. Warn if it is set as someone
	// has probably made a mistake.
	if config.Server.ProtocolVersion != 0 {
		c.Ui.Warn("Please remove deprecated protocol_version field from config.")
	}

	return true
}

// SetupLoggers is used to set up the level filter, the gated log writer, and
// the final log output.
func SetupLoggers(ui cli.Ui, config *Config) (*logutils.LevelFilter, *gatedwriter.Writer, io.Writer) {
	// Setup logging. First create the gated log writer, which will
	// store logs until we're ready to show them. Then create the level
	// filter, filtering logs of the specified level.
	logGate := &gatedwriter.Writer{
		Writer: &cli.UiWriter{Ui: ui},
	}

	logFilter := LevelFilter()
	logFilter.MinLevel = logutils.LogLevel(strings.ToUpper(config.LogLevel))
	logFilter.Writer = logGate
	if !ValidateLevelFilter(logFilter.MinLevel, logFilter) {
		ui.Error(fmt.Sprintf(
			"Invalid log level: %s. Valid log levels are: %v",
			logFilter.MinLevel, logFilter.Levels))
		return nil, nil, nil
	}

	// Create a log writer, and wrap a logOutput around it
	writers := []io.Writer{logFilter}
	logLevel := strings.ToUpper(config.LogLevel)
	logLevelMap := map[string]gsyslog.Priority{
		"ERROR": gsyslog.LOG_ERR,
		"WARN":  gsyslog.LOG_WARNING,
		"INFO":  gsyslog.LOG_INFO,
		"DEBUG": gsyslog.LOG_DEBUG,
		"TRACE": gsyslog.LOG_DEBUG,
	}
	if logLevel == "OFF" {
		config.EnableSyslog = false
	}
	// Check if syslog is enabled
	if config.EnableSyslog {
		ui.Output(fmt.Sprintf("Config enable_syslog is `true` with log_level=%v", config.LogLevel))
		l, err := gsyslog.NewLogger(logLevelMap[logLevel], config.SyslogFacility, "nomad")
		if err != nil {
			ui.Error(fmt.Sprintf("Syslog setup failed: %v", err))
			return nil, nil, nil
		}
		writers = append(writers, &SyslogWrapper{l, logFilter})
	}

	// Check if file logging is enabled
	if config.LogFile != "" {
		dir, fileName := filepath.Split(config.LogFile)

		// if a path is provided, but has no filename, then a default is used.
		if fileName == "" {
			fileName = "nomad.log"
		}

		// Use the user-specified log rotation duration if one was provided
		var logRotateDuration time.Duration
		if config.LogRotateDuration != "" {
			duration, err := time.ParseDuration(config.LogRotateDuration)
			if err != nil {
				ui.Error(fmt.Sprintf("Failed to parse log rotation duration: %v", err))
				return nil, nil, nil
			}
			logRotateDuration = duration
		} else {
			// Default to 24 hrs if no rotation period is specified
			logRotateDuration = 24 * time.Hour
		}

		logFile := &logFile{
			logFilter: logFilter,
			fileName:  fileName,
			logPath:   dir,
			duration:  logRotateDuration,
			MaxBytes:  config.LogRotateBytes,
			MaxFiles:  config.LogRotateMaxFiles,
		}

		writers = append(writers, logFile)
	}

	logOutput := io.MultiWriter(writers...)
	return logFilter, logGate, logOutput
}
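
// A minimal sketch of how SetupLoggers is consumed (this mirrors what Run
// does below; ui and config are assumed to be in scope):
//
//	logFilter, logGate, logOutput := SetupLoggers(ui, config)
//	if logGate == nil {
//		return 1 // errors were already reported on ui
//	}
//	// ... once the startup banner has been printed:
//	logGate.Flush()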

// setupAgent is used to start the agent and various interfaces
func (c *Command) setupAgent(config *Config, logger hclog.InterceptLogger, logOutput io.Writer, inmem *metrics.InmemSink) error {
	c.Ui.Output("Starting Nomad agent...")

	agent, err := NewAgent(config, logger, logOutput, inmem)
	if err != nil {
		// log the error as well, so it appears at the end
		logger.Error("error starting agent", "error", err)
		c.Ui.Error(fmt.Sprintf("Error starting agent: %s", err))
		return err
	}
	c.agent = agent

	// Setup the HTTP server
	httpServers, err := NewHTTPServers(agent, config)
	if err != nil {
		agent.Shutdown()
		c.Ui.Error(fmt.Sprintf("Error starting http server: %s", err))
		return err
	}
	c.httpServers = httpServers

	// If DisableUpdateCheck is not enabled, set up update checking
	// (DisableUpdateCheck is false by default)
	if config.DisableUpdateCheck != nil && !*config.DisableUpdateCheck {
		version := config.Version.Version
		if config.Version.VersionPrerelease != "" {
			version += fmt.Sprintf("-%s", config.Version.VersionPrerelease)
		}
		updateParams := &checkpoint.CheckParams{
			Product: "nomad",
			Version: version,
		}
		if !config.DisableAnonymousSignature {
			updateParams.SignatureFile = filepath.Join(config.DataDir, "checkpoint-signature")
		}

		// Schedule a periodic check with expected interval of 24 hours
		checkpoint.CheckInterval(updateParams, 24*time.Hour, c.checkpointResults)

		// Do an immediate check within the next 30 seconds
		go func() {
			time.Sleep(helper.RandomStagger(30 * time.Second))
			c.checkpointResults(checkpoint.Check(updateParams))
		}()
	}

	return nil
}

// checkpointResults is used to handle periodic results from our update checker
func (c *Command) checkpointResults(results *checkpoint.CheckResponse, err error) {
	if err != nil {
		c.Ui.Error(fmt.Sprintf("Failed to check for updates: %v", err))
		return
	}
	if results.Outdated {
		c.Ui.Error(fmt.Sprintf("Newer Nomad version available: %s (currently running: %s)", results.CurrentVersion, c.Version.VersionNumber()))
	}
	for _, alert := range results.Alerts {
		switch alert.Level {
		case "info":
			c.Ui.Info(fmt.Sprintf("Bulletin [%s]: %s (%s)", alert.Level, alert.Message, alert.URL))
		default:
			c.Ui.Error(fmt.Sprintf("Bulletin [%s]: %s (%s)", alert.Level, alert.Message, alert.URL))
		}
	}
}
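
// AutocompleteFlags returns the predictors used for CLI autocompletion of the
// agent command's flags.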
func (c *Command) AutocompleteFlags() complete.Flags {
	configFilePredictor := complete.PredictOr(
		complete.PredictFiles("*.json"),
		complete.PredictFiles("*.hcl"))

	return map[string]complete.Predictor{
		"-dev":                           complete.PredictNothing,
		"-dev-connect":                   complete.PredictNothing,
		"-server":                        complete.PredictNothing,
		"-client":                        complete.PredictNothing,
		"-bootstrap-expect":              complete.PredictAnything,
		"-encrypt":                       complete.PredictAnything,
		"-raft-protocol":                 complete.PredictAnything,
		"-rejoin":                        complete.PredictNothing,
		"-join":                          complete.PredictAnything,
		"-retry-join":                    complete.PredictAnything,
		"-retry-max":                     complete.PredictAnything,
		"-state-dir":                     complete.PredictDirs("*"),
		"-alloc-dir":                     complete.PredictDirs("*"),
		"-node-class":                    complete.PredictAnything,
		"-node-pool":                     complete.PredictAnything,
		"-servers":                       complete.PredictAnything,
		"-meta":                          complete.PredictAnything,
		"-config":                        configFilePredictor,
		"-bind":                          complete.PredictAnything,
		"-region":                        complete.PredictAnything,
		"-data-dir":                      complete.PredictDirs("*"),
		"-plugin-dir":                    complete.PredictDirs("*"),
		"-dc":                            complete.PredictAnything,
		"-log-level":                     complete.PredictAnything,
		"-log-json":                      complete.PredictNothing,
		"-node":                          complete.PredictAnything,
		"-consul-auth":                   complete.PredictAnything,
		"-consul-auto-advertise":         complete.PredictNothing,
		"-consul-ca-file":                complete.PredictAnything,
		"-consul-cert-file":              complete.PredictAnything,
		"-consul-key-file":               complete.PredictAnything,
		"-consul-checks-use-advertise":   complete.PredictNothing,
		"-consul-client-auto-join":       complete.PredictNothing,
		"-consul-client-service-name":    complete.PredictAnything,
		"-consul-client-http-check-name": complete.PredictAnything,
		"-consul-server-service-name":    complete.PredictAnything,
		"-consul-server-http-check-name": complete.PredictAnything,
		"-consul-server-serf-check-name": complete.PredictAnything,
		"-consul-server-rpc-check-name":  complete.PredictAnything,
		"-consul-server-auto-join":       complete.PredictNothing,
		"-consul-ssl":                    complete.PredictNothing,
		"-consul-verify-ssl":             complete.PredictNothing,
		"-consul-address":                complete.PredictAnything,
		"-consul-token":                  complete.PredictAnything,
		"-vault-enabled":                 complete.PredictNothing,
		"-vault-allow-unauthenticated":   complete.PredictNothing,
		"-vault-token":                   complete.PredictAnything,
		"-vault-address":                 complete.PredictAnything,
		"-vault-create-from-role":        complete.PredictAnything,
		"-vault-ca-file":                 complete.PredictAnything,
		"-vault-ca-path":                 complete.PredictAnything,
		"-vault-cert-file":               complete.PredictAnything,
		"-vault-key-file":                complete.PredictAnything,
		"-vault-tls-skip-verify":         complete.PredictNothing,
		"-vault-tls-server-name":         complete.PredictAnything,
		"-acl-enabled":                   complete.PredictNothing,
		"-acl-replication-token":         complete.PredictAnything,
	}
}

func (c *Command) AutocompleteArgs() complete.Predictor {
	return nil
}
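
// Run parses the configuration, sets up logging and telemetry, starts the
// agent and its HTTP servers, performs any startup join, and then blocks
// until shutdown. It returns the process exit code.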
func (c *Command) Run(args []string) int {
	c.Ui = &cli.PrefixedUi{
		OutputPrefix: "==> ",
		InfoPrefix:   " ",
		ErrorPrefix:  "==> ",
		Ui:           c.Ui,
	}

	// Parse our configs
	c.args = args
	config := c.readConfig()
	if config == nil {
		return 1
	}

	// reset UI to prevent prefixed json output
	if config.LogJson {
		c.Ui = &cli.BasicUi{
			Reader:      os.Stdin,
			Writer:      os.Stdout,
			ErrorWriter: os.Stderr,
		}
	}

	// Setup the log outputs
	logFilter, logGate, logOutput := SetupLoggers(c.Ui, config)
	c.logFilter = logFilter
	c.logOutput = logOutput
	if logGate == nil {
		return 1
	}

	// Create logger
	logger := hclog.NewInterceptLogger(&hclog.LoggerOptions{
		Name:       "agent",
		Level:      hclog.LevelFromString(config.LogLevel),
		Output:     logOutput,
		JSONFormat: config.LogJson,
	})

	// Wrap log messages emitted with the 'log' package.
	// These usually come from external dependencies.
	log.SetOutput(logger.StandardWriter(&hclog.StandardLoggerOptions{
		InferLevels:              true,
		InferLevelsWithTimestamp: true,
	}))
	log.SetPrefix("")
	log.SetFlags(0)

	// Swap out UI implementation if json logging is enabled
	if config.LogJson {
		c.Ui = &logging.HcLogUI{Log: logger}
		// Don't buffer json logs because they aren't reordered anyway.
		logGate.Flush()
	}

	// Log config files
	if len(config.Files) > 0 {
		c.Ui.Output(fmt.Sprintf("Loaded configuration from %s", strings.Join(config.Files, ", ")))
	} else {
		c.Ui.Output("No configuration files loaded")
	}

	// Initialize the telemetry
	inmem, err := c.setupTelemetry(config)
	if err != nil {
		c.Ui.Error(fmt.Sprintf("Error initializing telemetry: %s", err))
		return 1
	}

	// Create the agent
	if err := c.setupAgent(config, logger, logOutput, inmem); err != nil {
		logGate.Flush()
		return 1
	}

	defer func() {
		c.agent.Shutdown()

		// Shutdown the http server at the end, to ease debugging if
		// the agent takes long to shutdown
		if len(c.httpServers) > 0 {
			for _, srv := range c.httpServers {
				srv.Shutdown()
			}
		}
	}()

	// Join startup nodes if specified
	if err := c.startupJoin(config); err != nil {
		c.Ui.Error(err.Error())
		return 1
	}

	// Compile agent information for output later
	info := make(map[string]string)
	info["version"] = config.Version.VersionNumber()
	info["client"] = strconv.FormatBool(config.Client.Enabled)
	info["log level"] = config.LogLevel
	info["server"] = strconv.FormatBool(config.Server.Enabled)
	info["region"] = fmt.Sprintf("%s (DC: %s)", config.Region, config.Datacenter)
	info["bind addrs"] = c.getBindAddrSynopsis()
	info["advertise addrs"] = c.getAdvertiseAddrSynopsis()
	if config.Server.Enabled {
		serverConfig, err := c.agent.serverConfig()
		if err == nil {
			info["node id"] = serverConfig.NodeID
		}
	}

	// Sort the keys for output
	infoKeys := make([]string, 0, len(info))
	for key := range info {
		infoKeys = append(infoKeys, key)
	}
	sort.Strings(infoKeys)

	// Agent configuration output
	padding := 18
	c.Ui.Output("Nomad agent configuration:\n")
	for _, k := range infoKeys {
		c.Ui.Info(fmt.Sprintf(
			"%s%s: %s",
			strings.Repeat(" ", padding-len(k)),
			strings.Title(k),
			info[k]))
	}
	c.Ui.Output("")

	// Output the header that the server has started
	c.Ui.Output("Nomad agent started! Log data will stream in below:\n")

	// Enable log streaming
	logGate.Flush()

	// Start retry join process
	if err := c.handleRetryJoin(config); err != nil {
		c.Ui.Error(err.Error())
		return 1
	}

	// Wait for exit
	return c.handleSignals()
}
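
// Retry joining acts on configuration such as the following HCL sketch (the
// addresses and values here are illustrative only):
//
//	server {
//	  enabled = true
//	  server_join {
//	    retry_join     = ["10.0.0.1", "10.0.0.2"]
//	    retry_max      = 3
//	    retry_interval = "15s"
//	  }
//	}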

// handleRetryJoin is used to start retry joining if it is configured.
func (c *Command) handleRetryJoin(config *Config) error {
	c.retryJoinErrCh = make(chan struct{})

	if config.Server.Enabled && len(config.Server.RetryJoin) != 0 {
		joiner := retryJoiner{
			discover:      &discover.Discover{},
			errCh:         c.retryJoinErrCh,
			logger:        c.agent.logger.Named("joiner"),
			serverJoin:    c.agent.server.Join,
			serverEnabled: true,
		}

		if err := joiner.Validate(config); err != nil {
			return err
		}

		// Remove the duplicate fields
		if len(config.Server.RetryJoin) != 0 {
			config.Server.ServerJoin.RetryJoin = config.Server.RetryJoin
			config.Server.RetryJoin = nil
		}
		if config.Server.RetryMaxAttempts != 0 {
			config.Server.ServerJoin.RetryMaxAttempts = config.Server.RetryMaxAttempts
			config.Server.RetryMaxAttempts = 0
		}
		if config.Server.RetryInterval != 0 {
			config.Server.ServerJoin.RetryInterval = config.Server.RetryInterval
			config.Server.RetryInterval = 0
		}

		c.agent.logger.Warn("using deprecated retry_join fields. Upgrade configuration to use server_join")
	}

	if config.Server.Enabled &&
		config.Server.ServerJoin != nil &&
		len(config.Server.ServerJoin.RetryJoin) != 0 {

		joiner := retryJoiner{
			discover:      &discover.Discover{},
			errCh:         c.retryJoinErrCh,
			logger:        c.agent.logger.Named("joiner"),
			serverJoin:    c.agent.server.Join,
			serverEnabled: true,
		}

		if err := joiner.Validate(config); err != nil {
			return err
		}

		go joiner.RetryJoin(config.Server.ServerJoin)
	}

	if config.Client.Enabled &&
		config.Client.ServerJoin != nil &&
		len(config.Client.ServerJoin.RetryJoin) != 0 {
		joiner := retryJoiner{
			discover:      &discover.Discover{},
			errCh:         c.retryJoinErrCh,
			logger:        c.agent.logger.Named("joiner"),
			clientJoin:    c.agent.client.SetServers,
			clientEnabled: true,
		}

		if err := joiner.Validate(config); err != nil {
			return err
		}

		go joiner.RetryJoin(config.Client.ServerJoin)
	}

	return nil
}

// handleSignals blocks until we get an exit-causing signal
func (c *Command) handleSignals() int {
	signalCh := make(chan os.Signal, 4)
	signal.Notify(signalCh, os.Interrupt, syscall.SIGTERM, syscall.SIGHUP, syscall.SIGPIPE)

	// Wait for a signal
WAIT:
	var sig os.Signal
	select {
	case s := <-signalCh:
		sig = s
	case <-winsvc.ShutdownChannel():
		sig = os.Interrupt
	case <-c.ShutdownCh:
		sig = os.Interrupt
	case <-c.retryJoinErrCh:
		return 1
	}

	// Skip any SIGPIPE signal and don't try to log it (See issues #1798, #3554)
	if sig == syscall.SIGPIPE {
		goto WAIT
	}

	c.Ui.Output(fmt.Sprintf("Caught signal: %v", sig))

	// Check if this is a SIGHUP
	if sig == syscall.SIGHUP {
		c.handleReload()
		goto WAIT
	}

	// Check if we should do a graceful leave
	graceful := false
	if sig == os.Interrupt && c.agent.GetConfig().LeaveOnInt {
		graceful = true
	} else if sig == syscall.SIGTERM && c.agent.GetConfig().LeaveOnTerm {
		graceful = true
	}

	// Bail fast if not doing a graceful leave
	if !graceful {
		return 1
	}

	// Attempt a graceful leave
	gracefulCh := make(chan struct{})
	c.Ui.Output("Gracefully shutting down agent...")
	go func() {
		if err := c.agent.Leave(); err != nil {
			c.Ui.Error(fmt.Sprintf("Error: %s", err))
			return
		}
		close(gracefulCh)
	}()

	// Wait for leave or another signal
	select {
	case <-signalCh:
		return 1
	case <-time.After(gracefulTimeout):
		return 1
	case <-gracefulCh:
		return 0
	}
}

// reloadHTTPServer shuts down the existing HTTP server and restarts it. This
// is helpful when reloading the agent configuration.
func (c *Command) reloadHTTPServer() error {
	c.agent.logger.Info("reloading HTTP server with new TLS configuration")

	for _, srv := range c.httpServers {
		srv.Shutdown()
	}

	httpServers, err := NewHTTPServers(c.agent, c.agent.config)
	if err != nil {
		return err
	}
	c.httpServers = httpServers

	return nil
}
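
// Operationally, a configuration reload is triggered by sending SIGHUP to the
// running agent process, for example (illustrative):
//
//	kill -HUP <nomad-agent-pid>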

// handleReload is invoked when we should reload our configs, e.g. SIGHUP
func (c *Command) handleReload() {
	c.Ui.Output("Reloading configuration...")
	newConf := c.readConfig()
	if newConf == nil {
		c.Ui.Error("Failed to reload configs")
		return
	}

	// Change the log level
	minLevel := logutils.LogLevel(strings.ToUpper(newConf.LogLevel))
	if ValidateLevelFilter(minLevel, c.logFilter) {
		c.logFilter.SetMinLevel(minLevel)
	} else {
		c.Ui.Error(fmt.Sprintf(
			"Invalid log level: %s. Valid log levels are: %v",
			minLevel, c.logFilter.Levels))

		// Keep the current log level
		newConf.LogLevel = c.agent.GetConfig().LogLevel
	}

	shouldReloadAgent, shouldReloadHTTP := c.agent.ShouldReload(newConf)
	if shouldReloadAgent {
		c.agent.logger.Debug("starting reload of agent config")
		err := c.agent.Reload(newConf)
		if err != nil {
			c.agent.logger.Error("failed to reload the config", "error", err)
			return
		}
	}

	if s := c.agent.Server(); s != nil {
		c.agent.logger.Debug("starting reload of server config")
		sconf, err := convertServerConfig(newConf)
		if err != nil {
			c.agent.logger.Error("failed to convert server config", "error", err)
			return
		}

		// Finalize the config to get the agent objects injected in
		c.agent.finalizeServerConfig(sconf)

		// Reload the config
		if err := s.Reload(sconf); err != nil {
			c.agent.logger.Error("reloading server config failed", "error", err)
			return
		}
	}

	if client := c.agent.Client(); client != nil {
		c.agent.logger.Debug("starting reload of client config")
		clientConfig, err := convertClientConfig(newConf)
		if err != nil {
			c.agent.logger.Error("failed to convert client config", "error", err)
			return
		}

		// Finalize the config to get the agent objects injected in
		if err := c.agent.finalizeClientConfig(clientConfig); err != nil {
			c.agent.logger.Error("failed to finalize client config", "error", err)
			return
		}

		if err := client.Reload(clientConfig); err != nil {
			c.agent.logger.Error("reloading client config failed", "error", err)
			return
		}
	}

	// reload HTTP server after we have reloaded both client and server, in case
	// we error in either of the above cases. For example, reloading the http
	// server to a TLS connection could succeed, while reloading the server's rpc
	// connections could fail.
	if shouldReloadHTTP {
		err := c.reloadHTTPServer()
		if err != nil {
			c.agent.httpLogger.Error("reloading config failed", "error", err)
			return
		}
	}
}

// setupTelemetry is used to set up the telemetry sub-systems
func (c *Command) setupTelemetry(config *Config) (*metrics.InmemSink, error) {
	/* Setup telemetry
	Aggregate on 10 second intervals for 1 minute. Expose the
	metrics over stderr when there is a SIGUSR1 received.
	*/
	inm := metrics.NewInmemSink(10*time.Second, time.Minute)
	metrics.DefaultInmemSignal(inm)

	var telConfig *Telemetry
	if config.Telemetry == nil {
		telConfig = &Telemetry{}
	} else {
		telConfig = config.Telemetry
	}

	metricsConf := metrics.DefaultConfig("nomad")
	metricsConf.EnableHostname = !telConfig.DisableHostname

	// Prefer the hostname as a label.
	metricsConf.EnableHostnameLabel = !telConfig.DisableHostname

	if telConfig.UseNodeName {
		metricsConf.HostName = config.NodeName
		metricsConf.EnableHostname = true
	}

	allowedPrefixes, blockedPrefixes, err := telConfig.PrefixFilters()
	if err != nil {
		return inm, err
	}

	metricsConf.AllowedPrefixes = allowedPrefixes
	metricsConf.BlockedPrefixes = blockedPrefixes

	if telConfig.FilterDefault != nil {
		metricsConf.FilterDefault = *telConfig.FilterDefault
	}

	// Configure the statsite sink
	var fanout metrics.FanoutSink
	if telConfig.StatsiteAddr != "" {
		sink, err := metrics.NewStatsiteSink(telConfig.StatsiteAddr)
		if err != nil {
			return inm, err
		}
		fanout = append(fanout, sink)
	}

	// Configure the statsd sink
	if telConfig.StatsdAddr != "" {
		sink, err := metrics.NewStatsdSink(telConfig.StatsdAddr)
		if err != nil {
			return inm, err
		}
		fanout = append(fanout, sink)
	}

	// Configure the prometheus sink
	if telConfig.PrometheusMetrics {
		promSink, err := prometheus.NewPrometheusSink()
		if err != nil {
			return inm, err
		}
		fanout = append(fanout, promSink)
	}

	// Configure the datadog sink
	if telConfig.DataDogAddr != "" {
		sink, err := datadog.NewDogStatsdSink(telConfig.DataDogAddr, config.NodeName)
		if err != nil {
			return inm, err
		}
		sink.SetTags(telConfig.DataDogTags)
		fanout = append(fanout, sink)
	}

	// Configure the Circonus sink
	if telConfig.CirconusAPIToken != "" || telConfig.CirconusCheckSubmissionURL != "" {
		cfg := &circonus.Config{}
		cfg.Interval = telConfig.CirconusSubmissionInterval
		cfg.CheckManager.API.TokenKey = telConfig.CirconusAPIToken
		cfg.CheckManager.API.TokenApp = telConfig.CirconusAPIApp
		cfg.CheckManager.API.URL = telConfig.CirconusAPIURL
		cfg.CheckManager.Check.SubmissionURL = telConfig.CirconusCheckSubmissionURL
		cfg.CheckManager.Check.ID = telConfig.CirconusCheckID
		cfg.CheckManager.Check.ForceMetricActivation = telConfig.CirconusCheckForceMetricActivation
		cfg.CheckManager.Check.InstanceID = telConfig.CirconusCheckInstanceID
		cfg.CheckManager.Check.SearchTag = telConfig.CirconusCheckSearchTag
		cfg.CheckManager.Check.Tags = telConfig.CirconusCheckTags
		cfg.CheckManager.Check.DisplayName = telConfig.CirconusCheckDisplayName
		cfg.CheckManager.Broker.ID = telConfig.CirconusBrokerID
		cfg.CheckManager.Broker.SelectTag = telConfig.CirconusBrokerSelectTag

		if cfg.CheckManager.Check.DisplayName == "" {
			cfg.CheckManager.Check.DisplayName = "Nomad"
		}

		if cfg.CheckManager.API.TokenApp == "" {
			cfg.CheckManager.API.TokenApp = "nomad"
		}

		if cfg.CheckManager.Check.SearchTag == "" {
			cfg.CheckManager.Check.SearchTag = "service:nomad"
		}

		sink, err := circonus.NewCirconusSink(cfg)
		if err != nil {
			return inm, err
		}
		sink.Start()
		fanout = append(fanout, sink)
	}

	// Initialize the global sink
	if len(fanout) > 0 {
		fanout = append(fanout, inm)
		metrics.NewGlobal(metricsConf, fanout)
	} else {
		metricsConf.EnableHostname = false
		metrics.NewGlobal(metricsConf, inm)
	}

	return inm, nil
}

func (c *Command) startupJoin(config *Config) error {
	// Nothing to do
	if !config.Server.Enabled {
		return nil
	}

	// Validate both old and new aren't being set
	old := len(config.Server.StartJoin)
	var new int
	if config.Server.ServerJoin != nil {
		new = len(config.Server.ServerJoin.StartJoin)
	}
	if old != 0 && new != 0 {
		return fmt.Errorf("server_join and start_join cannot both be defined; prefer setting the server_join block")
	}

	// Nothing to do
	if old+new == 0 {
		return nil
	}

	// Combine the lists and join
	joining := config.Server.StartJoin
	if new != 0 {
		joining = append(joining, config.Server.ServerJoin.StartJoin...)
	}

	c.Ui.Output("Joining cluster...")
	n, err := c.agent.server.Join(joining)
	if err != nil {
		return err
	}

	c.Ui.Output(fmt.Sprintf("Join completed. Synced with %d initial agents", n))
	return nil
}

// getBindAddrSynopsis returns a string that describes the addresses the agent
// is bound to.
func (c *Command) getBindAddrSynopsis() string {
	if c == nil || c.agent == nil || c.agent.config == nil || c.agent.config.normalizedAddrs == nil {
		return ""
	}

	b := new(strings.Builder)
	fmt.Fprintf(b, "HTTP: %s", c.agent.config.normalizedAddrs.HTTP)

	if c.agent.server != nil {
		if c.agent.config.normalizedAddrs.RPC != "" {
			fmt.Fprintf(b, "; RPC: %s", c.agent.config.normalizedAddrs.RPC)
		}
		if c.agent.config.normalizedAddrs.Serf != "" {
			fmt.Fprintf(b, "; Serf: %s", c.agent.config.normalizedAddrs.Serf)
		}
	}

	return b.String()
}

// getAdvertiseAddrSynopsis returns a string that describes the addresses the agent
// is advertising.
func (c *Command) getAdvertiseAddrSynopsis() string {
	if c == nil || c.agent == nil || c.agent.config == nil || c.agent.config.AdvertiseAddrs == nil {
		return ""
	}

	b := new(strings.Builder)
	fmt.Fprintf(b, "HTTP: %s", c.agent.config.AdvertiseAddrs.HTTP)

	if c.agent.server != nil {
		if c.agent.config.AdvertiseAddrs.RPC != "" {
			fmt.Fprintf(b, "; RPC: %s", c.agent.config.AdvertiseAddrs.RPC)
		}
		if c.agent.config.AdvertiseAddrs.Serf != "" {
			fmt.Fprintf(b, "; Serf: %s", c.agent.config.AdvertiseAddrs.Serf)
		}
	}

	return b.String()
}

func (c *Command) Synopsis() string {
	return "Runs a Nomad agent"
}
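
// Help returns the long-form usage text for the agent command.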
func (c *Command) Help() string {
	helpText := `
Usage: nomad agent [options]

  Starts the Nomad agent and runs until an interrupt is received.
  The agent may be a client and/or server.

  The Nomad agent's configuration primarily comes from the config
  files used, but a subset of the options may also be passed directly
  as CLI arguments, listed below.

General Options (clients and servers):

  -bind=<addr>
    The address the agent will bind to for all of its various network
    services. The individual services that run bind to individual
    ports on this address. Defaults to the loopback 127.0.0.1.

  -config=<path>
    The path to either a single config file or a directory of config
    files to use for configuring the Nomad agent. This option may be
    specified multiple times. If multiple config files are used, the
    values from each will be merged together. During merging, values
    from files found later in the list are merged over values from
    previously parsed files.

  -data-dir=<path>
    The data directory used to store state and other persistent data.
    On client machines this is used to house allocation data such as
    downloaded artifacts used by drivers. On server nodes, the data
    dir is also used to store the replicated log.

  -plugin-dir=<path>
    The plugin directory is used to discover Nomad plugins. If not specified,
    the plugin directory defaults to <data-dir>/plugins/.

  -dc=<datacenter>
    The name of the datacenter this Nomad agent is a member of. By
    default this is set to "dc1".

  -log-level=<level>
    Specify the verbosity level of Nomad's logs. Valid values include
    DEBUG, INFO, and WARN, in decreasing order of verbosity. The
    default is INFO.

  -log-json
    Output logs in a JSON format. The default is false.

  -node=<name>
    The name of the local agent. This name is used to identify the node
    in the cluster. The name must be unique per region. The default is
    the current hostname of the machine.

  -region=<region>
    Name of the region the Nomad agent will be a member of. By default
    this value is set to "global".

  -dev
    Start the agent in development mode. This enables a pre-configured
    dual-role agent (client + server) which is useful for developing
    or testing Nomad. No other configuration is required to start the
    agent in this mode, but you may pass an optional comma-separated
    list of mode configurations:

  -dev-connect
    Start the agent in development mode, but bind to a public network
    interface rather than localhost for using Consul Connect. This
    mode is supported only on Linux as root.

Server Options:

  -server
    Enable server mode for the agent. Agents in server mode are
    clustered together and handle the additional responsibility of
    leader election, data replication, and scheduling work onto
    eligible client nodes.

  -bootstrap-expect=<num>
    Configures the expected number of server nodes to wait for before
    bootstrapping the cluster. Once <num> servers have joined each other,
    Nomad initiates the bootstrap process.

  -encrypt=<key>
    Provides the gossip encryption key.

  -join=<address>
    Address of an agent to join at start time. Can be specified
    multiple times.

  -raft-protocol=<num>
    The Raft protocol version to use. Used for enabling certain Autopilot
    features. Defaults to 2.

  -retry-join=<address>
    Address of an agent to join at start time with retries enabled.
    Can be specified multiple times.

  -retry-max=<num>
    Maximum number of join attempts. Defaults to 0, which will retry
    indefinitely.

  -retry-interval=<dur>
    Time to wait between join attempts.

  -rejoin
    Ignore a previous leave and attempt to rejoin the cluster.

Client Options:

  -client
    Enable client mode for the agent. Client mode enables a given node to be
    evaluated for allocations. If client mode is not enabled, no work will be
    scheduled to the agent.

  -state-dir
    The directory used to store state and other persistent data. If not
    specified, a subdirectory under the "-data-dir" will be used.

  -alloc-dir
    The directory used to store allocation data such as downloaded artifacts as
    well as data produced by tasks. If not specified, a subdirectory under the
    "-data-dir" will be used.

  -servers
    A list of known server addresses to connect to given as "host:port" and
    delimited by commas.

  -node-class
    Mark this node as a member of a node-class. This can be used to label
    similar node types.

  -node-pool
    Register this node in this node pool. If the node pool does not exist it
    will be created automatically if the node registers in the authoritative
    region. In non-authoritative regions, the node is kept in the
    'initializing' status until the node pool is created and replicated.

  -meta
    User specified metadata to associate with the node. Each instance of -meta
    parses a single KEY=VALUE pair. Repeat the meta flag for each key/value pair
    to be added.

  -network-interface
    Forces the network fingerprinter to use the specified network interface.

  -network-speed
    The default speed for network interfaces in MBits if the link speed can not
    be determined dynamically.

ACL Options:

  -acl-enabled
    Specifies whether the agent should enable ACLs.

  -acl-replication-token
    The replication token for servers to use when replicating from the
    authoritative region. The token must be a valid management token from the
    authoritative region.

Consul Options:

  -consul-address=<addr>
    Specifies the address to the local Consul agent, given in the format host:port.
    Supports Unix sockets with the format: unix:///tmp/consul/consul.sock

  -consul-auth=<auth>
    Specifies the HTTP Basic Authentication information to use for access to the
    Consul Agent, given in the format username:password.

  -consul-auto-advertise
    Specifies if Nomad should advertise its services in Consul. The services
    are named according to server_service_name and client_service_name. Nomad
    servers and clients advertise their respective services, each tagged
    appropriately with either http or rpc tag. Nomad servers also advertise a
    serf tagged service.

  -consul-ca-file=<path>
    Specifies an optional path to the CA certificate used for Consul communication.
    This defaults to the system bundle if unspecified.

  -consul-cert-file=<path>
    Specifies the path to the certificate used for Consul communication. If this
    is set then you need to also set key_file.

  -consul-checks-use-advertise
    Specifies if Consul health checks should bind to the advertise address. By
    default, this is the bind address.

  -consul-client-auto-join
    Specifies if the Nomad clients should automatically discover servers in the
    same region by searching for the Consul service name defined in the
    server_service_name option.

  -consul-client-service-name=<name>
    Specifies the name of the service in Consul for the Nomad clients.

  -consul-client-http-check-name=<name>
    Specifies the HTTP health check name in Consul for the Nomad clients.

  -consul-key-file=<path>
    Specifies the path to the private key used for Consul communication. If this
    is set then you need to also set cert_file.

  -consul-server-service-name=<name>
    Specifies the name of the service in Consul for the Nomad servers.

  -consul-server-http-check-name=<name>
    Specifies the HTTP health check name in Consul for the Nomad servers.

  -consul-server-serf-check-name=<name>
    Specifies the Serf health check name in Consul for the Nomad servers.

  -consul-server-rpc-check-name=<name>
    Specifies the RPC health check name in Consul for the Nomad servers.

  -consul-server-auto-join
    Specifies if the Nomad servers should automatically discover and join other
    Nomad servers by searching for the Consul service name defined in the
    server_service_name option. This search only happens if the server does not
    have a leader.

  -consul-ssl
    Specifies if the transport scheme should use HTTPS to communicate with the
    Consul agent.

  -consul-token=<token>
    Specifies the token used to provide a per-request ACL token.

  -consul-verify-ssl
    Specifies if SSL peer verification should be used when communicating to the
    Consul API client over HTTPS.

Vault Options:

  -vault-enabled
    Whether to enable or disable Vault integration.

  -vault-address=<addr>
    The address to communicate with Vault. This should be provided with the http://
    or https:// prefix.

  -vault-token=<token>
    The Vault token used to derive tokens from Vault on behalf of clients.
    This only needs to be set on Servers. Overrides the Vault token read from
    the VAULT_TOKEN environment variable.

  -vault-create-from-role=<role>
    The role name to create tokens for tasks from.

  -vault-allow-unauthenticated
    Whether to allow jobs to be submitted that request Vault Tokens but do not
    authenticate. The flag only applies to Servers.

  -vault-ca-file=<path>
    The path to a PEM-encoded CA cert file to use to verify the Vault server SSL
    certificate.

  -vault-ca-path=<path>
    The path to a directory of PEM-encoded CA cert files to verify the Vault server
    certificate.

  -vault-cert-file=<path>
    The path to the certificate for Vault communication.

  -vault-key-file=<path>
    The path to the private key for Vault communication.

  -vault-tls-skip-verify
    Enables or disables SSL certificate verification.

  -vault-tls-server-name=<name>
    Used to set the SNI host when connecting over TLS.
`
	return strings.TrimSpace(helpText)
}