github.com/smithx10/nomad@v0.9.1-rc1/command/agent/command.go (about) 1 package agent 2 3 import ( 4 "flag" 5 "fmt" 6 "io" 7 "log" 8 "os" 9 "os/signal" 10 "path/filepath" 11 "reflect" 12 "sort" 13 "strconv" 14 "strings" 15 "syscall" 16 "time" 17 18 "github.com/armon/go-metrics" 19 "github.com/armon/go-metrics/circonus" 20 "github.com/armon/go-metrics/datadog" 21 "github.com/armon/go-metrics/prometheus" 22 "github.com/hashicorp/consul/lib" 23 "github.com/hashicorp/go-checkpoint" 24 "github.com/hashicorp/go-discover" 25 "github.com/hashicorp/go-hclog" 26 gsyslog "github.com/hashicorp/go-syslog" 27 "github.com/hashicorp/logutils" 28 "github.com/hashicorp/nomad/helper" 29 flaghelper "github.com/hashicorp/nomad/helper/flag-helpers" 30 gatedwriter "github.com/hashicorp/nomad/helper/gated-writer" 31 "github.com/hashicorp/nomad/helper/logging" 32 "github.com/hashicorp/nomad/nomad/structs/config" 33 "github.com/hashicorp/nomad/version" 34 "github.com/mitchellh/cli" 35 "github.com/posener/complete" 36 ) 37 38 // gracefulTimeout controls how long we wait before forcefully terminating 39 const gracefulTimeout = 5 * time.Second 40 41 // Command is a Command implementation that runs a Nomad agent. 42 // The command will not end unless a shutdown message is sent on the 43 // ShutdownCh. If two messages are sent on the ShutdownCh it will forcibly 44 // exit. 45 type Command struct { 46 Version *version.VersionInfo 47 Ui cli.Ui 48 ShutdownCh <-chan struct{} 49 50 args []string 51 agent *Agent 52 httpServer *HTTPServer 53 logFilter *logutils.LevelFilter 54 logOutput io.Writer 55 retryJoinErrCh chan struct{} 56 } 57 58 func (c *Command) readConfig() *Config { 59 var dev bool 60 var configPath []string 61 var servers string 62 var meta []string 63 64 // Make a new, empty config. 
65 cmdConfig := &Config{ 66 Client: &ClientConfig{}, 67 Consul: &config.ConsulConfig{}, 68 Ports: &Ports{}, 69 Server: &ServerConfig{ 70 ServerJoin: &ServerJoin{}, 71 }, 72 Vault: &config.VaultConfig{}, 73 ACL: &ACLConfig{}, 74 } 75 76 flags := flag.NewFlagSet("agent", flag.ContinueOnError) 77 flags.Usage = func() { c.Ui.Error(c.Help()) } 78 79 // Role options 80 flags.BoolVar(&dev, "dev", false, "") 81 flags.BoolVar(&cmdConfig.Server.Enabled, "server", false, "") 82 flags.BoolVar(&cmdConfig.Client.Enabled, "client", false, "") 83 84 // Server-only options 85 flags.IntVar(&cmdConfig.Server.BootstrapExpect, "bootstrap-expect", 0, "") 86 flags.StringVar(&cmdConfig.Server.EncryptKey, "encrypt", "", "gossip encryption key") 87 flags.IntVar(&cmdConfig.Server.RaftProtocol, "raft-protocol", 0, "") 88 flags.BoolVar(&cmdConfig.Server.RejoinAfterLeave, "rejoin", false, "") 89 flags.Var((*flaghelper.StringFlag)(&cmdConfig.Server.ServerJoin.StartJoin), "join", "") 90 flags.Var((*flaghelper.StringFlag)(&cmdConfig.Server.ServerJoin.RetryJoin), "retry-join", "") 91 flags.IntVar(&cmdConfig.Server.ServerJoin.RetryMaxAttempts, "retry-max", 0, "") 92 flags.Var((flaghelper.FuncDurationVar)(func(d time.Duration) error { 93 cmdConfig.Server.ServerJoin.RetryInterval = d 94 return nil 95 }), "retry-interval", "") 96 97 // Client-only options 98 flags.StringVar(&cmdConfig.Client.StateDir, "state-dir", "", "") 99 flags.StringVar(&cmdConfig.Client.AllocDir, "alloc-dir", "", "") 100 flags.StringVar(&cmdConfig.Client.NodeClass, "node-class", "", "") 101 flags.StringVar(&servers, "servers", "", "") 102 flags.Var((*flaghelper.StringFlag)(&meta), "meta", "") 103 flags.StringVar(&cmdConfig.Client.NetworkInterface, "network-interface", "", "") 104 flags.IntVar(&cmdConfig.Client.NetworkSpeed, "network-speed", 0, "") 105 106 // General options 107 flags.Var((*flaghelper.StringFlag)(&configPath), "config", "config") 108 flags.StringVar(&cmdConfig.BindAddr, "bind", "", "") 109 
flags.StringVar(&cmdConfig.Region, "region", "", "") 110 flags.StringVar(&cmdConfig.DataDir, "data-dir", "", "") 111 flags.StringVar(&cmdConfig.PluginDir, "plugin-dir", "", "") 112 flags.StringVar(&cmdConfig.Datacenter, "dc", "", "") 113 flags.StringVar(&cmdConfig.LogLevel, "log-level", "", "") 114 flags.BoolVar(&cmdConfig.LogJson, "log-json", false, "") 115 flags.StringVar(&cmdConfig.NodeName, "node", "", "") 116 117 // Consul options 118 flags.StringVar(&cmdConfig.Consul.Auth, "consul-auth", "", "") 119 flags.Var((flaghelper.FuncBoolVar)(func(b bool) error { 120 cmdConfig.Consul.AutoAdvertise = &b 121 return nil 122 }), "consul-auto-advertise", "") 123 flags.StringVar(&cmdConfig.Consul.CAFile, "consul-ca-file", "", "") 124 flags.StringVar(&cmdConfig.Consul.CertFile, "consul-cert-file", "", "") 125 flags.StringVar(&cmdConfig.Consul.KeyFile, "consul-key-file", "", "") 126 flags.Var((flaghelper.FuncBoolVar)(func(b bool) error { 127 cmdConfig.Consul.ChecksUseAdvertise = &b 128 return nil 129 }), "consul-checks-use-advertise", "") 130 flags.Var((flaghelper.FuncBoolVar)(func(b bool) error { 131 cmdConfig.Consul.ClientAutoJoin = &b 132 return nil 133 }), "consul-client-auto-join", "") 134 flags.StringVar(&cmdConfig.Consul.ClientServiceName, "consul-client-service-name", "", "") 135 flags.StringVar(&cmdConfig.Consul.ClientHTTPCheckName, "consul-client-http-check-name", "", "") 136 flags.StringVar(&cmdConfig.Consul.ServerServiceName, "consul-server-service-name", "", "") 137 flags.StringVar(&cmdConfig.Consul.ServerHTTPCheckName, "consul-server-http-check-name", "", "") 138 flags.StringVar(&cmdConfig.Consul.ServerSerfCheckName, "consul-server-serf-check-name", "", "") 139 flags.StringVar(&cmdConfig.Consul.ServerRPCCheckName, "consul-server-rpc-check-name", "", "") 140 flags.Var((flaghelper.FuncBoolVar)(func(b bool) error { 141 cmdConfig.Consul.ServerAutoJoin = &b 142 return nil 143 }), "consul-server-auto-join", "") 144 flags.Var((flaghelper.FuncBoolVar)(func(b bool) error 
{ 145 cmdConfig.Consul.EnableSSL = &b 146 return nil 147 }), "consul-ssl", "") 148 flags.StringVar(&cmdConfig.Consul.Token, "consul-token", "", "") 149 flags.Var((flaghelper.FuncBoolVar)(func(b bool) error { 150 cmdConfig.Consul.VerifySSL = &b 151 return nil 152 }), "consul-verify-ssl", "") 153 flags.StringVar(&cmdConfig.Consul.Addr, "consul-address", "", "") 154 155 // Vault options 156 flags.Var((flaghelper.FuncBoolVar)(func(b bool) error { 157 cmdConfig.Vault.Enabled = &b 158 return nil 159 }), "vault-enabled", "") 160 flags.Var((flaghelper.FuncBoolVar)(func(b bool) error { 161 cmdConfig.Vault.AllowUnauthenticated = &b 162 return nil 163 }), "vault-allow-unauthenticated", "") 164 flags.StringVar(&cmdConfig.Vault.Token, "vault-token", "", "") 165 flags.StringVar(&cmdConfig.Vault.Addr, "vault-address", "", "") 166 flags.StringVar(&cmdConfig.Vault.Role, "vault-create-from-role", "", "") 167 flags.StringVar(&cmdConfig.Vault.TLSCaFile, "vault-ca-file", "", "") 168 flags.StringVar(&cmdConfig.Vault.TLSCaPath, "vault-ca-path", "", "") 169 flags.StringVar(&cmdConfig.Vault.TLSCertFile, "vault-cert-file", "", "") 170 flags.StringVar(&cmdConfig.Vault.TLSKeyFile, "vault-key-file", "", "") 171 flags.Var((flaghelper.FuncBoolVar)(func(b bool) error { 172 cmdConfig.Vault.TLSSkipVerify = &b 173 return nil 174 }), "vault-tls-skip-verify", "") 175 flags.StringVar(&cmdConfig.Vault.TLSServerName, "vault-tls-server-name", "", "") 176 177 // ACL options 178 flags.BoolVar(&cmdConfig.ACL.Enabled, "acl-enabled", false, "") 179 flags.StringVar(&cmdConfig.ACL.ReplicationToken, "acl-replication-token", "", "") 180 181 if err := flags.Parse(c.args); err != nil { 182 return nil 183 } 184 185 // Split the servers. 186 if servers != "" { 187 cmdConfig.Client.Servers = strings.Split(servers, ",") 188 } 189 190 // Parse the meta flags. 
191 metaLength := len(meta) 192 if metaLength != 0 { 193 cmdConfig.Client.Meta = make(map[string]string, metaLength) 194 for _, kv := range meta { 195 parts := strings.SplitN(kv, "=", 2) 196 if len(parts) != 2 { 197 c.Ui.Error(fmt.Sprintf("Error parsing Client.Meta value: %v", kv)) 198 return nil 199 } 200 cmdConfig.Client.Meta[parts[0]] = parts[1] 201 } 202 } 203 204 // Load the configuration 205 var config *Config 206 if dev { 207 config = DevConfig() 208 } else { 209 config = DefaultConfig() 210 } 211 212 // Merge in the enterprise overlay 213 config.Merge(DefaultEntConfig()) 214 215 for _, path := range configPath { 216 current, err := LoadConfig(path) 217 if err != nil { 218 c.Ui.Error(fmt.Sprintf( 219 "Error loading configuration from %s: %s", path, err)) 220 return nil 221 } 222 223 // The user asked us to load some config here but we didn't find any, 224 // so we'll complain but continue. 225 if current == nil || reflect.DeepEqual(current, &Config{}) { 226 c.Ui.Warn(fmt.Sprintf("No configuration loaded from %s", path)) 227 } 228 229 if config == nil { 230 config = current 231 } else { 232 config = config.Merge(current) 233 } 234 } 235 236 // Ensure the sub-structs at least exist 237 if config.Client == nil { 238 config.Client = &ClientConfig{} 239 } 240 if config.Server == nil { 241 config.Server = &ServerConfig{} 242 } 243 244 // Merge any CLI options over config file options 245 config = config.Merge(cmdConfig) 246 247 // Set the version info 248 config.Version = c.Version 249 250 // Normalize binds, ports, addresses, and advertise 251 if err := config.normalizeAddrs(); err != nil { 252 c.Ui.Error(err.Error()) 253 return nil 254 } 255 256 // Check to see if we should read the Vault token from the environment 257 if config.Vault.Token == "" { 258 if token, ok := os.LookupEnv("VAULT_TOKEN"); ok { 259 config.Vault.Token = token 260 } 261 } 262 263 // Default the plugin directory to be under that of the data directory if it 264 // isn't explicitly specified. 
265 if config.PluginDir == "" && config.DataDir != "" { 266 config.PluginDir = filepath.Join(config.DataDir, "plugins") 267 } 268 269 if !c.isValidConfig(config) { 270 return nil 271 } 272 273 return config 274 } 275 276 func (c *Command) isValidConfig(config *Config) bool { 277 278 // Check that the server is running in at least one mode. 279 if !(config.Server.Enabled || config.Client.Enabled) { 280 c.Ui.Error("Must specify either server, client or dev mode for the agent.") 281 return false 282 } 283 284 // Set up the TLS configuration properly if we have one. 285 // XXX chelseakomlo: set up a TLSConfig New method which would wrap 286 // constructor-type actions like this. 287 if config.TLSConfig != nil && !config.TLSConfig.IsEmpty() { 288 if err := config.TLSConfig.SetChecksum(); err != nil { 289 c.Ui.Error(fmt.Sprintf("WARNING: Error when parsing TLS configuration: %v", err)) 290 } 291 } 292 293 if config.Server.EncryptKey != "" { 294 if _, err := config.Server.EncryptBytes(); err != nil { 295 c.Ui.Error(fmt.Sprintf("Invalid encryption key: %s", err)) 296 return false 297 } 298 keyfile := filepath.Join(config.DataDir, serfKeyring) 299 if _, err := os.Stat(keyfile); err == nil { 300 c.Ui.Warn("WARNING: keyring exists but -encrypt given, using keyring") 301 } 302 } 303 304 // Verify the paths are absolute. 
305 dirs := map[string]string{ 306 "data-dir": config.DataDir, 307 "plugin-dir": config.PluginDir, 308 "alloc-dir": config.Client.AllocDir, 309 "state-dir": config.Client.StateDir, 310 } 311 for k, dir := range dirs { 312 if dir == "" { 313 continue 314 } 315 316 if !filepath.IsAbs(dir) { 317 c.Ui.Error(fmt.Sprintf("%s must be given as an absolute path: got %v", k, dir)) 318 return false 319 } 320 } 321 322 if config.Client.Enabled { 323 for k := range config.Client.Meta { 324 if !helper.IsValidInterpVariable(k) { 325 c.Ui.Error(fmt.Sprintf("Invalid Client.Meta key: %v", k)) 326 return false 327 } 328 } 329 } 330 331 if config.DevMode { 332 // Skip the rest of the validation for dev mode 333 return true 334 } 335 336 // Ensure that we have the directories we need to run. 337 if config.Server.Enabled && config.DataDir == "" { 338 c.Ui.Error("Must specify data directory") 339 return false 340 } 341 342 // The config is valid if the top-level data-dir is set or if both 343 // alloc-dir and state-dir are set. 344 if config.Client.Enabled && config.DataDir == "" { 345 if config.Client.AllocDir == "" || config.Client.StateDir == "" || config.PluginDir == "" { 346 c.Ui.Error("Must specify the state, alloc dir, and plugin dir if data-dir is omitted.") 347 return false 348 } 349 } 350 351 // Check the bootstrap flags 352 if config.Server.BootstrapExpect > 0 && !config.Server.Enabled { 353 c.Ui.Error("Bootstrap requires server mode to be enabled") 354 return false 355 } 356 if config.Server.BootstrapExpect == 1 { 357 c.Ui.Error("WARNING: Bootstrap mode enabled! Potentially unsafe operation.") 358 } 359 360 return true 361 } 362 363 // setupLoggers is used to setup the logGate, logWriter, and our logOutput 364 func (c *Command) setupLoggers(config *Config) (*gatedwriter.Writer, *logWriter, io.Writer) { 365 // Setup logging. First create the gated log writer, which will 366 // store logs until we're ready to show them. 
Then create the level 367 // filter, filtering logs of the specified level. 368 logGate := &gatedwriter.Writer{ 369 Writer: &cli.UiWriter{Ui: c.Ui}, 370 } 371 372 c.logFilter = LevelFilter() 373 c.logFilter.MinLevel = logutils.LogLevel(strings.ToUpper(config.LogLevel)) 374 c.logFilter.Writer = logGate 375 if !ValidateLevelFilter(c.logFilter.MinLevel, c.logFilter) { 376 c.Ui.Error(fmt.Sprintf( 377 "Invalid log level: %s. Valid log levels are: %v", 378 c.logFilter.MinLevel, c.logFilter.Levels)) 379 return nil, nil, nil 380 } 381 382 // Check if syslog is enabled 383 var syslog io.Writer 384 if config.EnableSyslog { 385 l, err := gsyslog.NewLogger(gsyslog.LOG_NOTICE, config.SyslogFacility, "nomad") 386 if err != nil { 387 c.Ui.Error(fmt.Sprintf("Syslog setup failed: %v", err)) 388 return nil, nil, nil 389 } 390 syslog = &SyslogWrapper{l, c.logFilter} 391 } 392 393 // Create a log writer, and wrap a logOutput around it 394 logWriter := NewLogWriter(512) 395 var logOutput io.Writer 396 if syslog != nil { 397 logOutput = io.MultiWriter(c.logFilter, logWriter, syslog) 398 } else { 399 logOutput = io.MultiWriter(c.logFilter, logWriter) 400 } 401 c.logOutput = logOutput 402 log.SetOutput(logOutput) 403 return logGate, logWriter, logOutput 404 } 405 406 // setupAgent is used to start the agent and various interfaces 407 func (c *Command) setupAgent(config *Config, logger hclog.Logger, logOutput io.Writer, inmem *metrics.InmemSink) error { 408 c.Ui.Output("Starting Nomad agent...") 409 agent, err := NewAgent(config, logger, logOutput, inmem) 410 if err != nil { 411 c.Ui.Error(fmt.Sprintf("Error starting agent: %s", err)) 412 return err 413 } 414 c.agent = agent 415 416 // Setup the HTTP server 417 http, err := NewHTTPServer(agent, config) 418 if err != nil { 419 agent.Shutdown() 420 c.Ui.Error(fmt.Sprintf("Error starting http server: %s", err)) 421 return err 422 } 423 c.httpServer = http 424 425 // If DisableUpdateCheck is not enabled, set up update checking 426 // 
(DisableUpdateCheck is false by default) 427 if config.DisableUpdateCheck != nil && !*config.DisableUpdateCheck { 428 version := config.Version.Version 429 if config.Version.VersionPrerelease != "" { 430 version += fmt.Sprintf("-%s", config.Version.VersionPrerelease) 431 } 432 updateParams := &checkpoint.CheckParams{ 433 Product: "nomad", 434 Version: version, 435 } 436 if !config.DisableAnonymousSignature { 437 updateParams.SignatureFile = filepath.Join(config.DataDir, "checkpoint-signature") 438 } 439 440 // Schedule a periodic check with expected interval of 24 hours 441 checkpoint.CheckInterval(updateParams, 24*time.Hour, c.checkpointResults) 442 443 // Do an immediate check within the next 30 seconds 444 go func() { 445 time.Sleep(lib.RandomStagger(30 * time.Second)) 446 c.checkpointResults(checkpoint.Check(updateParams)) 447 }() 448 } 449 450 return nil 451 } 452 453 // checkpointResults is used to handler periodic results from our update checker 454 func (c *Command) checkpointResults(results *checkpoint.CheckResponse, err error) { 455 if err != nil { 456 c.Ui.Error(fmt.Sprintf("Failed to check for updates: %v", err)) 457 return 458 } 459 if results.Outdated { 460 c.Ui.Error(fmt.Sprintf("Newer Nomad version available: %s (currently running: %s)", results.CurrentVersion, c.Version.VersionNumber())) 461 } 462 for _, alert := range results.Alerts { 463 switch alert.Level { 464 case "info": 465 c.Ui.Info(fmt.Sprintf("Bulletin [%s]: %s (%s)", alert.Level, alert.Message, alert.URL)) 466 default: 467 c.Ui.Error(fmt.Sprintf("Bulletin [%s]: %s (%s)", alert.Level, alert.Message, alert.URL)) 468 } 469 } 470 } 471 472 func (c *Command) AutocompleteFlags() complete.Flags { 473 configFilePredictor := complete.PredictOr( 474 complete.PredictFiles("*.json"), 475 complete.PredictFiles("*.hcl")) 476 477 return map[string]complete.Predictor{ 478 "-dev": complete.PredictNothing, 479 "-server": complete.PredictNothing, 480 "-client": complete.PredictNothing, 481 
"-bootstrap-expect": complete.PredictAnything, 482 "-encrypt": complete.PredictAnything, 483 "-raft-protocol": complete.PredictAnything, 484 "-rejoin": complete.PredictNothing, 485 "-join": complete.PredictAnything, 486 "-retry-join": complete.PredictAnything, 487 "-retry-max": complete.PredictAnything, 488 "-state-dir": complete.PredictDirs("*"), 489 "-alloc-dir": complete.PredictDirs("*"), 490 "-node-class": complete.PredictAnything, 491 "-servers": complete.PredictAnything, 492 "-meta": complete.PredictAnything, 493 "-config": configFilePredictor, 494 "-bind": complete.PredictAnything, 495 "-region": complete.PredictAnything, 496 "-data-dir": complete.PredictDirs("*"), 497 "-plugin-dir": complete.PredictDirs("*"), 498 "-dc": complete.PredictAnything, 499 "-log-level": complete.PredictAnything, 500 "-json-logs": complete.PredictNothing, 501 "-node": complete.PredictAnything, 502 "-consul-auth": complete.PredictAnything, 503 "-consul-auto-advertise": complete.PredictNothing, 504 "-consul-ca-file": complete.PredictAnything, 505 "-consul-cert-file": complete.PredictAnything, 506 "-consul-key-file": complete.PredictAnything, 507 "-consul-checks-use-advertise": complete.PredictNothing, 508 "-consul-client-auto-join": complete.PredictNothing, 509 "-consul-client-service-name": complete.PredictAnything, 510 "-consul-client-http-check-name": complete.PredictAnything, 511 "-consul-server-service-name": complete.PredictAnything, 512 "-consul-server-http-check-name": complete.PredictAnything, 513 "-consul-server-serf-check-name": complete.PredictAnything, 514 "-consul-server-rpc-check-name": complete.PredictAnything, 515 "-consul-server-auto-join": complete.PredictNothing, 516 "-consul-ssl": complete.PredictNothing, 517 "-consul-verify-ssl": complete.PredictNothing, 518 "-consul-address": complete.PredictAnything, 519 "-vault-enabled": complete.PredictNothing, 520 "-vault-allow-unauthenticated": complete.PredictNothing, 521 "-vault-token": complete.PredictAnything, 522 
"-vault-address": complete.PredictAnything, 523 "-vault-create-from-role": complete.PredictAnything, 524 "-vault-ca-file": complete.PredictAnything, 525 "-vault-ca-path": complete.PredictAnything, 526 "-vault-cert-file": complete.PredictAnything, 527 "-vault-key-file": complete.PredictAnything, 528 "-vault-tls-skip-verify": complete.PredictNothing, 529 "-vault-tls-server-name": complete.PredictAnything, 530 "-acl-enabled": complete.PredictNothing, 531 "-acl-replication-token": complete.PredictAnything, 532 } 533 } 534 535 func (c *Command) AutocompleteArgs() complete.Predictor { 536 return nil 537 } 538 539 func (c *Command) Run(args []string) int { 540 c.Ui = &cli.PrefixedUi{ 541 OutputPrefix: "==> ", 542 InfoPrefix: " ", 543 ErrorPrefix: "==> ", 544 Ui: c.Ui, 545 } 546 547 // Parse our configs 548 c.args = args 549 config := c.readConfig() 550 if config == nil { 551 return 1 552 } 553 554 // Setup the log outputs 555 logGate, _, logOutput := c.setupLoggers(config) 556 if logGate == nil { 557 return 1 558 } 559 560 // Create logger 561 logger := hclog.New(&hclog.LoggerOptions{ 562 Name: "agent", 563 Level: hclog.LevelFromString(config.LogLevel), 564 Output: logOutput, 565 JSONFormat: config.LogJson, 566 }) 567 568 // Swap out UI implementation if json logging is enabled 569 if config.LogJson { 570 c.Ui = &logging.HcLogUI{Log: logger} 571 } 572 573 // Log config files 574 if len(config.Files) > 0 { 575 c.Ui.Output(fmt.Sprintf("Loaded configuration from %s", strings.Join(config.Files, ", "))) 576 } else { 577 c.Ui.Output("No configuration files loaded") 578 } 579 580 // Initialize the telemetry 581 inmem, err := c.setupTelemetry(config) 582 if err != nil { 583 c.Ui.Error(fmt.Sprintf("Error initializing telemetry: %s", err)) 584 return 1 585 } 586 587 // Create the agent 588 if err := c.setupAgent(config, logger, logOutput, inmem); err != nil { 589 logGate.Flush() 590 return 1 591 } 592 defer c.agent.Shutdown() 593 594 // Shutdown the HTTP server at the end 595 defer 
func() { 596 if c.httpServer != nil { 597 c.httpServer.Shutdown() 598 } 599 }() 600 601 // Join startup nodes if specified 602 if err := c.startupJoin(config); err != nil { 603 c.Ui.Error(err.Error()) 604 return 1 605 } 606 607 // Compile agent information for output later 608 info := make(map[string]string) 609 info["version"] = config.Version.VersionNumber() 610 info["client"] = strconv.FormatBool(config.Client.Enabled) 611 info["log level"] = config.LogLevel 612 info["server"] = strconv.FormatBool(config.Server.Enabled) 613 info["region"] = fmt.Sprintf("%s (DC: %s)", config.Region, config.Datacenter) 614 info["bind addrs"] = c.getBindAddrSynopsis() 615 info["advertise addrs"] = c.getAdvertiseAddrSynopsis() 616 617 // Sort the keys for output 618 infoKeys := make([]string, 0, len(info)) 619 for key := range info { 620 infoKeys = append(infoKeys, key) 621 } 622 sort.Strings(infoKeys) 623 624 // Agent configuration output 625 padding := 18 626 c.Ui.Output("Nomad agent configuration:\n") 627 for _, k := range infoKeys { 628 c.Ui.Info(fmt.Sprintf( 629 "%s%s: %s", 630 strings.Repeat(" ", padding-len(k)), 631 strings.Title(k), 632 info[k])) 633 } 634 c.Ui.Output("") 635 636 // Output the header that the server has started 637 c.Ui.Output("Nomad agent started! Log data will stream in below:\n") 638 639 // Enable log streaming 640 logGate.Flush() 641 642 // Start retry join process 643 if err := c.handleRetryJoin(config); err != nil { 644 c.Ui.Error(err.Error()) 645 return 1 646 } 647 648 // Wait for exit 649 return c.handleSignals() 650 } 651 652 // handleRetryJoin is used to start retry joining if it is configured. 
653 func (c *Command) handleRetryJoin(config *Config) error { 654 c.retryJoinErrCh = make(chan struct{}) 655 656 if config.Server.Enabled && len(config.Server.RetryJoin) != 0 { 657 joiner := retryJoiner{ 658 discover: &discover.Discover{}, 659 errCh: c.retryJoinErrCh, 660 logger: c.agent.logger.Named("joiner"), 661 serverJoin: c.agent.server.Join, 662 serverEnabled: true, 663 } 664 665 if err := joiner.Validate(config); err != nil { 666 return err 667 } 668 669 // Remove the duplicate fields 670 if len(config.Server.RetryJoin) != 0 { 671 config.Server.ServerJoin.RetryJoin = config.Server.RetryJoin 672 config.Server.RetryJoin = nil 673 } 674 if config.Server.RetryMaxAttempts != 0 { 675 config.Server.ServerJoin.RetryMaxAttempts = config.Server.RetryMaxAttempts 676 config.Server.RetryMaxAttempts = 0 677 } 678 if config.Server.RetryInterval != 0 { 679 config.Server.ServerJoin.RetryInterval = config.Server.RetryInterval 680 config.Server.RetryInterval = 0 681 } 682 683 c.agent.logger.Warn("using deprecated retry_join fields. 
Upgrade configuration to use server_join") 684 } 685 686 if config.Server.Enabled && 687 config.Server.ServerJoin != nil && 688 len(config.Server.ServerJoin.RetryJoin) != 0 { 689 690 joiner := retryJoiner{ 691 discover: &discover.Discover{}, 692 errCh: c.retryJoinErrCh, 693 logger: c.agent.logger.Named("joiner"), 694 serverJoin: c.agent.server.Join, 695 serverEnabled: true, 696 } 697 698 if err := joiner.Validate(config); err != nil { 699 return err 700 } 701 702 go joiner.RetryJoin(config.Server.ServerJoin) 703 } 704 705 if config.Client.Enabled && 706 config.Client.ServerJoin != nil && 707 len(config.Client.ServerJoin.RetryJoin) != 0 { 708 joiner := retryJoiner{ 709 discover: &discover.Discover{}, 710 errCh: c.retryJoinErrCh, 711 logger: c.agent.logger.Named("joiner"), 712 clientJoin: c.agent.client.SetServers, 713 clientEnabled: true, 714 } 715 716 if err := joiner.Validate(config); err != nil { 717 return err 718 } 719 720 go joiner.RetryJoin(config.Client.ServerJoin) 721 } 722 723 return nil 724 } 725 726 // handleSignals blocks until we get an exit-causing signal 727 func (c *Command) handleSignals() int { 728 signalCh := make(chan os.Signal, 4) 729 signal.Notify(signalCh, os.Interrupt, syscall.SIGTERM, syscall.SIGHUP, syscall.SIGPIPE) 730 731 // Wait for a signal 732 WAIT: 733 var sig os.Signal 734 select { 735 case s := <-signalCh: 736 sig = s 737 case <-c.ShutdownCh: 738 sig = os.Interrupt 739 case <-c.retryJoinErrCh: 740 return 1 741 } 742 743 // Skip any SIGPIPE signal and don't try to log it (See issues #1798, #3554) 744 if sig == syscall.SIGPIPE { 745 goto WAIT 746 } 747 748 c.Ui.Output(fmt.Sprintf("Caught signal: %v", sig)) 749 750 // Check if this is a SIGHUP 751 if sig == syscall.SIGHUP { 752 c.handleReload() 753 goto WAIT 754 } 755 756 // Check if we should do a graceful leave 757 graceful := false 758 if sig == os.Interrupt && c.agent.GetConfig().LeaveOnInt { 759 graceful = true 760 } else if sig == syscall.SIGTERM && 
c.agent.GetConfig().LeaveOnTerm { 761 graceful = true 762 } 763 764 // Bail fast if not doing a graceful leave 765 if !graceful { 766 return 1 767 } 768 769 // Attempt a graceful leave 770 gracefulCh := make(chan struct{}) 771 c.Ui.Output("Gracefully shutting down agent...") 772 go func() { 773 if err := c.agent.Leave(); err != nil { 774 c.Ui.Error(fmt.Sprintf("Error: %s", err)) 775 return 776 } 777 close(gracefulCh) 778 }() 779 780 // Wait for leave or another signal 781 select { 782 case <-signalCh: 783 return 1 784 case <-time.After(gracefulTimeout): 785 return 1 786 case <-gracefulCh: 787 return 0 788 } 789 } 790 791 // reloadHTTPServer shuts down the existing HTTP server and restarts it. This 792 // is helpful when reloading the agent configuration. 793 func (c *Command) reloadHTTPServer() error { 794 c.agent.logger.Info("reloading HTTP server with new TLS configuration") 795 796 c.httpServer.Shutdown() 797 798 http, err := NewHTTPServer(c.agent, c.agent.config) 799 if err != nil { 800 return err 801 } 802 c.httpServer = http 803 804 return nil 805 } 806 807 // handleReload is invoked when we should reload our configs, e.g. SIGHUP 808 func (c *Command) handleReload() { 809 c.Ui.Output("Reloading configuration...") 810 newConf := c.readConfig() 811 if newConf == nil { 812 c.Ui.Error(fmt.Sprintf("Failed to reload configs")) 813 return 814 } 815 816 // Change the log level 817 minLevel := logutils.LogLevel(strings.ToUpper(newConf.LogLevel)) 818 if ValidateLevelFilter(minLevel, c.logFilter) { 819 c.logFilter.SetMinLevel(minLevel) 820 } else { 821 c.Ui.Error(fmt.Sprintf( 822 "Invalid log level: %s. 
Valid log levels are: %v", 823 minLevel, c.logFilter.Levels)) 824 825 // Keep the current log level 826 newConf.LogLevel = c.agent.GetConfig().LogLevel 827 } 828 829 shouldReloadAgent, shouldReloadHTTP := c.agent.ShouldReload(newConf) 830 if shouldReloadAgent { 831 c.agent.logger.Debug("starting reload of agent config") 832 err := c.agent.Reload(newConf) 833 if err != nil { 834 c.agent.logger.Error("failed to reload the config", "error", err) 835 return 836 } 837 } 838 839 if s := c.agent.Server(); s != nil { 840 c.agent.logger.Debug("starting reload of server config") 841 sconf, err := convertServerConfig(newConf) 842 if err != nil { 843 c.agent.logger.Error("failed to convert server config", "error", err) 844 return 845 } 846 847 // Finalize the config to get the agent objects injected in 848 c.agent.finalizeServerConfig(sconf) 849 850 // Reload the config 851 if err := s.Reload(sconf); err != nil { 852 c.agent.logger.Error("reloading server config failed", "error", err) 853 return 854 } 855 } 856 857 if s := c.agent.Client(); s != nil { 858 c.agent.logger.Debug("starting reload of client config") 859 clientConfig, err := convertClientConfig(newConf) 860 if err != nil { 861 c.agent.logger.Error("failed to convert client config", "error", err) 862 return 863 } 864 865 // Finalize the config to get the agent objects injected in 866 if err := c.agent.finalizeClientConfig(clientConfig); err != nil { 867 c.agent.logger.Error("failed to finalize client config", "error", err) 868 return 869 } 870 871 if err := c.agent.Client().Reload(clientConfig); err != nil { 872 c.agent.logger.Error("reloading client config failed", "error", err) 873 return 874 } 875 } 876 877 // reload HTTP server after we have reloaded both client and server, in case 878 // we error in either of the above cases. For example, reloading the http 879 // server to a TLS connection could succeed, while reloading the server's rpc 880 // connections could fail. 
881 if shouldReloadHTTP { 882 err := c.reloadHTTPServer() 883 if err != nil { 884 c.agent.httpLogger.Error("reloading config failed", "error", err) 885 return 886 } 887 } 888 } 889 890 // setupTelemetry is used ot setup the telemetry sub-systems 891 func (c *Command) setupTelemetry(config *Config) (*metrics.InmemSink, error) { 892 /* Setup telemetry 893 Aggregate on 10 second intervals for 1 minute. Expose the 894 metrics over stderr when there is a SIGUSR1 received. 895 */ 896 inm := metrics.NewInmemSink(10*time.Second, time.Minute) 897 metrics.DefaultInmemSignal(inm) 898 899 var telConfig *Telemetry 900 if config.Telemetry == nil { 901 telConfig = &Telemetry{} 902 } else { 903 telConfig = config.Telemetry 904 } 905 906 metricsConf := metrics.DefaultConfig("nomad") 907 metricsConf.EnableHostname = !telConfig.DisableHostname 908 909 // Prefer the hostname as a label. 910 metricsConf.EnableHostnameLabel = !telConfig.DisableHostname && 911 !telConfig.DisableTaggedMetrics && !telConfig.BackwardsCompatibleMetrics 912 913 if telConfig.UseNodeName { 914 metricsConf.HostName = config.NodeName 915 metricsConf.EnableHostname = true 916 } 917 918 allowedPrefixes, blockedPrefixes, err := telConfig.PrefixFilters() 919 if err != nil { 920 return inm, err 921 } 922 923 metricsConf.AllowedPrefixes = allowedPrefixes 924 metricsConf.BlockedPrefixes = blockedPrefixes 925 926 if telConfig.FilterDefault != nil { 927 metricsConf.FilterDefault = *telConfig.FilterDefault 928 } 929 930 // Configure the statsite sink 931 var fanout metrics.FanoutSink 932 if telConfig.StatsiteAddr != "" { 933 sink, err := metrics.NewStatsiteSink(telConfig.StatsiteAddr) 934 if err != nil { 935 return inm, err 936 } 937 fanout = append(fanout, sink) 938 } 939 940 // Configure the statsd sink 941 if telConfig.StatsdAddr != "" { 942 sink, err := metrics.NewStatsdSink(telConfig.StatsdAddr) 943 if err != nil { 944 return inm, err 945 } 946 fanout = append(fanout, sink) 947 } 948 949 // Configure the prometheus 
sink 950 if telConfig.PrometheusMetrics { 951 promSink, err := prometheus.NewPrometheusSink() 952 if err != nil { 953 return inm, err 954 } 955 fanout = append(fanout, promSink) 956 } 957 958 // Configure the datadog sink 959 if telConfig.DataDogAddr != "" { 960 sink, err := datadog.NewDogStatsdSink(telConfig.DataDogAddr, config.NodeName) 961 if err != nil { 962 return inm, err 963 } 964 sink.SetTags(telConfig.DataDogTags) 965 fanout = append(fanout, sink) 966 } 967 968 // Configure the Circonus sink 969 if telConfig.CirconusAPIToken != "" || telConfig.CirconusCheckSubmissionURL != "" { 970 cfg := &circonus.Config{} 971 cfg.Interval = telConfig.CirconusSubmissionInterval 972 cfg.CheckManager.API.TokenKey = telConfig.CirconusAPIToken 973 cfg.CheckManager.API.TokenApp = telConfig.CirconusAPIApp 974 cfg.CheckManager.API.URL = telConfig.CirconusAPIURL 975 cfg.CheckManager.Check.SubmissionURL = telConfig.CirconusCheckSubmissionURL 976 cfg.CheckManager.Check.ID = telConfig.CirconusCheckID 977 cfg.CheckManager.Check.ForceMetricActivation = telConfig.CirconusCheckForceMetricActivation 978 cfg.CheckManager.Check.InstanceID = telConfig.CirconusCheckInstanceID 979 cfg.CheckManager.Check.SearchTag = telConfig.CirconusCheckSearchTag 980 cfg.CheckManager.Check.Tags = telConfig.CirconusCheckTags 981 cfg.CheckManager.Check.DisplayName = telConfig.CirconusCheckDisplayName 982 cfg.CheckManager.Broker.ID = telConfig.CirconusBrokerID 983 cfg.CheckManager.Broker.SelectTag = telConfig.CirconusBrokerSelectTag 984 985 if cfg.CheckManager.Check.DisplayName == "" { 986 cfg.CheckManager.Check.DisplayName = "Nomad" 987 } 988 989 if cfg.CheckManager.API.TokenApp == "" { 990 cfg.CheckManager.API.TokenApp = "nomad" 991 } 992 993 if cfg.CheckManager.Check.SearchTag == "" { 994 cfg.CheckManager.Check.SearchTag = "service:nomad" 995 } 996 997 sink, err := circonus.NewCirconusSink(cfg) 998 if err != nil { 999 return inm, err 1000 } 1001 sink.Start() 1002 fanout = append(fanout, sink) 1003 } 1004 
1005 // Initialize the global sink 1006 if len(fanout) > 0 { 1007 fanout = append(fanout, inm) 1008 metrics.NewGlobal(metricsConf, fanout) 1009 } else { 1010 metricsConf.EnableHostname = false 1011 metrics.NewGlobal(metricsConf, inm) 1012 } 1013 1014 return inm, nil 1015 } 1016 1017 func (c *Command) startupJoin(config *Config) error { 1018 // Nothing to do 1019 if !config.Server.Enabled { 1020 return nil 1021 } 1022 1023 // Validate both old and new aren't being set 1024 old := len(config.Server.StartJoin) 1025 var new int 1026 if config.Server.ServerJoin != nil { 1027 new = len(config.Server.ServerJoin.StartJoin) 1028 } 1029 if old != 0 && new != 0 { 1030 return fmt.Errorf("server_join and start_join cannot both be defined; prefer setting the server_join stanza") 1031 } 1032 1033 // Nothing to do 1034 if old+new == 0 { 1035 return nil 1036 } 1037 1038 // Combine the lists and join 1039 joining := config.Server.StartJoin 1040 if new != 0 { 1041 joining = append(joining, config.Server.ServerJoin.StartJoin...) 1042 } 1043 1044 c.Ui.Output("Joining cluster...") 1045 n, err := c.agent.server.Join(joining) 1046 if err != nil { 1047 return err 1048 } 1049 1050 c.Ui.Output(fmt.Sprintf("Join completed. Synced with %d initial agents", n)) 1051 return nil 1052 } 1053 1054 // getBindAddrSynopsis returns a string that describes the addresses the agent 1055 // is bound to. 
func (c *Command) getBindAddrSynopsis() string {
	// Guard against a partially-initialized agent (e.g. called before
	// configuration normalization has completed).
	if c == nil || c.agent == nil || c.agent.config == nil || c.agent.config.normalizedAddrs == nil {
		return ""
	}

	b := new(strings.Builder)
	fmt.Fprintf(b, "HTTP: %s", c.agent.config.normalizedAddrs.HTTP)

	// RPC and Serf addresses are only emitted when running as a server.
	if c.agent.server != nil {
		if c.agent.config.normalizedAddrs.RPC != "" {
			fmt.Fprintf(b, "; RPC: %s", c.agent.config.normalizedAddrs.RPC)
		}
		if c.agent.config.normalizedAddrs.Serf != "" {
			fmt.Fprintf(b, "; Serf: %s", c.agent.config.normalizedAddrs.Serf)
		}
	}

	return b.String()
}

// getAdvertiseAddrSynopsis returns a string that describes the addresses the agent
// is advertising.
func (c *Command) getAdvertiseAddrSynopsis() string {
	// Guard against a partially-initialized agent, mirroring
	// getBindAddrSynopsis but reading the advertise addresses instead.
	if c == nil || c.agent == nil || c.agent.config == nil || c.agent.config.AdvertiseAddrs == nil {
		return ""
	}

	b := new(strings.Builder)
	fmt.Fprintf(b, "HTTP: %s", c.agent.config.AdvertiseAddrs.HTTP)

	// RPC and Serf addresses are only emitted when running as a server.
	if c.agent.server != nil {
		if c.agent.config.AdvertiseAddrs.RPC != "" {
			fmt.Fprintf(b, "; RPC: %s", c.agent.config.AdvertiseAddrs.RPC)
		}
		if c.agent.config.AdvertiseAddrs.Serf != "" {
			fmt.Fprintf(b, "; Serf: %s", c.agent.config.AdvertiseAddrs.Serf)
		}
	}

	return b.String()
}

// Synopsis returns a one-line description of the command for CLI listings.
func (c *Command) Synopsis() string {
	return "Runs a Nomad agent"
}

// Help returns the long-form usage text for `nomad agent`, covering the CLI
// flags registered in readConfig. The raw string's leading/trailing newlines
// are stripped before display.
func (c *Command) Help() string {
	helpText := `
Usage: nomad agent [options]

  Starts the Nomad agent and runs until an interrupt is received.
  The agent may be a client and/or server.

  The Nomad agent's configuration primarily comes from the config
  files used, but a subset of the options may also be passed directly
  as CLI arguments, listed below.

General Options (clients and servers):

  -bind=<addr>
    The address the agent will bind to for all of its various network
    services. The individual services that run bind to individual
    ports on this address. Defaults to the loopback 127.0.0.1.

  -config=<path>
    The path to either a single config file or a directory of config
    files to use for configuring the Nomad agent. This option may be
    specified multiple times. If multiple config files are used, the
    values from each will be merged together. During merging, values
    from files found later in the list are merged over values from
    previously parsed files.

  -data-dir=<path>
    The data directory used to store state and other persistent data.
    On client machines this is used to house allocation data such as
    downloaded artifacts used by drivers. On server nodes, the data
    dir is also used to store the replicated log.

  -plugin-dir=<path>
    The plugin directory is used to discover Nomad plugins. If not specified,
    the plugin directory defaults to be that of <data-dir>/plugins/.

  -dc=<datacenter>
    The name of the datacenter this Nomad agent is a member of. By
    default this is set to "dc1".

  -log-level=<level>
    Specify the verbosity level of Nomad's logs. Valid values include
    DEBUG, INFO, and WARN, in decreasing order of verbosity. The
    default is INFO.

  -log-json
    Output logs in a JSON format. The default is false.

  -node=<name>
    The name of the local agent. This name is used to identify the node
    in the cluster. The name must be unique per region. The default is
    the current hostname of the machine.

  -region=<region>
    Name of the region the Nomad agent will be a member of. By default
    this value is set to "global".

  -dev
    Start the agent in development mode. This enables a pre-configured
    dual-role agent (client + server) which is useful for developing
    or testing Nomad. No other configuration is required to start the
    agent in this mode.

Server Options:

  -server
    Enable server mode for the agent. Agents in server mode are
    clustered together and handle the additional responsibility of
    leader election, data replication, and scheduling work onto
    eligible client nodes.

  -bootstrap-expect=<num>
    Configures the expected number of servers nodes to wait for before
    bootstrapping the cluster. Once <num> servers have joined each other,
    Nomad initiates the bootstrap process.

  -encrypt=<key>
    Provides the gossip encryption key

  -join=<address>
    Address of an agent to join at start time. Can be specified
    multiple times.

  -raft-protocol=<num>
    The Raft protocol version to use. Used for enabling certain Autopilot
    features. Defaults to 2.

  -retry-join=<address>
    Address of an agent to join at start time with retries enabled.
    Can be specified multiple times.

  -retry-max=<num>
    Maximum number of join attempts. Defaults to 0, which will retry
    indefinitely.

  -retry-interval=<dur>
    Time to wait between join attempts.

  -rejoin
    Ignore a previous leave and attempts to rejoin the cluster.

Client Options:

  -client
    Enable client mode for the agent. Client mode enables a given node to be
    evaluated for allocations. If client mode is not enabled, no work will be
    scheduled to the agent.

  -state-dir
    The directory used to store state and other persistent data. If not
    specified a subdirectory under the "-data-dir" will be used.

  -alloc-dir
    The directory used to store allocation data such as downloaded artifacts as
    well as data produced by tasks. If not specified, a subdirectory under the
    "-data-dir" will be used.

  -servers
    A list of known server addresses to connect to given as "host:port" and
    delimited by commas.

  -node-class
    Mark this node as a member of a node-class. This can be used to label
    similar node types.

  -meta
    User specified metadata to associated with the node. Each instance of -meta
    parses a single KEY=VALUE pair. Repeat the meta flag for each key/value pair
    to be added.

  -network-interface
    Forces the network fingerprinter to use the specified network interface.

  -network-speed
    The default speed for network interfaces in MBits if the link speed can not
    be determined dynamically.

ACL Options:

  -acl-enabled
    Specifies whether the agent should enable ACLs.

  -acl-replication-token
    The replication token for servers to use when replicating from the
    authoritative region. The token must be a valid management token from the
    authoritative region.

Consul Options:

  -consul-address=<addr>
    Specifies the address to the local Consul agent, given in the format host:port.
    Supports Unix sockets with the format: unix:///tmp/consul/consul.sock

  -consul-auth=<auth>
    Specifies the HTTP Basic Authentication information to use for access to the
    Consul Agent, given in the format username:password.

  -consul-auto-advertise
    Specifies if Nomad should advertise its services in Consul. The services
    are named according to server_service_name and client_service_name. Nomad
    servers and clients advertise their respective services, each tagged
    appropriately with either http or rpc tag. Nomad servers also advertise a
    serf tagged service.

  -consul-ca-file=<path>
    Specifies an optional path to the CA certificate used for Consul communication.
    This defaults to the system bundle if unspecified.

  -consul-cert-file=<path>
    Specifies the path to the certificate used for Consul communication. If this
    is set then you need to also set key_file.

  -consul-checks-use-advertise
    Specifies if Consul heath checks should bind to the advertise address. By
    default, this is the bind address.

  -consul-client-auto-join
    Specifies if the Nomad clients should automatically discover servers in the
    same region by searching for the Consul service name defined in the
    server_service_name option.

  -consul-client-service-name=<name>
    Specifies the name of the service in Consul for the Nomad clients.

  -consul-client-http-check-name=<name>
    Specifies the HTTP health check name in Consul for the Nomad clients.

  -consul-key-file=<path>
    Specifies the path to the private key used for Consul communication. If this
    is set then you need to also set cert_file.

  -consul-server-service-name=<name>
    Specifies the name of the service in Consul for the Nomad servers.

  -consul-server-http-check-name=<name>
    Specifies the HTTP health check name in Consul for the Nomad servers.

  -consul-server-serf-check-name=<name>
    Specifies the Serf health check name in Consul for the Nomad servers.

  -consul-server-rpc-check-name=<name>
    Specifies the RPC health check name in Consul for the Nomad servers.

  -consul-server-auto-join
    Specifies if the Nomad servers should automatically discover and join other
    Nomad servers by searching for the Consul service name defined in the
    server_service_name option. This search only happens if the server does not
    have a leader.

  -consul-ssl
    Specifies if the transport scheme should use HTTPS to communicate with the
    Consul agent.

  -consul-token=<token>
    Specifies the token used to provide a per-request ACL token.

  -consul-verify-ssl
    Specifies if SSL peer verification should be used when communicating to the
    Consul API client over HTTPS.

Vault Options:

  -vault-enabled
    Whether to enable or disable Vault integration.

  -vault-address=<addr>
    The address to communicate with Vault. This should be provided with the http://
    or https:// prefix.

  -vault-token=<token>
    The Vault token used to derive tokens from Vault on behalf of clients.
    This only needs to be set on Servers. Overrides the Vault token read from
    the VAULT_TOKEN environment variable.

  -vault-create-from-role=<role>
    The role name to create tokens for tasks from.

  -vault-allow-unauthenticated
    Whether to allow jobs to be submitted that request Vault Tokens but do not
    authentication. The flag only applies to Servers.

  -vault-ca-file=<path>
    The path to a PEM-encoded CA cert file to use to verify the Vault server SSL
    certificate.

  -vault-ca-path=<path>
    The path to a directory of PEM-encoded CA cert files to verify the Vault server
    certificate.

  -vault-cert-file=<token>
    The path to the certificate for Vault communication.

  -vault-key-file=<addr>
    The path to the private key for Vault communication.

  -vault-tls-skip-verify=<token>
    Enables or disables SSL certificate verification.

  -vault-tls-server-name=<token>
    Used to set the SNI host when connecting over TLS.
`
	return strings.TrimSpace(helpText)
}