// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package command

import (
	"fmt"
	"math"
	"os"
	"sort"
	"strconv"
	"strings"
	"time"

	humanize "github.com/dustin/go-humanize"
	"github.com/hernad/nomad/api"
	"github.com/hernad/nomad/api/contexts"
	"github.com/hernad/nomad/helper/pointer"
	"github.com/posener/complete"
)

const (
	// floatFormat is a format string for formatting floats.
	floatFormat = "#,###.##"

	// bytesPerMegabyte is the number of bytes per MB
	bytesPerMegabyte = 1024 * 1024
)

// NodeStatusCommand implements "nomad node status". The embedded Meta
// supplies the shared CLI plumbing (UI, client/connection flags); the
// remaining fields hold the parsed options for a single invocation.
type NodeStatusCommand struct {
	Meta
	length      int    // ID display width: shortId, or fullId when -verbose
	short       bool   // -short: condensed single-node output
	os          bool   // -os: include operating system column in list mode
	quiet       bool   // -quiet: print node IDs only
	verbose     bool   // -verbose: full-length IDs and extra detail
	list_allocs bool   // -allocs: show running-allocation counts in list mode
	self        bool   // -self: query the local node
	stats       bool   // -stats: show detailed resource usage statistics
	json        bool   // -json: emit JSON
	perPage     int    // -per-page: pagination page size
	pageToken   string // -page-token: pagination start token
	filter      string // -filter: server-side filter expression
	tmpl        string // -t: Go template for output formatting
}

// Help returns the long-form usage text shown by "nomad node status -h".
// NOTE(review): interior indentation of this literal was reconstructed from
// a whitespace-mangled listing — confirm against the upstream file.
func (c *NodeStatusCommand) Help() string {
	helpText := `
Usage: nomad node status [options] <node>

  Display status information about a given node. The list of nodes
  returned includes only nodes which jobs may be scheduled to, and
  includes status and other high-level information.

  If a node ID is passed, information for that specific node will be displayed,
  including resource usage statistics. If no node ID's are passed, then a
  short-hand list of all nodes will be displayed. The -self flag is useful to
  quickly access the status of the local node.

  If ACLs are enabled, this option requires a token with the 'node:read'
  capability.

General Options:

  ` + generalOptionsUsage(usageOptsDefault|usageOptsNoNamespace) + `

Node Status Options:

  -self
    Query the status of the local node.

  -stats
    Display detailed resource usage statistics.

  -allocs
    Display a count of running allocations for each node.

  -short
    Display short output. Used only when a single node is being
    queried, and drops verbose output about node allocations.

  -verbose
    Display full information.

  -per-page
    How many results to show per page.

  -page-token
    Where to start pagination.

  -filter
    Specifies an expression used to filter query results.

  -os
    Display operating system name.

  -quiet
    Display only node IDs.

  -json
    Output the node in its JSON format.

  -t
    Format and display node using a Go template.
`
	return strings.TrimSpace(helpText)
}

// Synopsis returns the one-line description used in command listings.
func (c *NodeStatusCommand) Synopsis() string {
	return "Display status information about nodes"
}

// AutocompleteFlags advertises the command's flags for shell completion.
func (c *NodeStatusCommand) AutocompleteFlags() complete.Flags {
	return mergeAutocompleteFlags(c.Meta.AutocompleteFlags(FlagSetClient),
		complete.Flags{
			"-allocs":     complete.PredictNothing,
			"-filter":     complete.PredictAnything,
			"-json":       complete.PredictNothing,
			"-per-page":   complete.PredictAnything,
			"-page-token": complete.PredictAnything,
			"-self":       complete.PredictNothing,
			"-short":      complete.PredictNothing,
			"-stats":      complete.PredictNothing,
			"-t":          complete.PredictAnything,
			"-os":         complete.PredictAnything,
			"-quiet":      complete.PredictAnything,
			"-verbose":    complete.PredictNothing,
		})
}

// AutocompleteArgs completes node IDs by prefix-searching the cluster.
// Errors are swallowed: completion silently degrades to no suggestions.
func (c *NodeStatusCommand) AutocompleteArgs() complete.Predictor {
	return complete.PredictFunc(func(a complete.Args) []string {
		client, err := c.Meta.Client()
		if err != nil {
			return nil
		}

		resp, _, err := client.Search().PrefixSearch(a.Last, contexts.Nodes, nil)
		if err != nil {
			return []string{}
		}
		return resp.Matches[contexts.Nodes]
	})
}

// Name returns the registered name of the command.
func (c *NodeStatusCommand) Name() string { return "node status" }

// Run parses flags and dispatches to either the node-list view (no node
// argument, no -self) or the single-node view. Returns a process exit code.
func (c *NodeStatusCommand) Run(args []string) int {

	flags := c.Meta.FlagSet(c.Name(), FlagSetClient)
	flags.Usage = func() { c.Ui.Output(c.Help()) }
	// Register all command-line options onto the struct fields.
	flags.BoolVar(&c.short, "short", false, "")
	flags.BoolVar(&c.os, "os", false, "")
	flags.BoolVar(&c.quiet, "quiet", false, "")
	flags.BoolVar(&c.verbose, "verbose", false, "")
	flags.BoolVar(&c.list_allocs, "allocs", false, "")
	flags.BoolVar(&c.self, "self", false, "")
	flags.BoolVar(&c.stats, "stats", false, "")
	flags.BoolVar(&c.json, "json", false, "")
	flags.StringVar(&c.tmpl, "t", "", "")
	flags.StringVar(&c.filter, "filter", "", "")
	flags.IntVar(&c.perPage, "per-page", 0, "")
	flags.StringVar(&c.pageToken, "page-token", "", "")

	if err := flags.Parse(args); err != nil {
		return 1
	}

	// Check that we got either a single node or none
	args = flags.Args()
	if len(args) > 1 {
		c.Ui.Error("This command takes either one or no arguments")
		c.Ui.Error(commandErrorText(c))
		return 1
	}

	// Truncate the id unless full length is requested
	c.length = shortId
	if c.verbose {
		c.length = fullId
	}

	// Get the HTTP client
	client, err := c.Meta.Client()
	if err != nil {
		c.Ui.Error(fmt.Sprintf("Error initializing client: %s", err))
		return 1
	}

	// Use list mode if no node name was provided
	if len(args) == 0 && !c.self {
		// -quiet strips output down to bare IDs, which contradicts both
		// -verbose and -json.
		if c.quiet && (c.verbose || c.json) {
			c.Ui.Error("-quiet cannot be used with -verbose or -json")
			return 1
		}

		// Set up the options to capture any filter passed and pagination
		// details.
		opts := api.QueryOptions{
			Filter:    c.filter,
			PerPage:   int32(c.perPage),
			NextToken: c.pageToken,
		}

		// If the user requested showing the node OS, include this within the
		// query params.
		if c.os {
			opts.Params = map[string]string{"os": "true"}
		}

		// Query the node info
		nodes, qm, err := client.Nodes().List(&opts)
		if err != nil {
			c.Ui.Error(fmt.Sprintf("Error querying node status: %s", err))
			return 1
		}

		// If output format is specified, format and output the node data list
		if c.json || len(c.tmpl) > 0 {
			out, err := Format(c.json, c.tmpl, nodes)
			if err != nil {
				c.Ui.Error(err.Error())
				return 1
			}

			c.Ui.Output(out)
			return 0
		}

		// Return nothing if no nodes found
		if len(nodes) == 0 {
			return 0
		}

		// Quiet mode has no header row, so the output slice is one shorter.
		var size int
		if c.quiet {
			size = len(nodes)
		} else {
			size = len(nodes) + 1
		}

		// Format the nodes list
		out := make([]string, size)

		if c.quiet {
			for i, node := range nodes {
				out[i] = node.ID
			}
			c.Ui.Output(formatList(out))
			return 0
		}

		// Build the header incrementally; optional columns depend on flags.
		out[0] = "ID|Node Pool|DC|Name|Class|"

		if c.os {
			out[0] += "OS|"
		}

		if c.verbose {
			out[0] += "Address|Version|"
		}

		out[0] += "Drain|Eligibility|Status"

		if c.list_allocs {
			out[0] += "|Running Allocs"
		}

		// One row per node, mirroring the optional columns above.
		for i, node := range nodes {
			out[i+1] = fmt.Sprintf("%s|%s|%s|%s|%s",
				limit(node.ID, c.length),
				node.NodePool,
				node.Datacenter,
				node.Name,
				node.NodeClass)
			if c.os {
				out[i+1] += fmt.Sprintf("|%s", node.Attributes["os.name"])
			}
			if c.verbose {
				out[i+1] += fmt.Sprintf("|%s|%s",
					node.Address, node.Version)
			}
			out[i+1] += fmt.Sprintf("|%v|%s|%s",
				node.Drain,
				node.SchedulingEligibility,
				node.Status)

			// -allocs costs one extra API round-trip per node.
			if c.list_allocs {
				numAllocs, err := getRunningAllocs(client, node.ID)
				if err != nil {
					c.Ui.Error(fmt.Sprintf("Error querying node allocations: %s", err))
					return 1
				}
				out[i+1] += fmt.Sprintf("|%v",
					len(numAllocs))
			}
		}

		// Dump the output
		c.Ui.Output(formatList(out))

		// A non-empty NextToken means there are more pages to fetch.
		if qm.NextToken !=
"" {
			c.Ui.Output(fmt.Sprintf(`
Results have been paginated. To get the next page run:

%s -page-token %s`, argsWithoutPageToken(os.Args), qm.NextToken))
		}

		return 0
	}

	// Query the specific node
	var nodeID string
	if !c.self {
		nodeID = args[0]
	} else {
		var err error
		if nodeID, err = getLocalNodeID(client); err != nil {
			c.Ui.Error(err.Error())
			return 1
		}
	}
	// Prefix lookups need at least two characters to be useful.
	if len(nodeID) == 1 {
		c.Ui.Error("Identifier must contain at least two characters.")
		return 1
	}

	nodeID = sanitizeUUIDPrefix(nodeID)
	nodes, _, err := client.Nodes().PrefixList(nodeID)
	if err != nil {
		c.Ui.Error(fmt.Sprintf("Error querying node info: %s", err))
		return 1
	}
	// Return error if no nodes are found
	if len(nodes) == 0 {
		c.Ui.Error(fmt.Sprintf("No node(s) with prefix %q found", nodeID))
		return 1
	}
	// Ambiguous prefix: list the matches so the user can narrow it down.
	if len(nodes) > 1 {
		// Dump the output
		c.Ui.Error(fmt.Sprintf("Prefix matched multiple nodes\n\n%s",
			formatNodeStubList(nodes, c.verbose)))
		return 1
	}

	// Prefix lookup matched a single node
	node, _, err := client.Nodes().Info(nodes[0].ID, nil)
	if err != nil {
		c.Ui.Error(fmt.Sprintf("Error querying node info: %s", err))
		return 1
	}

	// If output format is specified, format and output the data
	if c.json || len(c.tmpl) > 0 {
		out, err := Format(c.json, c.tmpl, node)
		if err != nil {
			c.Ui.Error(err.Error())
			return 1
		}

		c.Ui.Output(out)
		return 0
	}

	return c.formatNode(client, node)
}

// nodeDrivers derives the list of enabled drivers from the node's
// attributes (keys of the form "driver.<name>" with value "1"), sorted.
func nodeDrivers(n *api.Node) []string {
	var drivers []string
	for k, v := range n.Attributes {
		// driver.docker = 1
		parts := strings.Split(k, ".")
		if len(parts) != 2 {
			continue
		} else if parts[0] != "driver" {
			continue
		} else if v != "1" {
			continue
		}

		drivers = append(drivers, parts[1])
	}

	sort.Strings(drivers)
	return drivers
}

// nodeCSIControllerNames returns the sorted names of the node's CSI
// controller plugins.
func nodeCSIControllerNames(n *api.Node) []string {
	var names []string
	for name := range n.CSIControllerPlugins {
		names = append(names, name)
	}
	sort.Strings(names)
	return names
}

// nodeCSINodeNames returns the sorted names of the node's CSI node plugins.
func nodeCSINodeNames(n *api.Node) []string {
	var names []string
	for name := range n.CSINodePlugins {
		names = append(names, name)
	}
	sort.Strings(names)
	return names
}

// nodeCSIVolumeNames returns the sorted names of CSI volumes requested by
// the given allocations' task groups.
func nodeCSIVolumeNames(allocs []*api.Allocation) []string {
	var names []string
	for _, alloc := range allocs {
		tg := alloc.GetTaskGroup()
		if tg == nil || len(tg.Volumes) == 0 {
			continue
		}

		for _, v := range tg.Volumes {
			if v.Type == api.CSIVolumeTypeCSI {
				names = append(names, v.Name)
			}
		}
	}
	sort.Strings(names)
	return names
}

// nodeVolumeNames returns the sorted names of the node's host volumes.
func nodeVolumeNames(n *api.Node) []string {
	var volumes []string
	for name := range n.HostVolumes {
		volumes = append(volumes, name)
	}

	sort.Strings(volumes)
	return volumes
}

// nodeNetworkNames returns the sorted names of the node's host networks.
func nodeNetworkNames(n *api.Node) []string {
	var networks []string
	for name := range n.HostNetworks {
		networks = append(networks, name)
	}

	sort.Strings(networks)
	return networks
}

// formatDrain renders the node's drain state. Without an active drain
// strategy it is a bare "true"/"false"; with one, deadline and system-job
// handling are appended.
func formatDrain(n *api.Node) string {
	if n.DrainStrategy != nil {
		b := new(strings.Builder)
		b.WriteString("true")
		// A negative deadline marks a forced drain.
		if n.DrainStrategy.DrainSpec.Deadline.Nanoseconds() < 0 {
			b.WriteString("; force drain")
		} else if n.DrainStrategy.ForceDeadline.IsZero() {
			b.WriteString("; no deadline")
		} else {
			fmt.Fprintf(b, "; %s deadline", formatTime(n.DrainStrategy.ForceDeadline))
		}

		if n.DrainStrategy.IgnoreSystemJobs {
			b.WriteString("; ignoring system jobs")
		}
		return b.String()
	}

	return strconv.FormatBool(n.Drain)
}

// formatNode renders the full single-node view: summary, optional stats,
// resource utilization, events, and allocations. Returns an exit code.
func (c *NodeStatusCommand) formatNode(client *api.Client, node *api.Node) int {
	// Make one API call for allocations
	nodeAllocs, _,
err := client.Nodes().Allocations(node.ID, nil)
	if err != nil {
		c.Ui.Error(fmt.Sprintf("Error querying node allocations: %s", err))
		return 1
	}

	// Keep only the allocations currently running; several resource
	// summaries below are computed from this subset.
	var runningAllocs []*api.Allocation
	for _, alloc := range nodeAllocs {
		if alloc.ClientStatus == "running" {
			runningAllocs = append(runningAllocs, alloc)
		}
	}

	// Format the header output
	basic := []string{
		fmt.Sprintf("ID|%s", node.ID),
		fmt.Sprintf("Name|%s", node.Name),
		fmt.Sprintf("Node Pool|%s", node.NodePool),
		fmt.Sprintf("Class|%s", node.NodeClass),
		fmt.Sprintf("DC|%s", node.Datacenter),
		fmt.Sprintf("Drain|%v", formatDrain(node)),
		fmt.Sprintf("Eligibility|%s", node.SchedulingEligibility),
		fmt.Sprintf("Status|%s", node.Status),
		fmt.Sprintf("CSI Controllers|%s", strings.Join(nodeCSIControllerNames(node), ",")),
		fmt.Sprintf("CSI Drivers|%s", strings.Join(nodeCSINodeNames(node), ",")),
	}

	// Short mode: print the summary plus allocations and return early,
	// skipping the stats queries entirely.
	if c.short {
		basic = append(basic, fmt.Sprintf("Host Volumes|%s", strings.Join(nodeVolumeNames(node), ",")))
		basic = append(basic, fmt.Sprintf("Host Networks|%s", strings.Join(nodeNetworkNames(node), ",")))
		basic = append(basic, fmt.Sprintf("CSI Volumes|%s", strings.Join(nodeCSIVolumeNames(runningAllocs), ",")))
		basic = append(basic, fmt.Sprintf("Drivers|%s", strings.Join(nodeDrivers(node), ",")))
		c.Ui.Output(c.Colorize().Color(formatKV(basic)))

		// Output alloc info
		if err := c.outputAllocInfo(node, nodeAllocs); err != nil {
			c.Ui.Error(fmt.Sprintf("%s", err))
			return 1
		}

		return 0
	}

	// Get the host stats. A failure here is reported but not fatal; the
	// sections below degrade gracefully when hostStats is nil.
	hostStats, nodeStatsErr := client.Nodes().Stats(node.ID, nil)
	if nodeStatsErr != nil {
		c.Ui.Output("")
		c.Ui.Error(fmt.Sprintf("error fetching node stats: %v", nodeStatsErr))
	}
	if hostStats != nil {
		uptime := time.Duration(hostStats.Uptime * uint64(time.Second))
		basic = append(basic, fmt.Sprintf("Uptime|%s", uptime.String()))
	}

	// When we're not running in verbose mode, then also include host volumes and
	// driver info in the basic output
	if !c.verbose {
		basic = append(basic, fmt.Sprintf("Host Volumes|%s", strings.Join(nodeVolumeNames(node), ",")))
		basic = append(basic, fmt.Sprintf("Host Networks|%s", strings.Join(nodeNetworkNames(node), ",")))
		basic = append(basic, fmt.Sprintf("CSI Volumes|%s", strings.Join(nodeCSIVolumeNames(runningAllocs), ",")))
		driverStatus := fmt.Sprintf("Driver Status| %s", c.outputTruncatedNodeDriverInfo(node))
		basic = append(basic, driverStatus)
	}

	// Output the basic info
	c.Ui.Output(c.Colorize().Color(formatKV(basic)))

	// If we're running in verbose mode, include full host volume and driver info
	if c.verbose {
		c.outputNodeVolumeInfo(node)
		c.outputNodeNetworkInfo(node)
		c.outputNodeCSIVolumeInfo(client, node, runningAllocs)
		c.outputNodeDriverInfo(node)
	}

	// Emit node events
	c.outputNodeStatusEvents(node)

	// Get list of running allocations on the node
	allocatedResources := getAllocatedResources(client, runningAllocs, node)
	c.Ui.Output(c.Colorize().Color("\n[bold]Allocated Resources[reset]"))
	c.Ui.Output(formatList(allocatedResources))

	// Live per-allocation usage; errors are silently skipped here (this
	// section is best-effort).
	actualResources, err := getActualResources(client, runningAllocs, node)
	if err == nil {
		c.Ui.Output(c.Colorize().Color("\n[bold]Allocation Resource Utilization[reset]"))
		c.Ui.Output(formatList(actualResources))
	}

	hostResources, err := getHostResources(hostStats, node)
	if err != nil {
		c.Ui.Output("")
		c.Ui.Error(fmt.Sprintf("error fetching node stats: %v", err))
	}
	if err == nil {
		c.Ui.Output(c.Colorize().Color("\n[bold]Host Resource Utilization[reset]"))
		c.Ui.Output(formatList(hostResources))
	}

	// Device utilization is only meaningful when host stats were available.
	if err == nil && node.NodeResources != nil && len(node.NodeResources.Devices) > 0 {
		c.Ui.Output(c.Colorize().Color("\n[bold]Device Resource Utilization[reset]"))
		c.Ui.Output(formatList(getDeviceResourcesForNode(hostStats.DeviceStats, node)))
	}
	// Detailed host statistics are only shown on request (-stats) and when
	// the earlier stats query succeeded.
	if hostStats != nil && c.stats {
		c.Ui.Output(c.Colorize().Color("\n[bold]CPU Stats[reset]"))
		c.printCpuStats(hostStats)
		c.Ui.Output(c.Colorize().Color("\n[bold]Memory Stats[reset]"))
		c.printMemoryStats(hostStats)
		c.Ui.Output(c.Colorize().Color("\n[bold]Disk Stats[reset]"))
		c.printDiskStats(hostStats)
		if len(hostStats.DeviceStats) > 0 {
			c.Ui.Output(c.Colorize().Color("\n[bold]Device Stats[reset]"))
			printDeviceStats(c.Ui, hostStats.DeviceStats)
		}
	}

	if err := c.outputAllocInfo(node, nodeAllocs); err != nil {
		c.Ui.Error(fmt.Sprintf("%s", err))
		return 1
	}

	return 0
}

// outputAllocInfo prints the node's allocation table, plus attributes,
// device attributes, and metadata when running verbose. Always returns nil.
func (c *NodeStatusCommand) outputAllocInfo(node *api.Node, nodeAllocs []*api.Allocation) error {
	c.Ui.Output(c.Colorize().Color("\n[bold]Allocations[reset]"))
	c.Ui.Output(formatAllocList(nodeAllocs, c.verbose, c.length))

	if c.verbose {
		c.formatAttributes(node)
		c.formatDeviceAttributes(node)
		c.formatMeta(node)
	}

	return nil
}

// outputTruncatedNodeDriverInfo returns a one-line, comma-separated summary
// of detected drivers, flagging unhealthy ones.
func (c *NodeStatusCommand) outputTruncatedNodeDriverInfo(node *api.Node) string {
	drivers := make([]string, 0, len(node.Drivers))

	for driverName, driverInfo := range node.Drivers {
		if !driverInfo.Detected {
			continue
		}

		if !driverInfo.Healthy {
			drivers = append(drivers, fmt.Sprintf("%s (unhealthy)", driverName))
		} else {
			drivers = append(drivers, driverName)
		}
	}
	sort.Strings(drivers)
	return strings.Trim(strings.Join(drivers, ","), ", ")
}

// outputNodeVolumeInfo prints a table of the node's host volumes; prints
// nothing when the node has none.
func (c *NodeStatusCommand) outputNodeVolumeInfo(node *api.Node) {

	names := make([]string, 0, len(node.HostVolumes))
	for name := range node.HostVolumes {
		names = append(names, name)
	}
	sort.Strings(names)

	output := make([]string, 0, len(names)+1)
	output = append(output, "Name|ReadOnly|Source")

	if len(names) > 0 {
		c.Ui.Output(c.Colorize().Color("\n[bold]Host Volumes"))
		for _, volName := range names {
			info := node.HostVolumes[volName]
			output = append(output, fmt.Sprintf("%s|%v|%s", volName, info.ReadOnly, info.Path))
		}
		c.Ui.Output(formatList(output))
	}
}

// outputNodeNetworkInfo prints a table of the node's host networks; prints
// nothing when the node has none.
func (c *NodeStatusCommand) outputNodeNetworkInfo(node *api.Node) {

	names := make([]string, 0, len(node.HostNetworks))
	for name := range node.HostNetworks {
		names = append(names, name)
	}
	sort.Strings(names)

	output := make([]string, 0, len(names)+1)
	output = append(output, "Name|CIDR|Interface|ReservedPorts")

	if len(names) > 0 {
		c.Ui.Output(c.Colorize().Color("\n[bold]Host Networks"))
		for _, hostNetworkName := range names {
			info := node.HostNetworks[hostNetworkName]
			output = append(output, fmt.Sprintf("%s|%v|%s|%s", hostNetworkName, info.CIDR, info.Interface, info.ReservedPorts))
		}
		c.Ui.Output(formatList(output))
	}
}

// outputNodeCSIVolumeInfo prints a table of CSI volumes requested by the
// given running allocations, with current status fetched from the server.
func (c *NodeStatusCommand) outputNodeCSIVolumeInfo(client *api.Client, node *api.Node, runningAllocs []*api.Allocation) {

	// Duplicate nodeCSIVolumeNames to sort by name but also index volume names to ids
	var names []string
	requests := map[string]*api.VolumeRequest{}
	for _, alloc := range runningAllocs {
		tg := alloc.GetTaskGroup()
		if tg == nil || len(tg.Volumes) == 0 {
			continue
		}

		for _, v := range tg.Volumes {
			if v.Type == api.CSIVolumeTypeCSI {
				names = append(names, v.Name)
				requests[v.Source] = v
			}
		}
	}
	if len(names) == 0 {
		return
	}
	sort.Strings(names)

	// Fetch the volume objects with current status
	// Ignore an error, all we're going to do is omit the volumes
	volumes := map[string]*api.CSIVolumeListStub{}
	vs, _ := client.Nodes().CSIVolumes(node.ID, nil)
	for _, v := range vs {
		n, ok := requests[v.ID]
		if ok {
			volumes[n.Name] = v
		}
	}

	if len(names) > 0 {
		c.Ui.Output(c.Colorize().Color("\n[bold]CSI Volumes"))

		// Output the volumes in name order
		output := make([]string, 0, len(names)+1)
		output = append(output, "ID|Name|Namespace|Plugin ID|Schedulable|Provider|Access Mode")
		for _, name := range names {
			// Volumes whose status lookup failed above are simply omitted.
			v, ok := volumes[name]
			if ok {
				output = append(output, fmt.Sprintf(
					"%s|%s|%s|%s|%t|%s|%s",
					v.ID,
					name,
					v.Namespace,
					v.PluginID,
					v.Schedulable,
					v.Provider,
					v.AccessMode,
				))
			}
		}

		c.Ui.Output(formatList(output))
	}
}

// outputNodeDriverInfo prints the full driver table, sorted by driver name.
func (c *NodeStatusCommand) outputNodeDriverInfo(node *api.Node) {
	c.Ui.Output(c.Colorize().Color("\n[bold]Drivers"))

	size := len(node.Drivers)
	nodeDrivers := make([]string, 0, size+1)

	nodeDrivers = append(nodeDrivers, "Driver|Detected|Healthy|Message|Time")

	// Collect and sort names first so the table order is deterministic.
	drivers := make([]string, 0, len(node.Drivers))
	for driver := range node.Drivers {
		drivers = append(drivers, driver)
	}
	sort.Strings(drivers)

	for _, driver := range drivers {
		info := node.Drivers[driver]
		timestamp := formatTime(info.UpdateTime)
		nodeDrivers = append(nodeDrivers, fmt.Sprintf("%s|%v|%v|%s|%s", driver, info.Detected, info.Healthy, info.HealthDescription, timestamp))
	}
	c.Ui.Output(formatList(nodeDrivers))
}

// outputNodeStatusEvents prints the "Node Events" section header and table.
func (c *NodeStatusCommand) outputNodeStatusEvents(node *api.Node) {
	c.Ui.Output(c.Colorize().Color("\n[bold]Node Events"))
	c.outputNodeEvent(node.Events)
}

// outputNodeEvent renders node events newest-first (rows are written at
// index size-i, reversing the input order). Verbose mode adds a Details
// column.
func (c *NodeStatusCommand) outputNodeEvent(events []*api.NodeEvent) {
	size := len(events)
	nodeEvents := make([]string, size+1)
	if c.verbose {
		nodeEvents[0] = "Time|Subsystem|Message|Details"
	} else {
		nodeEvents[0] = "Time|Subsystem|Message"
	}

	for i, event := range events {
		timestamp := formatTime(event.Timestamp)
		subsystem := formatEventSubsystem(event.Subsystem, event.Details["driver"])
		msg := event.Message
		if c.verbose {
details := formatEventDetails(event.Details) 766 nodeEvents[size-i] = fmt.Sprintf("%s|%s|%s|%s", timestamp, subsystem, msg, details) 767 } else { 768 nodeEvents[size-i] = fmt.Sprintf("%s|%s|%s", timestamp, subsystem, msg) 769 } 770 } 771 c.Ui.Output(formatList(nodeEvents)) 772 } 773 774 func formatEventSubsystem(subsystem, driverName string) string { 775 if driverName == "" { 776 return subsystem 777 } 778 779 // If this event is for a driver, append the driver name to make the message 780 // clearer 781 return fmt.Sprintf("Driver: %s", driverName) 782 } 783 784 func formatEventDetails(details map[string]string) string { 785 output := make([]string, 0, len(details)) 786 for k, v := range details { 787 output = append(output, fmt.Sprintf("%s: %s", k, v)) 788 } 789 return strings.Join(output, ", ") 790 } 791 792 func (c *NodeStatusCommand) formatAttributes(node *api.Node) { 793 // Print the attributes 794 keys := make([]string, len(node.Attributes)) 795 for k := range node.Attributes { 796 keys = append(keys, k) 797 } 798 sort.Strings(keys) 799 800 var attributes []string 801 for _, k := range keys { 802 if k != "" { 803 attributes = append(attributes, fmt.Sprintf("%s|%s", k, node.Attributes[k])) 804 } 805 } 806 c.Ui.Output(c.Colorize().Color("\n[bold]Attributes[reset]")) 807 c.Ui.Output(formatKV(attributes)) 808 } 809 810 func (c *NodeStatusCommand) formatDeviceAttributes(node *api.Node) { 811 if node.NodeResources == nil { 812 return 813 } 814 devices := node.NodeResources.Devices 815 if len(devices) == 0 { 816 return 817 } 818 819 sort.Slice(devices, func(i, j int) bool { 820 return devices[i].ID() < devices[j].ID() 821 }) 822 823 first := true 824 for _, d := range devices { 825 if len(d.Attributes) == 0 { 826 continue 827 } 828 829 if first { 830 c.Ui.Output("\n[bold]Device Group Attributes[reset]") 831 first = false 832 } else { 833 c.Ui.Output("") 834 } 835 c.Ui.Output(formatKV(getDeviceAttributes(d))) 836 } 837 } 838 839 func (c *NodeStatusCommand) 
formatMeta(node *api.Node) { 840 c.Ui.Output(c.Colorize().Color("\n[bold]Meta[reset]")) 841 c.Ui.Output(formatNodeMeta(node.Meta)) 842 } 843 844 func (c *NodeStatusCommand) printCpuStats(hostStats *api.HostStats) { 845 l := len(hostStats.CPU) 846 for i, cpuStat := range hostStats.CPU { 847 cpuStatsAttr := make([]string, 4) 848 cpuStatsAttr[0] = fmt.Sprintf("CPU|%v", cpuStat.CPU) 849 cpuStatsAttr[1] = fmt.Sprintf("User|%v%%", humanize.FormatFloat(floatFormat, cpuStat.User)) 850 cpuStatsAttr[2] = fmt.Sprintf("System|%v%%", humanize.FormatFloat(floatFormat, cpuStat.System)) 851 cpuStatsAttr[3] = fmt.Sprintf("Idle|%v%%", humanize.FormatFloat(floatFormat, cpuStat.Idle)) 852 c.Ui.Output(formatKV(cpuStatsAttr)) 853 if i+1 < l { 854 c.Ui.Output("") 855 } 856 } 857 } 858 859 func (c *NodeStatusCommand) printMemoryStats(hostStats *api.HostStats) { 860 memoryStat := hostStats.Memory 861 memStatsAttr := make([]string, 4) 862 memStatsAttr[0] = fmt.Sprintf("Total|%v", humanize.IBytes(memoryStat.Total)) 863 memStatsAttr[1] = fmt.Sprintf("Available|%v", humanize.IBytes(memoryStat.Available)) 864 memStatsAttr[2] = fmt.Sprintf("Used|%v", humanize.IBytes(memoryStat.Used)) 865 memStatsAttr[3] = fmt.Sprintf("Free|%v", humanize.IBytes(memoryStat.Free)) 866 c.Ui.Output(formatKV(memStatsAttr)) 867 } 868 869 func (c *NodeStatusCommand) printDiskStats(hostStats *api.HostStats) { 870 l := len(hostStats.DiskStats) 871 for i, diskStat := range hostStats.DiskStats { 872 diskStatsAttr := make([]string, 7) 873 diskStatsAttr[0] = fmt.Sprintf("Device|%s", diskStat.Device) 874 diskStatsAttr[1] = fmt.Sprintf("MountPoint|%s", diskStat.Mountpoint) 875 diskStatsAttr[2] = fmt.Sprintf("Size|%s", humanize.IBytes(diskStat.Size)) 876 diskStatsAttr[3] = fmt.Sprintf("Used|%s", humanize.IBytes(diskStat.Used)) 877 diskStatsAttr[4] = fmt.Sprintf("Available|%s", humanize.IBytes(diskStat.Available)) 878 diskStatsAttr[5] = fmt.Sprintf("Used Percent|%v%%", humanize.FormatFloat(floatFormat, diskStat.UsedPercent)) 879 
diskStatsAttr[6] = fmt.Sprintf("Inodes Percent|%v%%", humanize.FormatFloat(floatFormat, diskStat.InodesUsedPercent)) 880 c.Ui.Output(formatKV(diskStatsAttr)) 881 if i+1 < l { 882 c.Ui.Output("") 883 } 884 } 885 } 886 887 // getRunningAllocs returns a slice of allocation id's running on the node 888 func getRunningAllocs(client *api.Client, nodeID string) ([]*api.Allocation, error) { 889 var allocs []*api.Allocation 890 891 // Query the node allocations 892 nodeAllocs, _, err := client.Nodes().Allocations(nodeID, nil) 893 // Filter list to only running allocations 894 for _, alloc := range nodeAllocs { 895 if alloc.ClientStatus == "running" { 896 allocs = append(allocs, alloc) 897 } 898 } 899 return allocs, err 900 } 901 902 // getAllocatedResources returns the resource usage of the node. 903 func getAllocatedResources(client *api.Client, runningAllocs []*api.Allocation, node *api.Node) []string { 904 // Compute the total 905 total := computeNodeTotalResources(node) 906 907 // Get Resources 908 var cpu, mem, disk int 909 for _, alloc := range runningAllocs { 910 cpu += *alloc.Resources.CPU 911 mem += *alloc.Resources.MemoryMB 912 disk += *alloc.Resources.DiskMB 913 } 914 915 resources := make([]string, 2) 916 resources[0] = "CPU|Memory|Disk" 917 resources[1] = fmt.Sprintf("%d/%d MHz|%s/%s|%s/%s", 918 cpu, 919 *total.CPU, 920 humanize.IBytes(uint64(mem*bytesPerMegabyte)), 921 humanize.IBytes(uint64(*total.MemoryMB*bytesPerMegabyte)), 922 humanize.IBytes(uint64(disk*bytesPerMegabyte)), 923 humanize.IBytes(uint64(*total.DiskMB*bytesPerMegabyte))) 924 925 return resources 926 } 927 928 // computeNodeTotalResources returns the total allocatable resources (resources 929 // minus reserved) 930 func computeNodeTotalResources(node *api.Node) api.Resources { 931 total := api.Resources{} 932 933 r := node.Resources 934 res := node.Reserved 935 if res == nil { 936 res = &api.Resources{} 937 } 938 total.CPU = pointer.Of(*r.CPU - *res.CPU) 939 total.MemoryMB = 
pointer.Of(*r.MemoryMB - *res.MemoryMB) 940 total.DiskMB = pointer.Of(*r.DiskMB - *res.DiskMB) 941 return total 942 } 943 944 // getActualResources returns the actual resource usage of the allocations. 945 func getActualResources(client *api.Client, runningAllocs []*api.Allocation, node *api.Node) ([]string, error) { 946 // Compute the total 947 total := computeNodeTotalResources(node) 948 949 // Get Resources 950 var cpu float64 951 var mem uint64 952 for _, alloc := range runningAllocs { 953 // Make the call to the client to get the actual usage. 954 stats, err := client.Allocations().Stats(alloc, nil) 955 if err != nil { 956 return nil, err 957 } 958 959 cpu += stats.ResourceUsage.CpuStats.TotalTicks 960 if stats.ResourceUsage.MemoryStats.Usage > 0 { 961 mem += stats.ResourceUsage.MemoryStats.Usage 962 } else { 963 mem += stats.ResourceUsage.MemoryStats.RSS 964 } 965 } 966 967 resources := make([]string, 2) 968 resources[0] = "CPU|Memory" 969 resources[1] = fmt.Sprintf("%v/%d MHz|%v/%v", 970 math.Floor(cpu), 971 *total.CPU, 972 humanize.IBytes(mem), 973 humanize.IBytes(uint64(*total.MemoryMB*bytesPerMegabyte))) 974 975 return resources, nil 976 } 977 978 // getHostResources returns the actual resource usage of the node. 
979 func getHostResources(hostStats *api.HostStats, node *api.Node) ([]string, error) { 980 if hostStats == nil { 981 return nil, fmt.Errorf("actual resource usage not present") 982 } 983 var resources []string 984 985 // calculate disk usage 986 storageDevice := node.Attributes["unique.storage.volume"] 987 var diskUsed, diskSize uint64 988 var physical bool 989 for _, disk := range hostStats.DiskStats { 990 if disk.Device == storageDevice { 991 diskUsed = disk.Used 992 diskSize = disk.Size 993 physical = true 994 } 995 } 996 997 resources = make([]string, 2) 998 resources[0] = "CPU|Memory|Disk" 999 if physical { 1000 resources[1] = fmt.Sprintf("%v/%d MHz|%s/%s|%s/%s", 1001 math.Floor(hostStats.CPUTicksConsumed), 1002 *node.Resources.CPU, 1003 humanize.IBytes(hostStats.Memory.Used), 1004 humanize.IBytes(hostStats.Memory.Total), 1005 humanize.IBytes(diskUsed), 1006 humanize.IBytes(diskSize), 1007 ) 1008 } else { 1009 // If non-physical device are used, output device name only, 1010 // since nomad doesn't collect the stats data. 1011 resources[1] = fmt.Sprintf("%v/%d MHz|%s/%s|(%s)", 1012 math.Floor(hostStats.CPUTicksConsumed), 1013 *node.Resources.CPU, 1014 humanize.IBytes(hostStats.Memory.Used), 1015 humanize.IBytes(hostStats.Memory.Total), 1016 storageDevice, 1017 ) 1018 } 1019 return resources, nil 1020 } 1021 1022 // formatNodeStubList is used to return a table format of a list of node stubs. 
1023 func formatNodeStubList(nodes []*api.NodeListStub, verbose bool) string { 1024 // Return error if no nodes are found 1025 if len(nodes) == 0 { 1026 return "" 1027 } 1028 // Truncate the id unless full length is requested 1029 length := shortId 1030 if verbose { 1031 length = fullId 1032 } 1033 1034 // Format the nodes list that matches the prefix so that the user 1035 // can create a more specific request 1036 out := make([]string, len(nodes)+1) 1037 out[0] = "ID|DC|Name|Class|Drain|Eligibility|Status" 1038 for i, node := range nodes { 1039 out[i+1] = fmt.Sprintf("%s|%s|%s|%s|%v|%s|%s", 1040 limit(node.ID, length), 1041 node.Datacenter, 1042 node.Name, 1043 node.NodeClass, 1044 node.Drain, 1045 node.SchedulingEligibility, 1046 node.Status) 1047 } 1048 1049 return formatList(out) 1050 }