github.com/hernad/nomad@v1.6.112/command/helpers.go (about) 1 // Copyright (c) HashiCorp, Inc. 2 // SPDX-License-Identifier: MPL-2.0 3 4 package command 5 6 import ( 7 "bufio" 8 "bytes" 9 "encoding/json" 10 "fmt" 11 "io" 12 "os" 13 "path/filepath" 14 "strconv" 15 "strings" 16 "time" 17 18 gg "github.com/hashicorp/go-getter" 19 "github.com/hernad/nomad/api" 20 flaghelper "github.com/hernad/nomad/helper/flags" 21 "github.com/hernad/nomad/jobspec" 22 "github.com/hernad/nomad/jobspec2" 23 "github.com/kr/text" 24 "github.com/mitchellh/cli" 25 "github.com/posener/complete" 26 "github.com/ryanuber/columnize" 27 ) 28 29 const ( 30 formatJSON = "json" 31 formatHCL1 = "hcl1" 32 formatHCL2 = "hcl2" 33 ) 34 35 // maxLineLength is the maximum width of any line. 36 const maxLineLength int = 78 37 38 // formatKV takes a set of strings and formats them into properly 39 // aligned k = v pairs using the columnize library. 40 func formatKV(in []string) string { 41 columnConf := columnize.DefaultConfig() 42 columnConf.Empty = "<none>" 43 columnConf.Glue = " = " 44 return columnize.Format(in, columnConf) 45 } 46 47 // formatList takes a set of strings and formats them into properly 48 // aligned output, replacing any blank fields with a placeholder 49 // for awk-ability. 50 func formatList(in []string) string { 51 columnConf := columnize.DefaultConfig() 52 columnConf.Empty = "<none>" 53 return columnize.Format(in, columnConf) 54 } 55 56 // formatListWithSpaces takes a set of strings and formats them into properly 57 // aligned output. It should be used sparingly since it doesn't replace empty 58 // values and hence not awk/sed friendly 59 func formatListWithSpaces(in []string) string { 60 columnConf := columnize.DefaultConfig() 61 return columnize.Format(in, columnConf) 62 } 63 64 // Limits the length of the string. 
65 func limit(s string, length int) string { 66 if len(s) < length { 67 return s 68 } 69 70 return s[:length] 71 } 72 73 // indentString returns the string s padded with the given number of empty 74 // spaces before each line except for the first one. 75 func indentString(s string, pad int) string { 76 prefix := strings.Repeat(" ", pad) 77 return strings.Join(strings.Split(s, "\n"), fmt.Sprintf("\n%s", prefix)) 78 } 79 80 // wrapAtLengthWithPadding wraps the given text at the maxLineLength, taking 81 // into account any provided left padding. 82 func wrapAtLengthWithPadding(s string, pad int) string { 83 wrapped := text.Wrap(s, maxLineLength-pad) 84 lines := strings.Split(wrapped, "\n") 85 for i, line := range lines { 86 lines[i] = strings.Repeat(" ", pad) + line 87 } 88 return strings.Join(lines, "\n") 89 } 90 91 // wrapAtLength wraps the given text to maxLineLength. 92 func wrapAtLength(s string) string { 93 return wrapAtLengthWithPadding(s, 0) 94 } 95 96 // formatTime formats the time to string based on RFC822 97 func formatTime(t time.Time) string { 98 if t.Unix() < 1 { 99 // It's more confusing to display the UNIX epoch or a zero value than nothing 100 return "" 101 } 102 // Return ISO_8601 time format GH-3806 103 return t.Format("2006-01-02T15:04:05Z07:00") 104 } 105 106 // formatUnixNanoTime is a helper for formatting time for output. 107 func formatUnixNanoTime(nano int64) string { 108 t := time.Unix(0, nano) 109 return formatTime(t) 110 } 111 112 // formatTimeDifference takes two times and determines their duration difference 113 // truncating to a passed unit. 114 // E.g. formatTimeDifference(first=1m22s33ms, second=1m28s55ms, time.Second) -> 6s 115 func formatTimeDifference(first, second time.Time, d time.Duration) string { 116 return second.Truncate(d).Sub(first.Truncate(d)).String() 117 } 118 119 // fmtInt formats v into the tail of buf. 120 // It returns the index where the output begins. 
func fmtInt(buf []byte, v uint64) int {
	w := len(buf)
	// Write decimal digits right-to-left into the tail of buf. Note that
	// v == 0 writes nothing, leaving w at len(buf).
	for v > 0 {
		w--
		buf[w] = byte(v%10) + '0'
		v /= 10
	}
	return w
}

// prettyTimeDiff prints a human readable time difference.
// It uses abbreviated forms for each period - s for seconds, m for minutes, h for hours,
// d for days, mo for months, and y for years. Time difference is rounded to the nearest second,
// and the top two least granular periods are returned. For example, if the time difference
// is 10 months, 12 days, 3 hours and 2 seconds, the string "10mo12d" is returned. Zero values return the empty string
func prettyTimeDiff(first, second time.Time) string {
	// handle zero values
	if first.IsZero() || first.UnixNano() == 0 {
		return ""
	}
	// round to the nearest second
	first = first.Round(time.Second)
	second = second.Round(time.Second)

	// calculate time difference in seconds
	var d time.Duration
	messageSuffix := "ago"
	if second.Equal(first) || second.After(first) {
		d = second.Sub(first)
	} else {
		d = first.Sub(second)
		messageSuffix = "from now"
	}

	u := uint64(d.Seconds())

	// buf is filled from the end towards the front; w always points at the
	// first written byte, so buf[w:] holds the formatted string so far.
	var buf [32]byte
	w := len(buf)
	secs := u % 60

	// track indexes of various periods
	// indexes records the start offset (in buf) of each period as it is
	// written, so entries go from most granular (seconds) to least (years).
	var indexes []int

	if secs > 0 {
		w--
		buf[w] = 's'
		// u is now seconds
		w = fmtInt(buf[:w], secs)
		indexes = append(indexes, w)
	}
	u /= 60
	// u is now minutes
	if u > 0 {
		mins := u % 60
		if mins > 0 {
			w--
			buf[w] = 'm'
			w = fmtInt(buf[:w], mins)
			indexes = append(indexes, w)
		}
		u /= 60
		// u is now hours
		if u > 0 {
			hrs := u % 24
			if hrs > 0 {
				w--
				buf[w] = 'h'
				w = fmtInt(buf[:w], hrs)
				indexes = append(indexes, w)
			}
			u /= 24
		}
		// u is now days
		if u > 0 {
			days := u % 30
			if days > 0 {
				w--
				buf[w] = 'd'
				w = fmtInt(buf[:w], days)
				indexes = append(indexes, w)
			}

			u /= 30
		}
		// u is now months
		if u > 0 {
			months := u % 12
			if months > 0 {
				// "mo" is written as 'o' then 'm' because the buffer is
				// filled backwards.
				w--
				buf[w] = 'o'
				w--
				buf[w] = 'm'
				w = fmtInt(buf[:w], months)
				indexes = append(indexes, w)
			}
			u /= 12
		}
		// u is now years
		if u > 0 {
			w--
			buf[w] = 'y'
			w = fmtInt(buf[:w], u)
			indexes = append(indexes, w)
		}
	}
	start := w
	end := len(buf)

	// truncate to the first two periods
	// The last two entries in indexes are the two least granular periods;
	// ending the slice at the start of the third-least-granular period keeps
	// exactly those two.
	num_periods := len(indexes)
	if num_periods > 2 {
		end = indexes[num_periods-3]
	}
	if start == end { //edge case when time difference is less than a second
		return "0s " + messageSuffix
	} else {
		return string(buf[start:end]) + " " + messageSuffix
	}

}

// getLocalNodeID returns the node ID of the local Nomad Client and an error if
// it couldn't be determined or the Agent is not running in Client mode.
func getLocalNodeID(client *api.Client) (string, error) {
	info, err := client.Agent().Self()
	if err != nil {
		return "", fmt.Errorf("Error querying agent info: %s", err)
	}
	// The "client" stats section only exists when the agent runs in client
	// mode, so its absence distinguishes servers from clients.
	clientStats, ok := info.Stats["client"]
	if !ok {
		return "", fmt.Errorf("Nomad not running in client mode")
	}

	nodeID, ok := clientStats["node_id"]
	if !ok {
		return "", fmt.Errorf("Failed to determine node ID")
	}

	return nodeID, nil
}

// evalFailureStatus returns whether the evaluation has failures and a string to
// display when presenting users with whether there are failures for the eval
func evalFailureStatus(eval *api.Evaluation) (string, bool) {
	if eval == nil {
		return "", false
	}

	hasFailures := len(eval.FailedTGAllocs) != 0
	text := strconv.FormatBool(hasFailures)
	// A blocked eval is still being scheduled, so a definitive true/false
	// would be misleading.
	if eval.Status == "blocked" {
		text = "N/A - In Progress"
	}

	return text, hasFailures
}

// LineLimitReader wraps another reader and provides `tail -n` like behavior.
// LineLimitReader buffers up to the searchLimit and returns `-n` number of
// lines. After those lines have been returned, LineLimitReader streams the
// underlying ReadCloser
type LineLimitReader struct {
	io.ReadCloser
	lines       int // number of trailing lines to locate
	searchLimit int // max bytes to buffer while searching for those lines

	timeLimit time.Duration // optional flush deadline; zero disables it
	lastRead  time.Time     // time of the last successful read, for timeLimit

	buffer     *bytes.Buffer // holds data read while searching for the lines
	bufFiled   bool          // (sic) true once buffering is finished and draining begins
	foundLines bool          // true once the trailing lines were searched for
}

// NewLineLimitReader takes the ReadCloser to wrap, the number of lines to find
// searching backwards in the first searchLimit bytes. timeLimit can optionally
// be specified by passing a non-zero duration. When set, the search for the
// last n lines is aborted if no data has been read in the duration. This
// can be used to flush what is had if no extra data is being received. When
// used, the underlying reader must not block forever and must periodically
// unblock even when no data has been read.
func NewLineLimitReader(r io.ReadCloser, lines, searchLimit int, timeLimit time.Duration) *LineLimitReader {
	return &LineLimitReader{
		ReadCloser:  r,
		searchLimit: searchLimit,
		timeLimit:   timeLimit,
		lines:       lines,
		buffer:      bytes.NewBuffer(make([]byte, 0, searchLimit)),
	}
}

// Read implements io.Reader. It operates in three phases: (1) buffer from the
// underlying reader until EOF, the search limit, or the time limit is hit;
// (2) scan the buffer backwards for the last `lines` lines and drain the
// buffer from there; (3) stream directly from the underlying reader.
func (l *LineLimitReader) Read(p []byte) (n int, err error) {
	// Fill up the buffer so we can find the correct number of lines.
	if !l.bufFiled {
		b := make([]byte, len(p))
		// NOTE: this n/err pair shadows the named return values; all exits
		// below return explicit values, so the shadowing is benign.
		n, err := l.ReadCloser.Read(b)
		if n > 0 {
			if _, err := l.buffer.Write(b[:n]); err != nil {
				return 0, err
			}
		}

		if err != nil {
			if err != io.EOF {
				return 0, err
			}

			// EOF: everything available is buffered; move to the scan phase.
			l.bufFiled = true
			goto READ
		}

		if l.buffer.Len() >= l.searchLimit {
			l.bufFiled = true
			goto READ
		}

		if l.timeLimit.Nanoseconds() > 0 {
			if l.lastRead.IsZero() {
				l.lastRead = time.Now()
				return 0, nil
			}

			now := time.Now()
			if n == 0 {
				// We hit the limit
				if l.lastRead.Add(l.timeLimit).Before(now) {
					l.bufFiled = true
					goto READ
				} else {
					return 0, nil
				}
			} else {
				l.lastRead = now
			}
		}

		// Still buffering; report no data to the caller for now.
		return 0, nil
	}

READ:
	if l.bufFiled && l.buffer.Len() != 0 {
		b := l.buffer.Bytes()

		// Find the lines
		if !l.foundLines {
			// Walk backwards counting newlines; lastIndex ends up at the
			// newline preceding the last `lines` lines.
			found := 0
			i := len(b) - 1
			sep := byte('\n')
			lastIndex := len(b) - 1
			for ; found < l.lines && i >= 0; i-- {
				if b[i] == sep {
					lastIndex = i

					// Skip the first one
					if i != len(b)-1 {
						found++
					}
				}
			}

			// We found them all
			if found == l.lines {
				// Clear the buffer until the last index
				l.buffer.Next(lastIndex + 1)
			}

			l.foundLines = true
		}

		// Read from the buffer
		n := copy(p, l.buffer.Next(len(p)))
		return n, nil
	}

	// Just stream from the underlying reader now
	return l.ReadCloser.Read(p)
}

// JobGetter provides helpers for retrieving and parsing a jobspec.
type JobGetter struct {
	HCL1     bool                  // parse with the legacy HCLv1 parser
	Vars     flaghelper.StringFlag // -var flags ("key=value")
	VarFiles flaghelper.StringFlag // -var-file paths
	Strict   bool                  // strict HCLv2 parsing
	JSON     bool                  // parse the job file as JSON

	// The fields below can be overwritten for tests
	testStdin io.Reader
}

// Validate rejects mutually-exclusive option combinations: HCLv1 cannot be
// combined with strict HCLv2 or JSON, and variables are only supported by
// the HCLv2 parser.
func (j *JobGetter) Validate() error {
	if j.HCL1 && j.Strict {
		return fmt.Errorf("cannot parse job file as HCLv1 and HCLv2 strict.")
	}
	if j.HCL1 && j.JSON {
		return fmt.Errorf("cannot parse job file as HCL and JSON.")
	}
	if len(j.Vars) > 0 && j.JSON {
		return fmt.Errorf("cannot use variables with JSON files.")
	}
	if len(j.VarFiles) > 0 && j.JSON {
		return fmt.Errorf("cannot use variables with JSON files.")
	}
	if len(j.Vars) > 0 && j.HCL1 {
		return fmt.Errorf("cannot use variables with HCLv1.")
	}
	if len(j.VarFiles) > 0 && j.HCL1 {
		return fmt.Errorf("cannot use variables with HCLv1.")
	}
	return nil
}

// ApiJob returns the Job struct from jobfile.
func (j *JobGetter) ApiJob(jpath string) (*api.JobSubmission, *api.Job, error) {
	return j.Get(jpath)
}

// Get fetches the jobspec at jpath ("-" means stdin; anything else is
// resolved via go-getter, so local paths and remote URLs both work), parses
// it according to the configured format (HCL1, JSON, or HCL2 by default),
// and returns the original submission text alongside the parsed job.
func (j *JobGetter) Get(jpath string) (*api.JobSubmission, *api.Job, error) {
	var jobfile io.Reader
	pathName := filepath.Base(jpath)
	switch jpath {
	case "-":
		if j.testStdin != nil {
			jobfile = j.testStdin
		} else {
			jobfile = os.Stdin
		}
		pathName = "stdin"
	default:
		if len(jpath) == 0 {
			return nil, nil, fmt.Errorf("Error jobfile path has to be specified.")
		}

		// Download into a temp file; go-getter needs a destination path.
		jobFile, err := os.CreateTemp("", "jobfile")
		if err != nil {
			return nil, nil, err
		}
		defer os.Remove(jobFile.Name())

		// Close the handle before go-getter writes to the path.
		if err := jobFile.Close(); err != nil {
			return nil, nil, err
		}

		// Get the pwd
		pwd, err := os.Getwd()
		if err != nil {
			return nil, nil, err
		}

		client := &gg.Client{
			Src: jpath,
			Pwd: pwd,
			Dst: jobFile.Name(),

			// This will prevent copying or writing files through symlinks
			DisableSymlinks: true,
		}

		if err := client.Get(); err != nil {
			return nil, nil, fmt.Errorf("Error getting jobfile from %q: %v", jpath, err)
		} else {
			file, err := os.Open(jobFile.Name())
			if err != nil {
				return nil, nil, fmt.Errorf("Error opening file %q: %v", jpath, err)
			}
			defer file.Close()
			jobfile = file
		}
	}

	// Parse the JobFile
	var jobStruct *api.Job             // deserialized destination
	var source bytes.Buffer            // tee the original
	var jobSubmission *api.JobSubmission // store the original and format
	// Everything the parser reads from jobfile is also captured in source,
	// so the submission can carry the original text verbatim.
	jobfile = io.TeeReader(jobfile, &source)
	var err error
	switch {
	case j.HCL1:
		jobStruct, err = jobspec.Parse(jobfile)

		// include the hcl1 source as the submission
		jobSubmission = &api.JobSubmission{
			Source: source.String(),
			Format: formatHCL1,
		}
	case j.JSON:

		// Support JSON files with both a top-level Job key as well as
		// ones without.
		eitherJob := struct {
			NestedJob *api.Job `json:"Job"`
			api.Job
		}{}

		if err := json.NewDecoder(jobfile).Decode(&eitherJob); err != nil {
			return nil, nil, fmt.Errorf("Failed to parse JSON job: %w", err)
		}

		if eitherJob.NestedJob != nil {
			jobStruct = eitherJob.NestedJob
		} else {
			jobStruct = &eitherJob.Job
		}

		// include the json source as the submission
		jobSubmission = &api.JobSubmission{
			Source: source.String(),
			Format: formatJSON,
		}
	default:
		// we are parsing HCL2

		// make a copy of the job file (or stdio)
		// The HCL2 parser needs the full bytes up front, so drain the tee
		// into source before parsing.
		if _, err = io.Copy(&source, jobfile); err != nil {
			return nil, nil, fmt.Errorf("Failed to parse HCL job: %w", err)
		}

		// we are parsing HCL2, whether from a file or stdio
		jobStruct, err = jobspec2.ParseWithConfig(&jobspec2.ParseConfig{
			Path:     pathName,
			Body:     source.Bytes(),
			ArgVars:  j.Vars,
			AllowFS:  true,
			VarFiles: j.VarFiles,
			Envs:     os.Environ(),
			Strict:   j.Strict,
		})

		var varFileCat string
		var readVarFileErr error
		if err == nil {
			// combine any -var-file data into one big blob
			varFileCat, readVarFileErr = extractVarFiles([]string(j.VarFiles))
			if readVarFileErr != nil {
				return nil, nil, fmt.Errorf("Failed to read var file(s): %w", readVarFileErr)
			}
		}

		// submit the job with the submission with content from -var flags
		jobSubmission = &api.JobSubmission{
			VariableFlags: extractVarFlags(j.Vars),
			Variables:     varFileCat,
			Source:        source.String(),
			Format:        formatHCL2,
		}
		if err != nil {
			// If the file happens to parse as HCL1, point the user at the
			// -hcl1 flag instead of only surfacing HCL2 diagnostics.
			if _, merr := jobspec.Parse(&source); merr == nil {
				return nil, nil, fmt.Errorf("Failed to parse using HCL 2. Use the HCL 1 parser with `nomad run -hcl1`, or address the following issues:\n%v", err)
			}
		}
	}

	if err != nil {
		return nil, nil, fmt.Errorf("Error parsing job file from %s:\n%v", jpath, err)
	}

	return jobSubmission, jobStruct, nil
}

// extractVarFiles concatenates the content of each file in filenames and
// returns it all as one big content blob
func extractVarFiles(filenames []string) (string, error) {
	var sb strings.Builder
	for _, filename := range filenames {
		b, err := os.ReadFile(filename)
		if err != nil {
			return "", err
		}
		sb.WriteString(string(b))
		sb.WriteString("\n")
	}
	return sb.String(), nil
}

// extractVarFlags is used to parse the values of -var command line arguments
// and turn them into a map to be used for submission. The result is never
// nil for convenience.
func extractVarFlags(slice []string) map[string]string {
	m := make(map[string]string, len(slice))
	for _, s := range slice {
		// Split on the first '=' only; a flag with no '=' maps to "".
		if tokens := strings.SplitN(s, "=", 2); len(tokens) == 1 {
			m[tokens[0]] = ""
		} else {
			m[tokens[0]] = tokens[1]
		}
	}
	return m
}

// mergeAutocompleteFlags is used to join multiple flag completion sets.
610 func mergeAutocompleteFlags(flags ...complete.Flags) complete.Flags { 611 merged := make(map[string]complete.Predictor, len(flags)) 612 for _, f := range flags { 613 for k, v := range f { 614 merged[k] = v 615 } 616 } 617 return merged 618 } 619 620 // sanitizeUUIDPrefix is used to sanitize a UUID prefix. The returned result 621 // will be a truncated version of the prefix if the prefix would not be 622 // queryable. 623 func sanitizeUUIDPrefix(prefix string) string { 624 hyphens := strings.Count(prefix, "-") 625 length := len(prefix) - hyphens 626 remainder := length % 2 627 return prefix[:len(prefix)-remainder] 628 } 629 630 // commandErrorText is used to easily render the same messaging across commands 631 // when an error is printed. 632 func commandErrorText(cmd NamedCommand) string { 633 return fmt.Sprintf("For additional help try 'nomad %s -help'", cmd.Name()) 634 } 635 636 // uiErrorWriter is a io.Writer that wraps underlying ui.ErrorWriter(). 637 // ui.ErrorWriter expects full lines as inputs and it emits its own line breaks. 638 // 639 // uiErrorWriter scans input for individual lines to pass to ui.ErrorWriter. If data 640 // doesn't contain a new line, it buffers result until next new line or writer is closed. 
641 type uiErrorWriter struct { 642 ui cli.Ui 643 buf bytes.Buffer 644 } 645 646 func (w *uiErrorWriter) Write(data []byte) (int, error) { 647 read := 0 648 for len(data) != 0 { 649 a, token, err := bufio.ScanLines(data, false) 650 if err != nil { 651 return read, err 652 } 653 654 if a == 0 { 655 r, err := w.buf.Write(data) 656 return read + r, err 657 } 658 659 w.ui.Error(w.buf.String() + string(token)) 660 data = data[a:] 661 w.buf.Reset() 662 read += a 663 } 664 665 return read, nil 666 } 667 668 func (w *uiErrorWriter) Close() error { 669 // emit what's remaining 670 if w.buf.Len() != 0 { 671 w.ui.Error(w.buf.String()) 672 w.buf.Reset() 673 } 674 return nil 675 } 676 677 func loadDataSource(data string, testStdin io.Reader) (string, error) { 678 // Handle empty quoted shell parameters 679 if len(data) == 0 { 680 return "", nil 681 } 682 683 switch data[0] { 684 case '@': 685 return loadFromFile(data[1:]) 686 case '-': 687 if len(data) > 1 { 688 return data, nil 689 } 690 return loadFromStdin(testStdin) 691 default: 692 return data, nil 693 } 694 } 695 696 func loadFromFile(path string) (string, error) { 697 data, err := os.ReadFile(path) 698 if err != nil { 699 return "", fmt.Errorf("Failed to read file: %v", err) 700 } 701 return string(data), nil 702 } 703 704 func loadFromStdin(testStdin io.Reader) (string, error) { 705 var stdin io.Reader = os.Stdin 706 if testStdin != nil { 707 stdin = testStdin 708 } 709 710 var b bytes.Buffer 711 if _, err := io.Copy(&b, stdin); err != nil { 712 return "", fmt.Errorf("Failed to read stdin: %v", err) 713 } 714 return b.String(), nil 715 }