github.com/mongey/nomad@v0.5.2/command/agent/fs_endpoint.go

package agent

//go:generate codecgen -o fs_endpoint.generated.go fs_endpoint.go

import (
	"bytes"
	"fmt"
	"io"
	"math"
	"net/http"
	"os"
	"path/filepath"
	"sort"
	"strconv"
	"strings"
	"sync"
	"syscall"
	"time"

	"gopkg.in/tomb.v1"

	"github.com/docker/docker/pkg/ioutils"
	"github.com/hashicorp/nomad/client/allocdir"
	"github.com/hpcloud/tail/watch"
	"github.com/ugorji/go/codec"
)

var (
	allocIDNotPresentErr  = fmt.Errorf("must provide a valid alloc id")
	fileNameNotPresentErr = fmt.Errorf("must provide a file name")
	taskNotPresentErr     = fmt.Errorf("must provide task name")
	logTypeNotPresentErr  = fmt.Errorf("must provide log type (stdout/stderr)")
	clientNotRunning      = fmt.Errorf("node is not running a Nomad Client")
	invalidOrigin         = fmt.Errorf("origin must be start or end")
)

const (
	// streamFrameSize is the maximum number of bytes to send in a single frame
	streamFrameSize = 64 * 1024

	// streamHeartbeatRate is the rate at which a heartbeat will occur to detect
	// a closed connection without sending any additional data
	streamHeartbeatRate = 1 * time.Second

	// streamBatchWindow is the window in which file content is batched before
	// being flushed if the frame size has not been hit.
	streamBatchWindow = 200 * time.Millisecond

	// nextLogCheckRate is the rate at which we check for a log entry greater
	// than what we are watching for. This is to handle the case in which logs
	// rotate faster than we can detect and we have to rely on a normal
	// directory listing.
	nextLogCheckRate = 100 * time.Millisecond

	// deleteEvent and truncateEvent are the file events that can be sent in a
	// StreamFrame
	deleteEvent   = "file deleted"
	truncateEvent = "file truncated"

	// OriginStart and OriginEnd are the available parameters for the origin
	// argument when streaming a file. They respectively offset from the start
	// and end of a file.
	OriginStart = "start"
	OriginEnd   = "end"
)

func (s *HTTPServer) FsRequest(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
	if s.agent.client == nil {
		return nil, clientNotRunning
	}

	path := strings.TrimPrefix(req.URL.Path, "/v1/client/fs/")
	switch {
	case strings.HasPrefix(path, "ls/"):
		return s.DirectoryListRequest(resp, req)
	case strings.HasPrefix(path, "stat/"):
		return s.FileStatRequest(resp, req)
	case strings.HasPrefix(path, "readat/"):
		return s.FileReadAtRequest(resp, req)
	case strings.HasPrefix(path, "cat/"):
		return s.FileCatRequest(resp, req)
	case strings.HasPrefix(path, "stream/"):
		return s.Stream(resp, req)
	case strings.HasPrefix(path, "logs/"):
		return s.Logs(resp, req)
	default:
		return nil, CodedError(404, ErrInvalidMethod)
	}
}

func (s *HTTPServer) DirectoryListRequest(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
	var allocID, path string

	if allocID = strings.TrimPrefix(req.URL.Path, "/v1/client/fs/ls/"); allocID == "" {
		return nil, allocIDNotPresentErr
	}
	if path = req.URL.Query().Get("path"); path == "" {
		path = "/"
	}
	fs, err := s.agent.client.GetAllocFS(allocID)
	if err != nil {
		return nil, err
	}
	return fs.List(path)
}

func (s *HTTPServer) FileStatRequest(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
	var allocID, path string
	if allocID = strings.TrimPrefix(req.URL.Path, "/v1/client/fs/stat/"); allocID == "" {
		return nil, allocIDNotPresentErr
	}
	if path = req.URL.Query().Get("path"); path == "" {
		return nil, fileNameNotPresentErr
	}
	fs, err := s.agent.client.GetAllocFS(allocID)
	if err != nil {
		return nil, err
	}
	return fs.Stat(path)
}

func (s *HTTPServer) FileReadAtRequest(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
	var allocID, path string
	var offset, limit int64
	var err error

	q := req.URL.Query()

	if allocID = strings.TrimPrefix(req.URL.Path, "/v1/client/fs/readat/"); allocID == "" {
		return nil, allocIDNotPresentErr
	}
	if path = q.Get("path"); path == "" {
		return nil, fileNameNotPresentErr
	}

	if offset, err = strconv.ParseInt(q.Get("offset"), 10, 64); err != nil {
		return nil, fmt.Errorf("error parsing offset: %v", err)
	}

	// Parse the limit
	if limitStr := q.Get("limit"); limitStr != "" {
		if limit, err = strconv.ParseInt(limitStr, 10, 64); err != nil {
			return nil, fmt.Errorf("error parsing limit: %v", err)
		}
	}

	fs, err := s.agent.client.GetAllocFS(allocID)
	if err != nil {
		return nil, err
	}

	rc, err := fs.ReadAt(path, offset)
	if limit > 0 {
		rc = &ReadCloserWrapper{
			Reader: io.LimitReader(rc, limit),
			Closer: rc,
		}
	}

	if err != nil {
		return nil, err
	}

	io.Copy(resp, rc)
	return nil, rc.Close()
}

// ReadCloserWrapper wraps a LimitReader so that a file is closed once it has been
// read
type ReadCloserWrapper struct {
	io.Reader
	io.Closer
}

func (s *HTTPServer) FileCatRequest(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
	var allocID, path string
	var err error

	q := req.URL.Query()

	if allocID = strings.TrimPrefix(req.URL.Path, "/v1/client/fs/cat/"); allocID == "" {
		return nil, allocIDNotPresentErr
	}
	if path = q.Get("path"); path == "" {
		return nil, fileNameNotPresentErr
	}
	fs, err := s.agent.client.GetAllocFS(allocID)
	if err != nil {
		return nil, err
	}

	fileInfo, err := fs.Stat(path)
	if err != nil {
		return nil, err
	}
	if fileInfo.IsDir {
		return nil, fmt.Errorf("file %q is a directory", path)
	}

	r, err := fs.ReadAt(path, int64(0))
	if err != nil {
		return nil, err
	}
	io.Copy(resp, r)
	return nil, r.Close()
}

var (
	// HeartbeatStreamFrame is the StreamFrame to send as a heartbeat, avoiding
	// creating many instances of the empty StreamFrame
	HeartbeatStreamFrame = &StreamFrame{}
)

// StreamFrame is used to frame data of a file when streaming
type StreamFrame struct {
	// Offset is the offset the data was read from
	Offset int64 `json:",omitempty"`

	// Data is the read data
	Data []byte `json:",omitempty"`

	// File is the file that the data was read from
	File string `json:",omitempty"`

	// FileEvent is the last file event that occurred and could cause the
	// stream's position to change or end
	FileEvent string `json:",omitempty"`
}

// IsHeartbeat returns whether the frame is a heartbeat frame
func (s *StreamFrame) IsHeartbeat() bool {
	return s.Offset == 0 && len(s.Data) == 0 && s.File == "" && s.FileEvent == ""
}

func (s *StreamFrame) Clear() {
	s.Offset = 0
	s.Data = nil
	s.File = ""
	s.FileEvent = ""
}

func (s *StreamFrame) IsCleared() bool {
	if s.Offset != 0 {
		return false
	} else if s.Data != nil {
		return false
	} else if s.File != "" {
		return false
	} else if s.FileEvent != "" {
		return false
	} else {
		return true
	}
}

// StreamFramer is used to buffer and send frames as well as heartbeat.
type StreamFramer struct {
	out     io.WriteCloser
	enc     *codec.Encoder
	encLock sync.Mutex

	frameSize int

	heartbeat *time.Ticker
	flusher   *time.Ticker

	shutdownCh chan struct{}
	exitCh     chan struct{}

	// The mutex protects everything below
	l sync.Mutex

	// The current working frame
	f    StreamFrame
	data *bytes.Buffer

	// Captures whether the framer is running and any error that occurred to
	// cause it to stop.
	running bool
	Err     error
}

// NewStreamFramer creates a new stream framer that will output StreamFrames to
// the passed output.
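//
// A minimal usage sketch (illustrative values only; `out` is assumed to be any
// io.WriteCloser, such as the WriteFlusher created in Stream below):
//
//	framer := NewStreamFramer(out, streamHeartbeatRate, streamBatchWindow, streamFrameSize)
//	framer.Run()
//	defer framer.Destroy()
//	// Send is asynchronous; buffered data is flushed when the batch window
//	// fires, the frame size is reached, or Destroy is called.
//	_ = framer.Send("path/in/alloc", "", []byte("data"), 0)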
func NewStreamFramer(out io.WriteCloser, heartbeatRate, batchWindow time.Duration, frameSize int) *StreamFramer {
	// Create a JSON encoder
	enc := codec.NewEncoder(out, jsonHandle)

	// Create the heartbeat and flush ticker
	heartbeat := time.NewTicker(heartbeatRate)
	flusher := time.NewTicker(batchWindow)

	return &StreamFramer{
		out:        out,
		enc:        enc,
		frameSize:  frameSize,
		heartbeat:  heartbeat,
		flusher:    flusher,
		data:       bytes.NewBuffer(make([]byte, 0, 2*frameSize)),
		shutdownCh: make(chan struct{}),
		exitCh:     make(chan struct{}),
	}
}

// Destroy is used to clean up the StreamFramer and flush any pending frames
func (s *StreamFramer) Destroy() {
	s.l.Lock()
	close(s.shutdownCh)
	s.heartbeat.Stop()
	s.flusher.Stop()
	running := s.running
	s.l.Unlock()

	// Ensure things were flushed
	if running {
		<-s.exitCh
	}
	s.out.Close()
}

// Run starts a long-lived goroutine that handles sending data as well as
// heartbeating
func (s *StreamFramer) Run() {
	s.l.Lock()
	defer s.l.Unlock()
	if s.running {
		return
	}

	s.running = true
	go s.run()
}

// ExitCh returns a channel that will be closed when the run loop terminates.
func (s *StreamFramer) ExitCh() <-chan struct{} {
	return s.exitCh
}

// run is the internal run method. It exits if Destroy is called or an error
// occurs, in which case the exit channel is closed.
func (s *StreamFramer) run() {
	var err error
	defer func() {
		close(s.exitCh)
		s.l.Lock()
		s.running = false
		s.Err = err
		s.l.Unlock()
	}()

OUTER:
	for {
		select {
		case <-s.shutdownCh:
			break OUTER
		case <-s.flusher.C:
			// Skip if there is nothing to flush
			s.l.Lock()
			if s.f.IsCleared() {
				s.l.Unlock()
				continue
			}

			// Read the data for the frame, and send it
			s.f.Data = s.readData()
			err = s.send(&s.f)
			s.f.Clear()
			s.l.Unlock()
			if err != nil {
				return
			}
		case <-s.heartbeat.C:
			// Send a heartbeat frame
			if err = s.send(HeartbeatStreamFrame); err != nil {
				return
			}
		}
	}

	s.l.Lock()
	if !s.f.IsCleared() {
		s.f.Data = s.readData()
		err = s.send(&s.f)
		s.f.Clear()
	}
	s.l.Unlock()
}

// send takes a StreamFrame, encodes and sends it
func (s *StreamFramer) send(f *StreamFrame) error {
	s.encLock.Lock()
	defer s.encLock.Unlock()
	return s.enc.Encode(f)
}

// readData is a helper which reads the buffered data, returning up to the frame
// size of data. Must be called with the lock held. The returned value is
// invalid on the next read or write into the StreamFramer buffer
func (s *StreamFramer) readData() []byte {
	// Compute the amount to read from the buffer
	size := s.data.Len()
	if size > s.frameSize {
		size = s.frameSize
	}
	if size == 0 {
		return nil
	}
	d := s.data.Next(size)
	return d
}

// Send creates and sends a StreamFrame based on the passed parameters. An error
// is returned if the run routine hasn't run or encountered an error. Send is
// asynchronous and does not block for the data to be transferred.
func (s *StreamFramer) Send(file, fileEvent string, data []byte, offset int64) error {
	s.l.Lock()
	defer s.l.Unlock()

	// If we are not running, return the error that caused us to stop or
	// indicate that the framer was never started.
	if !s.running {
		if s.Err != nil {
			return s.Err
		}

		return fmt.Errorf("StreamFramer not running")
	}

	// Check if not mergeable
	if !s.f.IsCleared() && (s.f.File != file || s.f.FileEvent != fileEvent) {
		// Flush the old frame
		s.f.Data = s.readData()
		select {
		case <-s.exitCh:
			return nil
		default:
		}
		err := s.send(&s.f)
		s.f.Clear()
		if err != nil {
			return err
		}
	}

	// Store the new data as the current frame.
	if s.f.IsCleared() {
		s.f.Offset = offset
		s.f.File = file
		s.f.FileEvent = fileEvent
	}

	// Write the data to the buffer
	s.data.Write(data)

	// Handle the delete case in which there is no data
	force := false
	if s.data.Len() == 0 && s.f.FileEvent != "" {
		force = true
	}

	// Flush till we are under the max frame size
	for s.data.Len() >= s.frameSize || force {
		// Clear
		if force {
			force = false
		}

		// Create a new frame to send it
		s.f.Data = s.readData()
		select {
		case <-s.exitCh:
			return nil
		default:
		}

		if err := s.send(&s.f); err != nil {
			return err
		}

		// Update the offset
		s.f.Offset += int64(len(s.f.Data))
	}

	if s.data.Len() == 0 {
		s.f.Clear()
	}

	return nil
}

// Stream streams the content of a file blocking on EOF.
// The parameters are:
// * path: path to file to stream.
// * offset: The offset to start streaming data at, defaults to zero.
// * origin: Either "start" or "end" and defines from where the offset is
// applied. Defaults to "start".
func (s *HTTPServer) Stream(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
	var allocID, path string
	var err error

	q := req.URL.Query()

	if allocID = strings.TrimPrefix(req.URL.Path, "/v1/client/fs/stream/"); allocID == "" {
		return nil, allocIDNotPresentErr
	}

	if path = q.Get("path"); path == "" {
		return nil, fileNameNotPresentErr
	}

	var offset int64
	offsetString := q.Get("offset")
	if offsetString != "" {
		var err error
		if offset, err = strconv.ParseInt(offsetString, 10, 64); err != nil {
			return nil, fmt.Errorf("error parsing offset: %v", err)
		}
	}

	origin := q.Get("origin")
	switch origin {
	case "start", "end":
	case "":
		origin = "start"
	default:
		return nil, invalidOrigin
	}

	fs, err := s.agent.client.GetAllocFS(allocID)
	if err != nil {
		return nil, err
	}

	fileInfo, err := fs.Stat(path)
	if err != nil {
		return nil, err
	}
	if fileInfo.IsDir {
		return nil, fmt.Errorf("file %q is a directory", path)
	}

	// If offsetting from the end subtract from the size
	if origin == "end" {
		offset = fileInfo.Size - offset
	}

	// Create an output that gets flushed on every write
	output := ioutils.NewWriteFlusher(resp)

	// Create the framer
	framer := NewStreamFramer(output, streamHeartbeatRate, streamBatchWindow, streamFrameSize)
	framer.Run()
	defer framer.Destroy()

	err = s.stream(offset, path, fs, framer, nil)
	if err != nil && err != syscall.EPIPE {
		return nil, err
	}

	return nil, nil
}

// stream is the internal method to stream the content of a file. eofCancelCh is
// used to cancel the stream if triggered while at EOF.
// If the connection is broken, an EPIPE error is returned.
func (s *HTTPServer) stream(offset int64, path string,
	fs allocdir.AllocDirFS, framer *StreamFramer,
	eofCancelCh chan error) error {

	// Get the reader
	f, err := fs.ReadAt(path, offset)
	if err != nil {
		return err
	}
	defer f.Close()

	// Create a tomb to cancel watch events
	t := tomb.Tomb{}
	defer func() {
		t.Kill(nil)
		t.Done()
	}()

	// parseFramerErr takes an error and returns an error. The error will
	// potentially change if it was caused by the connection being closed.
	parseFramerErr := func(e error) error {
		if e == nil {
			return nil
		}

		if strings.Contains(e.Error(), io.ErrClosedPipe.Error()) {
			// The pipe check is for tests
			return syscall.EPIPE
		}

		// The connection was closed by our peer
		if strings.Contains(e.Error(), syscall.EPIPE.Error()) || strings.Contains(e.Error(), syscall.ECONNRESET.Error()) {
			return syscall.EPIPE
		}

		return e
	}

	// Create a variable to allow setting the last event
	var lastEvent string

	// Only create the file change watcher once. But we need to do it after we
	// read and reach EOF.
	var changes *watch.FileChanges

	// Start streaming the data
	data := make([]byte, streamFrameSize)
OUTER:
	for {
		// Read up to the max frame size
		n, readErr := f.Read(data)

		// Update the offset
		offset += int64(n)

		// Return non-EOF errors
		if readErr != nil && readErr != io.EOF {
			return readErr
		}

		// Send the frame
		if n != 0 {
			if err := framer.Send(path, lastEvent, data[:n], offset); err != nil {
				return parseFramerErr(err)
			}
		}

		// Clear the last event
		if lastEvent != "" {
			lastEvent = ""
		}

		// Just keep reading
		if readErr == nil {
			continue
		}

		// If EOF is hit, wait for a change to the file
		if changes == nil {
			changes, err = fs.ChangeEvents(path, offset, &t)
			if err != nil {
				return err
			}
		}

		for {
			select {
			case <-changes.Modified:
				continue OUTER
			case <-changes.Deleted:
				return parseFramerErr(framer.Send(path, deleteEvent, nil, offset))
			case <-changes.Truncated:
				// Close the current reader
				if err := f.Close(); err != nil {
					return err
				}

				// Get a new reader at offset zero
				offset = 0
				var err error
				f, err = fs.ReadAt(path, offset)
				if err != nil {
					return err
				}
				defer f.Close()

				// Store the last event
				lastEvent = truncateEvent
				continue OUTER
			case <-framer.ExitCh():
				return parseFramerErr(framer.Err)
			case err, ok := <-eofCancelCh:
				if !ok {
					return nil
				}

				return err
			}
		}
	}

	return nil
}

// Logs streams the content of a log blocking on EOF. The parameters are:
// * task: task name to stream logs for.
// * type: stdout/stderr to stream.
// * follow: A boolean of whether to follow the logs.
// * offset: The offset to start streaming data at, defaults to zero.
// * origin: Either "start" or "end" and defines from where the offset is
// applied. Defaults to "start".
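//
// For illustration, a follow request routed here by FsRequest might look like
// the following (the alloc ID, task name, and values are placeholders):
//
//	GET /v1/client/fs/logs/<alloc-id>?task=web&type=stdout&follow=true&origin=end&offset=512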
func (s *HTTPServer) Logs(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
	var allocID, task, logType string
	var follow bool
	var err error

	q := req.URL.Query()

	if allocID = strings.TrimPrefix(req.URL.Path, "/v1/client/fs/logs/"); allocID == "" {
		return nil, allocIDNotPresentErr
	}

	if task = q.Get("task"); task == "" {
		return nil, taskNotPresentErr
	}

	if follow, err = strconv.ParseBool(q.Get("follow")); err != nil {
		return nil, fmt.Errorf("Failed to parse follow field to boolean: %v", err)
	}

	logType = q.Get("type")
	switch logType {
	case "stdout", "stderr":
	default:
		return nil, logTypeNotPresentErr
	}

	var offset int64
	offsetString := q.Get("offset")
	if offsetString != "" {
		var err error
		if offset, err = strconv.ParseInt(offsetString, 10, 64); err != nil {
			return nil, fmt.Errorf("error parsing offset: %v", err)
		}
	}

	origin := q.Get("origin")
	switch origin {
	case "start", "end":
	case "":
		origin = "start"
	default:
		return nil, invalidOrigin
	}

	fs, err := s.agent.client.GetAllocFS(allocID)
	if err != nil {
		return nil, err
	}

	// Create an output that gets flushed on every write
	output := ioutils.NewWriteFlusher(resp)

	return nil, s.logs(follow, offset, origin, task, logType, fs, output)
}

func (s *HTTPServer) logs(follow bool, offset int64,
	origin, task, logType string,
	fs allocdir.AllocDirFS, output io.WriteCloser) error {

	// Create the framer
	framer := NewStreamFramer(output, streamHeartbeatRate, streamBatchWindow, streamFrameSize)
	framer.Run()
	defer framer.Destroy()

	// Path to the logs
	logPath := filepath.Join(allocdir.SharedAllocName, allocdir.LogDirName)

	// nextIdx is the next index to read logs from
	var nextIdx int64
	switch origin {
	case "start":
		nextIdx = 0
	case "end":
		nextIdx = math.MaxInt64
		offset *= -1
	default:
		return invalidOrigin
	}

	// Create a tomb to cancel watch events
	t := tomb.Tomb{}
	defer func() {
		t.Kill(nil)
		t.Done()
	}()

	for {
		// Logic for picking next file is:
		// 1) List log files
		// 2) Pick log file closest to desired index
		// 3) Open log file at correct offset
		// 3a) No error, read contents
		// 3b) If file doesn't exist, goto 1 as it may have been rotated out
		entries, err := fs.List(logPath)
		if err != nil {
			return fmt.Errorf("failed to list entries: %v", err)
		}

		// If we are not following logs, determine the max index for the logs we are
		// interested in so we can stop there.
		maxIndex := int64(math.MaxInt64)
		if !follow {
			_, idx, _, err := findClosest(entries, maxIndex, 0, task, logType)
			if err != nil {
				return err
			}
			maxIndex = idx
		}

		logEntry, idx, openOffset, err := findClosest(entries, nextIdx, offset, task, logType)
		if err != nil {
			return err
		}

		var eofCancelCh chan error
		exitAfter := false
		if !follow && idx > maxIndex {
			// Exceeded what was there initially so return
			return nil
		} else if !follow && idx == maxIndex {
			// At the end
			eofCancelCh = make(chan error)
			close(eofCancelCh)
			exitAfter = true
		} else {
			eofCancelCh = blockUntilNextLog(fs, &t, logPath, task, logType, idx+1)
		}

		p := filepath.Join(logPath, logEntry.Name)
		err = s.stream(openOffset, p, fs, framer, eofCancelCh)

		if err != nil {
			// Check if there was an error where the file does not exist. That means
			// it got rotated out from under us.
			if os.IsNotExist(err) {
				continue
			}

			// Check if the connection was closed
			if err == syscall.EPIPE {
				return nil
			}

			return fmt.Errorf("failed to stream %q: %v", p, err)
		}

		if exitAfter {
			return nil
		}

		// Since we successfully streamed, update the overall offset/idx.
		offset = int64(0)
		nextIdx = idx + 1
	}

	return nil
}

// blockUntilNextLog returns a channel that will have data sent when the next
// log index or anything greater is created.
func blockUntilNextLog(fs allocdir.AllocDirFS, t *tomb.Tomb, logPath, task, logType string, nextIndex int64) chan error {
	nextPath := filepath.Join(logPath, fmt.Sprintf("%s.%s.%d", task, logType, nextIndex))
	next := make(chan error, 1)

	go func() {
		eofCancelCh, err := fs.BlockUntilExists(nextPath, t)
		if err != nil {
			next <- err
			close(next)
			return
		}

		ticker := time.NewTicker(nextLogCheckRate)
		defer ticker.Stop()
		scanCh := ticker.C
		for {
			select {
			case <-t.Dead():
				next <- fmt.Errorf("shutdown triggered")
				close(next)
				return
			case err := <-eofCancelCh:
				next <- err
				close(next)
				return
			case <-scanCh:
				entries, err := fs.List(logPath)
				if err != nil {
					next <- fmt.Errorf("failed to list entries: %v", err)
					close(next)
					return
				}

				indexes, err := logIndexes(entries, task, logType)
				if err != nil {
					next <- err
					close(next)
					return
				}

				// Scan and see if there are any entries larger than what we are
				// waiting for.
				for _, entry := range indexes {
					if entry.idx >= nextIndex {
						next <- nil
						close(next)
						return
					}
				}
			}
		}
	}()

	return next
}

// indexTuple and indexTupleArray are used to find the correct log entry to
// start streaming logs from
type indexTuple struct {
	idx   int64
	entry *allocdir.AllocFileInfo
}

type indexTupleArray []indexTuple

func (a indexTupleArray) Len() int           { return len(a) }
func (a indexTupleArray) Less(i, j int) bool { return a[i].idx < a[j].idx }
func (a indexTupleArray) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }

// logIndexes takes a set of entries and returns an indexTupleArray of
// the desired log file entries. If the indexes could not be determined, an
// error is returned.
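//
// Log files are expected to be named "<task>.<logType>.<index>" (see the
// format used in blockUntilNextLog), so a task "web" with log type "stdout"
// would produce entries such as "web.stdout.0", "web.stdout.1", and so on
// ("web" is only an illustrative task name).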
func logIndexes(entries []*allocdir.AllocFileInfo, task, logType string) (indexTupleArray, error) {
	var indexes []indexTuple
	prefix := fmt.Sprintf("%s.%s.", task, logType)
	for _, entry := range entries {
		if entry.IsDir {
			continue
		}

		// If nothing was trimmed, then it is not a match
		idxStr := strings.TrimPrefix(entry.Name, prefix)
		if idxStr == entry.Name {
			continue
		}

		// Convert to an int
		idx, err := strconv.Atoi(idxStr)
		if err != nil {
			return nil, fmt.Errorf("failed to convert %q to a log index: %v", idxStr, err)
		}

		indexes = append(indexes, indexTuple{idx: int64(idx), entry: entry})
	}

	return indexTupleArray(indexes), nil
}

// findClosest takes a list of entries, the desired log index and desired log
// offset (which can be negative, treated as offset from end), task name and log
// type and returns the log entry, the log index, the offset to read from and a
// potential error.
func findClosest(entries []*allocdir.AllocFileInfo, desiredIdx, desiredOffset int64,
	task, logType string) (*allocdir.AllocFileInfo, int64, int64, error) {

	// Build the matching indexes
	indexes, err := logIndexes(entries, task, logType)
	if err != nil {
		return nil, 0, 0, err
	}
	if len(indexes) == 0 {
		return nil, 0, 0, fmt.Errorf("log entry for task %q and log type %q not found", task, logType)
	}

	// Binary search the indexes to get the desiredIdx
	sort.Sort(indexTupleArray(indexes))
	i := sort.Search(len(indexes), func(i int) bool { return indexes[i].idx >= desiredIdx })
	l := len(indexes)
	if i == l {
		// Use the last index if the number is bigger than all of them.
		i = l - 1
	}

	// Get to the correct offset
	offset := desiredOffset
	idx := int64(i)
	for {
		s := indexes[idx].entry.Size

		// Base case
		if offset == 0 {
			break
		} else if offset < 0 {
			// Going backwards
			if newOffset := s + offset; newOffset >= 0 {
				// Current file works
				offset = newOffset
				break
			} else if idx == 0 {
				// Already at the first file
				offset = 0
				break
			} else {
				// Try the file before
				offset = newOffset
				idx -= 1
				continue
			}
		} else {
			// Going forward
			if offset <= s {
				// Current file works
				break
			} else if idx == int64(l-1) {
				// Already at the end
				offset = s
				break
			} else {
				// Try the next file
				offset = offset - s
				idx += 1
				continue
			}
		}
	}

	return indexes[idx].entry, indexes[idx].idx, offset, nil
}
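// As a worked illustration of findClosest (hypothetical file names and sizes):
// given entries "web.stdout.0" (100 bytes) and "web.stdout.1" (40 bytes), a
// call with desiredIdx=math.MaxInt64 and desiredOffset=-60 (how logs() encodes
// origin "end" with offset 60) starts at the newest file, computes 40-60 < 0,
// steps back carrying offset -20, and returns "web.stdout.0" with a read
// offset of 100-20 = 80.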