github.com/argoproj/argo-cd/v3@v3.2.1/server/application/logs.go (about)

     1  package application
     2  
     3  import (
     4  	"bufio"
     5  	"errors"
     6  	"io"
     7  	"strings"
     8  	"sync"
     9  	"sync/atomic"
    10  	"time"
    11  )
    12  
// logEntry is a single parsed log line flowing through the merge pipeline.
// Either the payload fields (line, timeStamp, podName) are set, or err is
// set: a stream read/parse failure is propagated as an entry carrying only err.
type logEntry struct {
	line      string    // log message with the timestamp prefix stripped
	timeStamp time.Time // parsed from the line's RFC3339Nano prefix
	podName   string    // pod the line originated from
	err       error     // non-nil if reading or parsing the stream failed
}
    19  
    20  // parseLogsStream converts given ReadCloser into channel that emits log entries
    21  func parseLogsStream(podName string, stream io.ReadCloser, ch chan logEntry) {
    22  	bufReader := bufio.NewReader(stream)
    23  	eof := false
    24  	for !eof {
    25  		line, err := bufReader.ReadString('\n')
    26  		if err != nil && errors.Is(err, io.EOF) {
    27  			eof = true
    28  			// stop if we reached end of stream and the next line is empty
    29  			if line == "" {
    30  				break
    31  			}
    32  		} else if err != nil && !errors.Is(err, io.EOF) {
    33  			ch <- logEntry{err: err}
    34  			break
    35  		}
    36  
    37  		line = strings.TrimSpace(line) // Remove trailing line ending
    38  		parts := strings.Split(line, " ")
    39  		timeStampStr := parts[0]
    40  		logTime, err := time.Parse(time.RFC3339Nano, timeStampStr)
    41  		if err != nil {
    42  			ch <- logEntry{err: err}
    43  			break
    44  		}
    45  
    46  		lines := strings.Join(parts[1:], " ")
    47  		for _, line := range strings.Split(lines, "\r") {
    48  			ch <- logEntry{line: line, timeStamp: logTime, podName: podName}
    49  		}
    50  	}
    51  }
    52  
// mergeLogStreams merge two stream of logs and ensures that merged logs are sorted by timestamp.
// The implementation uses merge sort: method reads next log entry from each stream if one of streams is empty
// it waits for no longer than specified duration and then merges available entries.
// The returned channel is closed once all input streams are closed and every
// buffered entry has been flushed.
func mergeLogStreams(streams []chan logEntry, bufferingDuration time.Duration) chan logEntry {
	merged := make(chan logEntry)

	// buffer of received log entries for each stream
	entriesPerStream := make([][]logEntry, len(streams))
	process := make(chan struct{})

	var lock sync.Mutex // guards entriesPerStream
	// countdown of still-open input streams; decremented atomically so the
	// last reader goroutine to finish can close the process channel exactly once
	streamsCount := int32(len(streams))

	// start goroutine per stream that continuously put new log entries into buffer and triggers processing
	for i := range streams {
		go func(index int) {
			for next := range streams[index] {
				lock.Lock()
				entriesPerStream[index] = append(entriesPerStream[index], next)
				lock.Unlock()
				// signal the processing goroutine outside the lock so it can
				// take the lock inside send without deadlocking
				process <- struct{}{}
			}
			// stop processing after all streams got closed
			if atomic.AddInt32(&streamsCount, -1) == 0 {
				close(process)
			}
		}(i)
	}

	// send moves log entries from buffer into merged stream
	// if flush=true then sends log entries into merged stream even if buffer of some streams are empty
	// (flush=false performs a strict merge: it only emits while every stream has
	// a buffered entry, so ordering across streams is guaranteed).
	// Returns true if at least one entry was emitted.
	send := func(flush bool) bool {
		var entries []logEntry
		lock.Lock()
		for {
			// find the stream whose head entry has the oldest timestamp
			oldest := -1
			someEmpty := false
			allEmpty := true
			for i := range entriesPerStream {
				entries := entriesPerStream[i]
				if len(entries) > 0 {
					if oldest == -1 || entriesPerStream[oldest][0].timeStamp.After(entries[0].timeStamp) {
						oldest = i
					}
					allEmpty = false
				} else {
					someEmpty = true
				}
			}

			// stop when there is nothing left, or when a strict (non-flush)
			// merge cannot proceed because some stream has no entry to compare
			if allEmpty || someEmpty && !flush {
				break
			}

			if oldest > -1 {
				// pop the head of the oldest stream into the outgoing batch
				entries = append(entries, entriesPerStream[oldest][0])
				entriesPerStream[oldest] = entriesPerStream[oldest][1:]
			}
		}
		lock.Unlock()
		// emit outside the lock: sends on the unbuffered merged channel block
		// until the consumer reads, and must not stall the reader goroutines
		for i := range entries {
			merged <- entries[i]
		}
		return len(entries) > 0
	}

	var sentAtLock sync.Mutex
	var sentAt time.Time // last time entries were pushed to merged

	// periodically flush whatever is buffered so a quiet stream cannot delay
	// the others for longer than bufferingDuration
	ticker := time.NewTicker(bufferingDuration)
	done := make(chan struct{})
	go func() {
		for {
			select {
			case <-done:
				return
			case <-ticker.C:
				sentAtLock.Lock()
				// waited long enough for logs from each streams, send everything accumulated
				if sentAt.Add(bufferingDuration).Before(time.Now()) {
					_ = send(true)
					sentAt = time.Now()
				}

				sentAtLock.Unlock()
			}
		}
	}()

	go func() {
		// strict merge pass on every new entry; process closes once all
		// input streams are closed (see the countdown above)
		for range process {
			if send(false) {
				sentAtLock.Lock()
				sentAt = time.Now()
				sentAtLock.Unlock()
			}
		}

		// final flush of anything still buffered after all streams closed
		_ = send(true)

		ticker.Stop()
		// ticker.Stop() does not close the channel, and it does not wait for the channel to be drained. So we need to
		// explicitly prevent the gorountine from leaking by closing the channel. We also need to prevent the goroutine
		// from calling `send` again, because `send` pushes to the `merged` channel which we're about to close.
		// This describes the approach nicely: https://stackoverflow.com/questions/17797754/ticker-stop-behaviour-in-golang
		done <- struct{}{}
		close(merged)
	}()
	return merged
}
   162  }