github.com/argoproj/argo-cd/v2@v2.10.9/server/application/logs.go

package application

import (
	"bufio"
	"io"
	"strings"
	"sync"
	"sync/atomic"
	"time"
)

type logEntry struct {
	line      string
	timeStamp time.Time
	podName   string
	err       error
}

// parseLogsStream reads the given ReadCloser line by line and emits parsed log entries
// on the provided channel. Each line must start with an RFC3339Nano timestamp followed
// by the log text.
func parseLogsStream(podName string, stream io.ReadCloser, ch chan logEntry) {
	bufReader := bufio.NewReader(stream)
	eof := false
	for !eof {
		line, err := bufReader.ReadString('\n')
		if err == io.EOF {
			eof = true
			// stop if we reached the end of the stream and the final read returned nothing
			if line == "" {
				break
			}
		} else if err != nil {
			ch <- logEntry{err: err}
			break
		}

		line = strings.TrimSpace(line) // Remove trailing line ending
		parts := strings.Split(line, " ")
		timeStampStr := parts[0]
		logTime, err := time.Parse(time.RFC3339Nano, timeStampStr)
		if err != nil {
			ch <- logEntry{err: err}
			break
		}

		// a single timestamped line may contain several segments separated by carriage
		// returns; emit each one as its own entry with the shared timestamp
		lines := strings.Join(parts[1:], " ")
		for _, line := range strings.Split(lines, "\r") {
			ch <- logEntry{line: line, timeStamp: logTime, podName: podName}
		}
	}
}

// mergeLogStreams merges multiple streams of logs and ensures that the merged logs are
// sorted by timestamp. The implementation is a k-way merge: the next entry is taken from
// whichever stream has the oldest buffered entry; if some stream has nothing buffered yet,
// merging waits for no longer than the specified duration and then flushes whatever
// entries are available.
func mergeLogStreams(streams []chan logEntry, bufferingDuration time.Duration) chan logEntry {
	merged := make(chan logEntry)

	// buffer of received log entries for each stream
	entriesPerStream := make([][]logEntry, len(streams))
	process := make(chan struct{})

	var lock sync.Mutex
	streamsCount := int32(len(streams))

	// start a goroutine per stream that continuously puts new log entries into the buffer
	// and triggers processing
	for i := range streams {
		go func(index int) {
			for next := range streams[index] {
				lock.Lock()
				entriesPerStream[index] = append(entriesPerStream[index], next)
				lock.Unlock()
				process <- struct{}{}
			}
			// stop processing after all streams got closed
			if atomic.AddInt32(&streamsCount, -1) == 0 {
				close(process)
			}
		}(i)
	}

	// send moves log entries from the buffers into the merged stream;
	// if flush=true then it sends log entries even if the buffers of some streams are empty
	send := func(flush bool) bool {
		var entries []logEntry
		lock.Lock()
		for {
			// pick the stream whose oldest buffered entry has the smallest timestamp
			oldest := -1
			someEmpty := false
			allEmpty := true
			for i := range entriesPerStream {
				entries := entriesPerStream[i]
				if len(entries) > 0 {
					if oldest == -1 || entriesPerStream[oldest][0].timeStamp.After(entries[0].timeStamp) {
						oldest = i
					}
					allEmpty = false
				} else {
					someEmpty = true
				}
			}

			if allEmpty || someEmpty && !flush {
				break
			}

			if oldest > -1 {
				entries = append(entries, entriesPerStream[oldest][0])
				entriesPerStream[oldest] = entriesPerStream[oldest][1:]
			}
		}
		lock.Unlock()
		for i := range entries {
			merged <- entries[i]
		}
		return len(entries) > 0
	}

	var sentAtLock sync.Mutex
	var sentAt time.Time

	ticker := time.NewTicker(bufferingDuration)
	go func() {
		for range ticker.C {
			sentAtLock.Lock()
			// waited long enough for logs from each stream; send everything accumulated
			if sentAt.Add(bufferingDuration).Before(time.Now()) {
				_ = send(true)
				sentAt = time.Now()
			}
			sentAtLock.Unlock()
		}
	}()

	go func() {
		for range process {
			if send(false) {
				sentAtLock.Lock()
				sentAt = time.Now()
				sentAtLock.Unlock()
			}
		}

		// all streams are closed: flush any remaining buffered entries
		_ = send(true)

		close(merged)
		ticker.Stop()
	}()
	return merged
}
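
For context, a minimal sketch of how the two functions above might be wired together. It is not part of logs.go: the consumeMergedLogs helper and the podStreams map are hypothetical stand-ins (the ReadClosers would normally come from the Kubernetes pod-logs API), and the snippet additionally assumes the "fmt" import.

// consumeMergedLogs is a hypothetical helper, not part of this file: it parses
// each pod's log stream in its own goroutine and prints the merged output.
func consumeMergedLogs(podStreams map[string]io.ReadCloser) {
	streams := make([]chan logEntry, 0, len(podStreams))
	for podName, rc := range podStreams {
		ch := make(chan logEntry)
		streams = append(streams, ch)
		go func(name string, stream io.ReadCloser) {
			// parseLogsStream does not close the channel itself; closing it here
			// lets mergeLogStreams detect that this stream has ended.
			defer close(ch)
			defer stream.Close()
			parseLogsStream(name, stream, ch)
		}(podName, rc)
	}

	// hold out-of-order entries back for up to one second of buffering
	for entry := range mergeLogStreams(streams, time.Second) {
		if entry.err != nil {
			continue // a read or timestamp-parse error ends the corresponding stream
		}
		fmt.Printf("%s [%s] %s\n", entry.timeStamp.Format(time.RFC3339Nano), entry.podName, entry.line)
	}
}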