github.com/kaisenlinux/docker.io@v0.0.0-20230510090727-ea55db55fac7/engine/daemon/logger/local/read.go

package local

import (
	"bytes"
	"context"
	"encoding/binary"
	"fmt"
	"io"

	"github.com/docker/docker/api/types/plugins/logdriver"
	"github.com/docker/docker/daemon/logger"
	"github.com/docker/docker/daemon/logger/loggerutils"
	"github.com/docker/docker/errdefs"
	"github.com/pkg/errors"
)

// maxMsgLen is the maximum size of the logger.Message after serialization.
// logger.defaultBufSize caps the size of the Line field.
const maxMsgLen int = 1e6 // 1MB.

func (d *driver) ReadLogs(config logger.ReadConfig) *logger.LogWatcher {
	logWatcher := logger.NewLogWatcher()

	go d.readLogs(logWatcher, config)
	return logWatcher
}

func (d *driver) readLogs(watcher *logger.LogWatcher, config logger.ReadConfig) {
	defer close(watcher.Msg)

	d.mu.Lock()
	d.readers[watcher] = struct{}{}
	d.mu.Unlock()

	d.logfile.ReadLogs(config, watcher)

	d.mu.Lock()
	delete(d.readers, watcher)
	d.mu.Unlock()
}

// getTailReader walks the log file backwards from the end, using the fixed-size
// length footer written after each encoded message, until it has located the
// start of the last req messages (or reached the beginning of the file). It
// returns a reader positioned at that offset and the number of messages found.
func getTailReader(ctx context.Context, r loggerutils.SizeReaderAt, req int) (io.Reader, int, error) {
	size := r.Size()
	if req < 0 {
		return nil, 0, errdefs.InvalidParameter(errors.Errorf("invalid number of lines to tail: %d", req))
	}

	if size < (encodeBinaryLen*2)+1 {
		return bytes.NewReader(nil), 0, nil
	}

	const encodeBinaryLen64 = int64(encodeBinaryLen)
	var found int

	buf := make([]byte, encodeBinaryLen)

	offset := size
	for {
		select {
		case <-ctx.Done():
			return nil, 0, ctx.Err()
		default:
		}

		n, err := r.ReadAt(buf, offset-encodeBinaryLen64)
		if err != nil && err != io.EOF {
			return nil, 0, errors.Wrap(err, "error reading log message footer")
		}

		if n != encodeBinaryLen {
			return nil, 0, errdefs.DataLoss(errors.New("unexpected number of bytes read from log message footer"))
		}

		msgLen := binary.BigEndian.Uint32(buf)

		n, err = r.ReadAt(buf, offset-encodeBinaryLen64-encodeBinaryLen64-int64(msgLen))
		if err != nil && err != io.EOF {
			return nil, 0, errors.Wrap(err, "error reading log message header")
		}

		if n != encodeBinaryLen {
			return nil, 0, errdefs.DataLoss(errors.New("unexpected number of bytes read from log message header"))
		}

		if msgLen != binary.BigEndian.Uint32(buf) {
			return nil, 0, errdefs.DataLoss(errors.New("log message header and footer indicate different message sizes"))
		}

		found++
		offset -= int64(msgLen)
		offset -= encodeBinaryLen64 * 2
		if found == req {
			break
		}
		if offset <= 0 {
			break
		}
	}

	return io.NewSectionReader(r, offset, size), found, nil
}

type decoder struct {
	rdr   io.Reader
	proto *logdriver.LogEntry
	// buf keeps bytes from rdr.
	buf []byte
	// offset is the position in buf.
	// If offset > 0, buf[offset:] holds bytes that have been read but not yet used.
	offset int
	// nextMsgLen is the length of the next log message.
	// If nextMsgLen = 0, a new value must be read from rdr.
	nextMsgLen int
}

// readRecord fills d.buf[:size] from d.rdr, resuming at d.offset and retrying
// short reads (io.ErrUnexpectedEOF) up to maxDecodeRetry times.
func (d *decoder) readRecord(size int) error {
	var err error
	for i := 0; i < maxDecodeRetry; i++ {
		var n int
		n, err = io.ReadFull(d.rdr, d.buf[d.offset:size])
		d.offset += n
		if err != nil {
			if err != io.ErrUnexpectedEOF {
				return err
			}
			continue
		}
		break
	}
	if err != nil {
		return err
	}
	d.offset = 0
	return nil
}

// Decode reads the next length-prefixed log entry from the underlying reader
// and converts it into a logger.Message.
func (d *decoder) Decode() (*logger.Message, error) {
	if d.proto == nil {
		d.proto = &logdriver.LogEntry{}
	} else {
		resetProto(d.proto)
	}
	if d.buf == nil {
		d.buf = make([]byte, initialBufSize)
	}

	if d.nextMsgLen == 0 {
		msgLen, err := d.decodeSizeHeader()
		if err != nil {
			return nil, err
		}

		if msgLen > maxMsgLen {
			return nil, fmt.Errorf("log message is too large (%d > %d)", msgLen, maxMsgLen)
		}

		if len(d.buf) < msgLen+encodeBinaryLen {
			d.buf = make([]byte, msgLen+encodeBinaryLen)
		} else if msgLen <= initialBufSize {
			d.buf = d.buf[:initialBufSize]
		} else {
			d.buf = d.buf[:msgLen+encodeBinaryLen]
		}

		d.nextMsgLen = msgLen
	}
	return d.decodeLogEntry()
}

func (d *decoder) Reset(rdr io.Reader) {
	if d.rdr == rdr {
		return
	}

	d.rdr = rdr
	if d.proto != nil {
		resetProto(d.proto)
	}
	if d.buf != nil {
		d.buf = d.buf[:initialBufSize]
	}
	d.offset = 0
	d.nextMsgLen = 0
}

func (d *decoder) Close() {
	d.buf = d.buf[:0]
	d.buf = nil
	if d.proto != nil {
		resetProto(d.proto)
	}
	d.rdr = nil
}

func decodeFunc(rdr io.Reader) loggerutils.Decoder {
	return &decoder{rdr: rdr}
}

func (d *decoder) decodeSizeHeader() (int, error) {
	err := d.readRecord(encodeBinaryLen)
	if err != nil {
		return 0, errors.Wrap(err, "could not read a size header")
	}

	msgLen := int(binary.BigEndian.Uint32(d.buf[:encodeBinaryLen]))
	return msgLen, nil
}

// decodeLogEntry reads the message body plus its trailing size footer and
// unmarshals the protobuf-encoded entry into a logger.Message.
func (d *decoder) decodeLogEntry() (*logger.Message, error) {
	msgLen := d.nextMsgLen
	err := d.readRecord(msgLen + encodeBinaryLen)
	if err != nil {
		return nil, errors.Wrapf(err, "could not read a log entry (size=%d+%d)", msgLen, encodeBinaryLen)
	}
	d.nextMsgLen = 0

	if err := d.proto.Unmarshal(d.buf[:msgLen]); err != nil {
		return nil, errors.Wrapf(err, "error unmarshalling log entry (size=%d)", msgLen)
	}

	msg := protoToMessage(d.proto)
	if msg.PLogMetaData == nil {
		msg.Line = append(msg.Line, '\n')
	}

	return msg, nil
}