github.com/rsampaio/docker@v0.7.2-0.20150827203920-fdc73cc3fc31/pkg/ioutils/readers.go

package ioutils

import (
	"bytes"
	"crypto/sha256"
	"encoding/hex"
	"io"
	"math/rand"
	"sync"
	"time"

	"github.com/docker/docker/pkg/random"
)

var rndSrc = random.NewSource()

type readCloserWrapper struct {
	io.Reader
	closer func() error
}

func (r *readCloserWrapper) Close() error {
	return r.closer()
}

// NewReadCloserWrapper returns a new io.ReadCloser that wraps r and
// calls closer on Close.
func NewReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser {
	return &readCloserWrapper{
		Reader: r,
		closer: closer,
	}
}

type readerErrWrapper struct {
	reader io.Reader
	closer func()
}

func (r *readerErrWrapper) Read(p []byte) (int, error) {
	n, err := r.reader.Read(p)
	if err != nil {
		r.closer()
	}
	return n, err
}

// NewReaderErrWrapper returns a new io.Reader that calls closer
// whenever a read returns an error.
func NewReaderErrWrapper(r io.Reader, closer func()) io.Reader {
	return &readerErrWrapper{
		reader: r,
		closer: closer,
	}
}

// bufReader allows the underlying reader to continue to produce
// output by pre-emptively reading from the wrapped reader.
// This is achieved by buffering this data in bufReader's
// expanding buffer.
type bufReader struct {
	sync.Mutex
	buf                  *bytes.Buffer
	reader               io.Reader
	err                  error
	wait                 sync.Cond
	drainBuf             []byte
	reuseBuf             []byte
	maxReuse             int64
	resetTimeout         time.Duration
	bufLenResetThreshold int64
	maxReadDataReset     int64
}

// NewBufReader returns a new bufReader.
func NewBufReader(r io.Reader) io.ReadCloser {
	timeout := rand.New(rndSrc).Intn(120) + 180

	reader := &bufReader{
		buf:                  &bytes.Buffer{},
		drainBuf:             make([]byte, 1024),
		reuseBuf:             make([]byte, 4096),
		maxReuse:             1000,
		resetTimeout:         time.Duration(timeout) * time.Second,
		bufLenResetThreshold: 100 * 1024,
		maxReadDataReset:     10 * 1024 * 1024,
		reader:               r,
	}
	reader.wait.L = &reader.Mutex
	go reader.drain()
	return reader
}

// NewBufReaderWithDrainbufAndBuffer returns a bufReader that uses the
// supplied drain buffer and backing buffer.
func NewBufReaderWithDrainbufAndBuffer(r io.Reader, drainBuffer []byte, buffer *bytes.Buffer) io.ReadCloser {
	reader := &bufReader{
		buf:      buffer,
		drainBuf: drainBuffer,
		reader:   r,
	}
	reader.wait.L = &reader.Mutex
	go reader.drain()
	return reader
}

func (r *bufReader) drain() {
	var (
		duration       time.Duration
		lastReset      time.Time
		now            time.Time
		reset          bool
		bufLen         int64
		dataSinceReset int64
		maxBufLen      int64
		reuseBufLen    int64
		reuseCount     int64
	)
	reuseBufLen = int64(len(r.reuseBuf))
	lastReset = time.Now()
	for {
		n, err := r.reader.Read(r.drainBuf)
		dataSinceReset += int64(n)
		r.Lock()
		bufLen = int64(r.buf.Len())
		if bufLen > maxBufLen {
			maxBufLen = bufLen
		}

		// Avoid unbounded growth of the buffer over time.
		// This has been discovered to be the only non-intrusive
		// solution to the unbounded growth of the buffer.
		// Alternative solutions such as compression, multiple
		// buffers, channels and other similar pieces of code
		// were reducing throughput, overall Docker performance
		// or simply crashed Docker.
		// This solution releases the buffer when specific
		// conditions are met to avoid the continuous resizing
		// of the buffer for long-lived containers.
		//
		// Move data to the front of the buffer if it's
		// smaller than what reuseBuf can store.
		if bufLen > 0 && reuseBufLen >= bufLen {
			n, _ := r.buf.Read(r.reuseBuf)
			r.buf.Write(r.reuseBuf[0:n])
			// Take action if the buffer has been reused too many
			// times and if there's data in the buffer.
			// The timeout is also used as a means to avoid doing
			// these operations more or less often than required.
			// The various conditions try to detect heavy activity
			// in the buffer which might be indicators of heavy
			// growth of the buffer.
		} else if reuseCount >= r.maxReuse && bufLen > 0 {
			now = time.Now()
			duration = now.Sub(lastReset)
			timeoutReached := duration >= r.resetTimeout

			// The timeout has been reached and the
			// buffered data couldn't be moved to the front
			// of the buffer, so the buffer gets reset.
			if timeoutReached && bufLen > reuseBufLen {
				reset = true
			}
			// The amount of buffered data is too high now,
			// reset the buffer.
			if timeoutReached && maxBufLen >= r.bufLenResetThreshold {
				reset = true
			}
			// Reset the buffer if a certain amount of
			// data has gone through the buffer since the
			// last reset.
			if timeoutReached && dataSinceReset >= r.maxReadDataReset {
				reset = true
			}
			// The buffered data is moved to a fresh buffer,
			// swap the old buffer with the new one and
			// reset all counters.
			if reset {
				newbuf := &bytes.Buffer{}
				newbuf.ReadFrom(r.buf)
				r.buf = newbuf
				lastReset = now
				reset = false
				dataSinceReset = 0
				maxBufLen = 0
				reuseCount = 0
			}
		}
		if err != nil {
			r.err = err
		} else {
			r.buf.Write(r.drainBuf[0:n])
		}
		reuseCount++
		r.wait.Signal()
		r.Unlock()
		callSchedulerIfNecessary()
		if err != nil {
			break
		}
	}
}

func (r *bufReader) Read(p []byte) (n int, err error) {
	r.Lock()
	defer r.Unlock()
	for {
		n, err = r.buf.Read(p)
		if n > 0 {
			return n, err
		}
		if r.err != nil {
			return 0, r.err
		}
		r.wait.Wait()
	}
}

// Close closes the bufReader and, if the wrapped reader is an
// io.ReadCloser, the wrapped reader as well.
func (r *bufReader) Close() error {
	closer, ok := r.reader.(io.ReadCloser)
	if !ok {
		return nil
	}
	return closer.Close()
}

// HashData returns the sha256 sum of src.
func HashData(src io.Reader) (string, error) {
	h := sha256.New()
	if _, err := io.Copy(h, src); err != nil {
		return "", err
	}
	return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil
}

// OnEOFReader wraps an io.ReadCloser and a function; the function runs
// once, either when the reader reaches end of file or when it is closed.
type OnEOFReader struct {
	Rc io.ReadCloser
	Fn func()
}

func (r *OnEOFReader) Read(p []byte) (n int, err error) {
	n, err = r.Rc.Read(p)
	if err == io.EOF {
		r.runFunc()
	}
	return
}

// Close closes the wrapped reader and runs the function.
func (r *OnEOFReader) Close() error {
	err := r.Rc.Close()
	r.runFunc()
	return err
}

func (r *OnEOFReader) runFunc() {
	if fn := r.Fn; fn != nil {
		fn()
		r.Fn = nil
	}
}
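// Usage sketch (illustrative, not part of the original file): NewBufReader
// is typically used to let a producer keep writing while a consumer reads
// at its own pace; the drain goroutine copies data into the internal buffer
// and Read blocks on the condition variable until data or an error arrives.
// OnEOFReader is useful for running cleanup exactly once, whether the stream
// ends naturally or is closed early. The program below assumes the package
// is imported from its canonical path.
//
//	package main
//
//	import (
//		"fmt"
//		"io/ioutil"
//		"strings"
//
//		"github.com/docker/docker/pkg/ioutils"
//	)
//
//	func main() {
//		// Buffered reader: the background goroutine drains src immediately.
//		buffered := ioutils.NewBufReader(strings.NewReader("hello world"))
//		defer buffered.Close()
//		data, _ := ioutil.ReadAll(buffered) // blocks until src reports EOF
//		fmt.Println(string(data))
//
//		// EOF hook: the callback runs once, at EOF or on Close.
//		hooked := &ioutils.OnEOFReader{
//			Rc: ioutils.NewReadCloserWrapper(strings.NewReader("x"), func() error { return nil }),
//			Fn: func() { fmt.Println("stream finished") },
//		}
//		ioutil.ReadAll(hooked)
//		hooked.Close() // Fn already ran at EOF, so this does not run it again
//	}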