github.com/rentongzhang/docker@v1.8.2-rc1/pkg/ioutils/readers.go

package ioutils

import (
	"bytes"
	"crypto/rand"
	"crypto/sha256"
	"encoding/hex"
	"io"
	"math/big"
	"sync"
	"time"
)

type readCloserWrapper struct {
	io.Reader
	closer func() error
}

func (r *readCloserWrapper) Close() error {
	return r.closer()
}

// NewReadCloserWrapper returns an io.ReadCloser that forwards reads to r and
// invokes closer when Close is called.
func NewReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser {
	return &readCloserWrapper{
		Reader: r,
		closer: closer,
	}
}

type readerErrWrapper struct {
	reader io.Reader
	closer func()
}

func (r *readerErrWrapper) Read(p []byte) (int, error) {
	n, err := r.reader.Read(p)
	if err != nil {
		r.closer()
	}
	return n, err
}

// NewReaderErrWrapper returns an io.Reader that calls closer as soon as the
// wrapped reader returns any error, including io.EOF.
func NewReaderErrWrapper(r io.Reader, closer func()) io.Reader {
	return &readerErrWrapper{
		reader: r,
		closer: closer,
	}
}

// bufReader allows the underlying reader to continue to produce
// output by pre-emptively reading from the wrapped reader.
// This is achieved by buffering this data in bufReader's
// expanding buffer.
type bufReader struct {
	sync.Mutex
	buf                  *bytes.Buffer
	reader               io.Reader
	err                  error
	wait                 sync.Cond
	drainBuf             []byte
	reuseBuf             []byte
	maxReuse             int64
	resetTimeout         time.Duration
	bufLenResetThreshold int64
	maxReadDataReset     int64
}

// NewBufReader returns a bufReader that drains r in a background goroutine.
// The buffer-reset timeout is randomized to a value between 180 and 299
// seconds (300 if the random source fails).
func NewBufReader(r io.Reader) *bufReader {
	var timeout int
	if randVal, err := rand.Int(rand.Reader, big.NewInt(120)); err == nil {
		timeout = int(randVal.Int64()) + 180
	} else {
		timeout = 300
	}
	reader := &bufReader{
		buf:                  &bytes.Buffer{},
		drainBuf:             make([]byte, 1024),
		reuseBuf:             make([]byte, 4096),
		maxReuse:             1000,
		resetTimeout:         time.Second * time.Duration(timeout),
		bufLenResetThreshold: 100 * 1024,
		maxReadDataReset:     10 * 1024 * 1024,
		reader:               r,
	}
	reader.wait.L = &reader.Mutex
	go reader.drain()
	return reader
}

// NewBufReaderWithDrainbufAndBuffer is like NewBufReader but uses the
// caller-supplied drain buffer and accumulation buffer; the reuse and reset
// thresholds are left at their zero values.
func NewBufReaderWithDrainbufAndBuffer(r io.Reader, drainBuffer []byte, buffer *bytes.Buffer) *bufReader {
	reader := &bufReader{
		buf:      buffer,
		drainBuf: drainBuffer,
		reader:   r,
	}
	reader.wait.L = &reader.Mutex
	go reader.drain()
	return reader
}
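// exampleBufReaderCopy is an illustrative sketch that is not part of the
// original file: it shows a hypothetical caller wrapping a stream with
// NewBufReader so that the producer is drained eagerly even when the
// consumer (the io.Copy destination) is slow.
func exampleBufReaderCopy(src io.Reader, dst io.Writer) (int64, error) {
	br := NewBufReader(src)
	// Closing the bufReader closes the wrapped reader if it is a ReadCloser.
	defer br.Close()
	return io.Copy(dst, br)
}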
func (r *bufReader) drain() {
	var (
		duration       time.Duration
		lastReset      time.Time
		now            time.Time
		reset          bool
		bufLen         int64
		dataSinceReset int64
		maxBufLen      int64
		reuseBufLen    int64
		reuseCount     int64
	)
	reuseBufLen = int64(len(r.reuseBuf))
	lastReset = time.Now()
	for {
		n, err := r.reader.Read(r.drainBuf)
		dataSinceReset += int64(n)
		r.Lock()
		bufLen = int64(r.buf.Len())
		if bufLen > maxBufLen {
			maxBufLen = bufLen
		}

		// Avoid unbounded growth of the buffer over time.
		// This has been discovered to be the only non-intrusive
		// solution to the unbounded growth of the buffer.
		// Alternative solutions such as compression, multiple
		// buffers, channels and other similar pieces of code
		// were reducing throughput, overall Docker performance
		// or simply crashed Docker.
		// This solution releases the buffer when specific
		// conditions are met to avoid the continuous resizing
		// of the buffer for long lived containers.
		//
		// Move data to the front of the buffer if it's
		// smaller than what reuseBuf can store
		if bufLen > 0 && reuseBufLen >= bufLen {
			n, _ := r.buf.Read(r.reuseBuf)
			r.buf.Write(r.reuseBuf[0:n])
			// Take action if the buffer has been reused too many
			// times and if there's data in the buffer.
			// The timeout is also used as a means to avoid doing
			// these operations more or less often than required.
			// The various conditions try to detect heavy activity
			// in the buffer which might be an indicator of heavy
			// growth of the buffer.
		} else if reuseCount >= r.maxReuse && bufLen > 0 {
			now = time.Now()
			duration = now.Sub(lastReset)
			timeoutReached := duration >= r.resetTimeout

			// The timeout has been reached and the
			// buffered data couldn't be moved to the front
			// of the buffer, so the buffer gets reset.
			if timeoutReached && bufLen > reuseBufLen {
				reset = true
			}
			// The amount of buffered data is too high now,
			// reset the buffer.
			if timeoutReached && maxBufLen >= r.bufLenResetThreshold {
				reset = true
			}
			// Reset the buffer if a certain amount of
			// data has gone through the buffer since the
			// last reset.
			if timeoutReached && dataSinceReset >= r.maxReadDataReset {
				reset = true
			}
			// The buffered data is moved to a fresh buffer,
			// swap the old buffer with the new one and
			// reset all counters.
			if reset {
				newbuf := &bytes.Buffer{}
				newbuf.ReadFrom(r.buf)
				r.buf = newbuf
				lastReset = now
				reset = false
				dataSinceReset = 0
				maxBufLen = 0
				reuseCount = 0
			}
		}
		if err != nil {
			r.err = err
		} else {
			r.buf.Write(r.drainBuf[0:n])
		}
		reuseCount++
		r.wait.Signal()
		r.Unlock()
		callSchedulerIfNecessary()
		if err != nil {
			break
		}
	}
}

// Read returns buffered data, blocking until the drain goroutine has made
// data available or has recorded an error from the underlying reader.
func (r *bufReader) Read(p []byte) (n int, err error) {
	r.Lock()
	defer r.Unlock()
	for {
		n, err = r.buf.Read(p)
		if n > 0 {
			return n, err
		}
		if r.err != nil {
			return 0, r.err
		}
		r.wait.Wait()
	}
}

// Close closes the underlying reader if it implements io.ReadCloser.
func (r *bufReader) Close() error {
	closer, ok := r.reader.(io.ReadCloser)
	if !ok {
		return nil
	}
	return closer.Close()
}

// HashData returns the sha256 digest of src, formatted as "sha256:<hex>".
func HashData(src io.Reader) (string, error) {
	h := sha256.New()
	if _, err := io.Copy(h, src); err != nil {
		return "", err
	}
	return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil
}

// OnEOFReader wraps an io.ReadCloser and runs Fn at most once, either when
// the reader returns io.EOF or when it is closed, whichever happens first.
type OnEOFReader struct {
	Rc io.ReadCloser
	Fn func()
}

func (r *OnEOFReader) Read(p []byte) (n int, err error) {
	n, err = r.Rc.Read(p)
	if err == io.EOF {
		r.runFunc()
	}
	return
}

func (r *OnEOFReader) Close() error {
	err := r.Rc.Close()
	r.runFunc()
	return err
}

func (r *OnEOFReader) runFunc() {
	if fn := r.Fn; fn != nil {
		fn()
		r.Fn = nil
	}
}
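// exampleHashWithCleanup is an illustrative sketch that is not part of the
// original file: it hashes an in-memory stream with HashData while using
// NewReadCloserWrapper and OnEOFReader to attach hypothetical cleanup hooks
// that run once the data has been fully consumed or the stream is closed.
func exampleHashWithCleanup() (string, error) {
	src := bytes.NewReader([]byte("hello world"))
	rc := NewReadCloserWrapper(src, func() error {
		// Hypothetical cleanup for Close, e.g. returning a buffer to a pool.
		return nil
	})
	r := &OnEOFReader{
		Rc: rc,
		Fn: func() {
			// Runs once, when the stream hits io.EOF or is closed.
		},
	}
	defer r.Close()
	return HashData(r) // e.g. "sha256:b94d27b9934d3e08..."
}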