github.com/lmars/docker@v1.6.0-rc2/pkg/ioutils/readers.go

package ioutils

import (
	"bytes"
	"crypto/rand"
	"io"
	"math/big"
	"sync"
	"time"
)

type readCloserWrapper struct {
	io.Reader
	closer func() error
}

func (r *readCloserWrapper) Close() error {
	return r.closer()
}

// NewReadCloserWrapper wraps r in an io.ReadCloser whose Close method calls
// the supplied closer function.
func NewReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser {
	return &readCloserWrapper{
		Reader: r,
		closer: closer,
	}
}

type readerErrWrapper struct {
	reader io.Reader
	closer func()
}

func (r *readerErrWrapper) Read(p []byte) (int, error) {
	n, err := r.reader.Read(p)
	if err != nil {
		r.closer()
	}
	return n, err
}

// NewReaderErrWrapper wraps r in an io.Reader that calls the supplied closer
// function whenever a read returns an error (including io.EOF).
func NewReaderErrWrapper(r io.Reader, closer func()) io.Reader {
	return &readerErrWrapper{
		reader: r,
		closer: closer,
	}
}

// bufReader allows the underlying reader to continue to produce
// output by pre-emptively reading from the wrapped reader.
// This is achieved by buffering this data in bufReader's
// expanding buffer.
type bufReader struct {
	sync.Mutex
	buf                  *bytes.Buffer
	reader               io.Reader
	err                  error
	wait                 sync.Cond
	drainBuf             []byte
	reuseBuf             []byte
	maxReuse             int64
	resetTimeout         time.Duration
	bufLenResetThreshold int64
	maxReadDataReset     int64
}

// NewBufReader returns a bufReader that eagerly drains r in a background
// goroutine. The buffer-reset timeout is randomized in the range of 180 to
// 300 seconds.
func NewBufReader(r io.Reader) *bufReader {
	var timeout int
	if randVal, err := rand.Int(rand.Reader, big.NewInt(120)); err == nil {
		timeout = int(randVal.Int64()) + 180
	} else {
		timeout = 300
	}
	reader := &bufReader{
		buf:                  &bytes.Buffer{},
		drainBuf:             make([]byte, 1024),
		reuseBuf:             make([]byte, 4096),
		maxReuse:             1000,
		resetTimeout:         time.Second * time.Duration(timeout),
		bufLenResetThreshold: 100 * 1024,
		maxReadDataReset:     10 * 1024 * 1024,
		reader:               r,
	}
	reader.wait.L = &reader.Mutex
	go reader.drain()
	return reader
}

// NewBufReaderWithDrainbufAndBuffer returns a bufReader that drains r using
// the caller-supplied drain buffer and backing buffer.
func NewBufReaderWithDrainbufAndBuffer(r io.Reader, drainBuffer []byte, buffer *bytes.Buffer) *bufReader {
	reader := &bufReader{
		buf:      buffer,
		drainBuf: drainBuffer,
		reader:   r,
	}
	reader.wait.L = &reader.Mutex
	go reader.drain()
	return reader
}

// drain continuously reads from the underlying reader into drainBuf, appends
// the data to buf and signals any blocked Read caller after each iteration.
// It returns once the underlying reader reports an error (including io.EOF).
func (r *bufReader) drain() {
	var (
		duration       time.Duration
		lastReset      time.Time
		now            time.Time
		reset          bool
		bufLen         int64
		dataSinceReset int64
		maxBufLen      int64
		reuseBufLen    int64
		reuseCount     int64
	)
	reuseBufLen = int64(len(r.reuseBuf))
	lastReset = time.Now()
	for {
		n, err := r.reader.Read(r.drainBuf)
		dataSinceReset += int64(n)
		r.Lock()
		bufLen = int64(r.buf.Len())
		if bufLen > maxBufLen {
			maxBufLen = bufLen
		}

		// Avoid unbounded growth of the buffer over time.
		// This has been discovered to be the only non-intrusive
		// solution to the unbounded growth of the buffer.
		// Alternative solutions such as compression, multiple
		// buffers, channels and other similar pieces of code
		// were reducing throughput, overall Docker performance
		// or simply crashed Docker.
		// This solution releases the buffer when specific
		// conditions are met to avoid the continuous resizing
		// of the buffer for long lived containers.
		//
		// Move data to the front of the buffer if it's
		// smaller than what reuseBuf can store.
		if bufLen > 0 && reuseBufLen >= bufLen {
			n, _ := r.buf.Read(r.reuseBuf)
			r.buf.Write(r.reuseBuf[0:n])
			// Take action if the buffer has been reused too many
			// times and if there's data in the buffer.
			// The timeout is also used as a means to avoid doing
			// these operations more or less often than required.
			// The various conditions try to detect heavy activity
			// in the buffer, which might indicate heavy growth
			// of the buffer.
		} else if reuseCount >= r.maxReuse && bufLen > 0 {
			now = time.Now()
			duration = now.Sub(lastReset)
			timeoutReached := duration >= r.resetTimeout

			// The timeout has been reached and the
			// buffered data couldn't be moved to the front
			// of the buffer, so the buffer gets reset.
			if timeoutReached && bufLen > reuseBufLen {
				reset = true
			}
			// The amount of buffered data is too high now,
			// reset the buffer.
			if timeoutReached && maxBufLen >= r.bufLenResetThreshold {
				reset = true
			}
			// Reset the buffer if a certain amount of
			// data has gone through the buffer since the
			// last reset.
			if timeoutReached && dataSinceReset >= r.maxReadDataReset {
				reset = true
			}
			// The buffered data is moved to a fresh buffer,
			// swap the old buffer with the new one and
			// reset all counters.
			if reset {
				newbuf := &bytes.Buffer{}
				newbuf.ReadFrom(r.buf)
				r.buf = newbuf
				lastReset = now
				reset = false
				dataSinceReset = 0
				maxBufLen = 0
				reuseCount = 0
			}
		}
		if err != nil {
			r.err = err
		} else {
			r.buf.Write(r.drainBuf[0:n])
		}
		reuseCount++
		r.wait.Signal()
		r.Unlock()
		if err != nil {
			break
		}
	}
}

// Read returns buffered data, blocking until the drain goroutine produces
// data or reports an error.
func (r *bufReader) Read(p []byte) (n int, err error) {
	r.Lock()
	defer r.Unlock()
	for {
		n, err = r.buf.Read(p)
		if n > 0 {
			return n, err
		}
		if r.err != nil {
			return 0, r.err
		}
		r.wait.Wait()
	}
}

// Close closes the underlying reader if it implements io.ReadCloser.
func (r *bufReader) Close() error {
	closer, ok := r.reader.(io.ReadCloser)
	if !ok {
		return nil
	}
	return closer.Close()
}
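// Usage sketch (added for illustration; not part of the original file): a
// minimal example of wrapping a reader, assuming a caller in another package
// imports this package as "ioutils" together with "io/ioutil" and "strings"
// from the standard library. The source reader and its contents are
// hypothetical stand-ins.
//
//	src := strings.NewReader("container output") // stand-in for a slow stream
//	br := ioutils.NewBufReader(src)               // drain() now runs in a goroutine
//	defer br.Close()                              // closes src only if it is an io.ReadCloser
//	data, err := ioutil.ReadAll(br)               // Read blocks until data arrives or the stream ends
//	_, _ = data, err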