github.com/ader1990/go@v0.0.0-20140630135419-8c24447fa791/src/pkg/net/http/httputil/chunked.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// The wire protocol for HTTP's "chunked" Transfer-Encoding.

// This code is duplicated in net/http and net/http/httputil.
// Please make any changes in both files.

package httputil

import (
	"bufio"
	"bytes"
	"errors"
	"fmt"
	"io"
)

const maxLineLength = 4096 // assumed <= bufio.defaultBufSize

var ErrLineTooLong = errors.New("header line too long")

// newChunkedReader returns a new chunkedReader that translates the data read from r
// out of HTTP "chunked" format before returning it.
// The chunkedReader returns io.EOF when the final 0-length chunk is read.
//
// newChunkedReader is not needed by normal applications. The http package
// automatically decodes chunking when reading response bodies.
func newChunkedReader(r io.Reader) io.Reader {
	br, ok := r.(*bufio.Reader)
	if !ok {
		br = bufio.NewReader(r)
	}
	return &chunkedReader{r: br}
}

type chunkedReader struct {
	r   *bufio.Reader
	n   uint64 // unread bytes in chunk
	err error
	buf [2]byte
}

func (cr *chunkedReader) beginChunk() {
	// chunk-size CRLF
	var line []byte
	line, cr.err = readLine(cr.r)
	if cr.err != nil {
		return
	}
	cr.n, cr.err = parseHexUint(line)
	if cr.err != nil {
		return
	}
	if cr.n == 0 {
		cr.err = io.EOF
	}
}

func (cr *chunkedReader) chunkHeaderAvailable() bool {
	n := cr.r.Buffered()
	if n > 0 {
		peek, _ := cr.r.Peek(n)
		return bytes.IndexByte(peek, '\n') >= 0
	}
	return false
}

func (cr *chunkedReader) Read(b []uint8) (n int, err error) {
	for cr.err == nil {
		if cr.n == 0 {
			if n > 0 && !cr.chunkHeaderAvailable() {
				// We've read enough. Don't potentially block
				// reading a new chunk header.
				break
			}
			cr.beginChunk()
			continue
		}
		if len(b) == 0 {
			break
		}
		rbuf := b
		if uint64(len(rbuf)) > cr.n {
			rbuf = rbuf[:cr.n]
		}
		var n0 int
		n0, cr.err = cr.r.Read(rbuf)
		n += n0
		b = b[n0:]
		cr.n -= uint64(n0)
		// If we're at the end of a chunk, read the next two
		// bytes to verify they are "\r\n".
		if cr.n == 0 && cr.err == nil {
			if _, cr.err = io.ReadFull(cr.r, cr.buf[:2]); cr.err == nil {
				if cr.buf[0] != '\r' || cr.buf[1] != '\n' {
					cr.err = errors.New("malformed chunked encoding")
				}
			}
		}
	}
	return n, cr.err
}
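// exampleChunkedRead is an illustrative sketch, not part of the original file:
// the function name and the sample wire bytes are invented for demonstration.
// It shows how a body in chunked wire format (a hex chunk-size line, the chunk
// data, a CRLF, then a final zero-length chunk) is decoded by the
// chunkedReader above: the payload comes back as plain bytes, and io.EOF is
// reported once the terminating "0" chunk-size line has been read.
func exampleChunkedRead() (string, error) {
	wire := []byte("7\r\nhello, \r\n6\r\nworld!\r\n0\r\n\r\n")
	r := newChunkedReader(bytes.NewReader(wire))
	var body bytes.Buffer
	// Buffer.ReadFrom stops at the io.EOF produced after the "0" chunk and
	// returns a nil error; the trailing CRLF of the trailer section is left
	// unread in the bufio.Reader.
	if _, err := body.ReadFrom(r); err != nil {
		return "", err
	}
	return body.String(), nil // "hello, world!"
}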
// Read a line of bytes (up to \n) from b.
// Give up if the line exceeds maxLineLength.
// The returned bytes are a pointer into storage in
// the bufio, so they are only valid until the next bufio read.
func readLine(b *bufio.Reader) (p []byte, err error) {
	if p, err = b.ReadSlice('\n'); err != nil {
		// We always know when EOF is coming.
		// If the caller asked for a line, there should be a line.
		if err == io.EOF {
			err = io.ErrUnexpectedEOF
		} else if err == bufio.ErrBufferFull {
			err = ErrLineTooLong
		}
		return nil, err
	}
	if len(p) >= maxLineLength {
		return nil, ErrLineTooLong
	}
	return trimTrailingWhitespace(p), nil
}

func trimTrailingWhitespace(b []byte) []byte {
	for len(b) > 0 && isASCIISpace(b[len(b)-1]) {
		b = b[:len(b)-1]
	}
	return b
}

func isASCIISpace(b byte) bool {
	return b == ' ' || b == '\t' || b == '\n' || b == '\r'
}

// newChunkedWriter returns a new chunkedWriter that translates writes into HTTP
// "chunked" format before writing them to w. Closing the returned chunkedWriter
// sends the final 0-length chunk that marks the end of the stream.
//
// newChunkedWriter is not needed by normal applications. The http
// package adds chunking automatically if handlers don't set a
// Content-Length header. Using newChunkedWriter inside a handler
// would result in double chunking or chunking with a Content-Length
// header, both of which are wrong.
func newChunkedWriter(w io.Writer) io.WriteCloser {
	return &chunkedWriter{w}
}

// Writing to chunkedWriter translates to writing in HTTP chunked
// Transfer-Encoding wire format to the underlying Wire writer.
type chunkedWriter struct {
	Wire io.Writer
}

// Write the contents of data as one chunk to Wire.
// NOTE: the corresponding chunk-writing procedure in Conn.Write has
// a bug since it does not check for success of io.WriteString.
func (cw *chunkedWriter) Write(data []byte) (n int, err error) {

	// Don't send 0-length data. It looks like EOF for chunked encoding.
	if len(data) == 0 {
		return 0, nil
	}

	if _, err = fmt.Fprintf(cw.Wire, "%x\r\n", len(data)); err != nil {
		return 0, err
	}
	if n, err = cw.Wire.Write(data); err != nil {
		return
	}
	if n != len(data) {
		err = io.ErrShortWrite
		return
	}
	_, err = io.WriteString(cw.Wire, "\r\n")

	return
}

func (cw *chunkedWriter) Close() error {
	_, err := io.WriteString(cw.Wire, "0\r\n")
	return err
}

func parseHexUint(v []byte) (n uint64, err error) {
	for _, b := range v {
		n <<= 4
		switch {
		case '0' <= b && b <= '9':
			b = b - '0'
		case 'a' <= b && b <= 'f':
			b = b - 'a' + 10
		case 'A' <= b && b <= 'F':
			b = b - 'A' + 10
		default:
			return 0, errors.New("invalid byte in chunk length")
		}
		n |= uint64(b)
	}
	return
}
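// exampleChunkedWrite is an illustrative sketch; the function name is invented
// for demonstration. It shows the wire bytes produced by the chunkedWriter
// above: each Write becomes "<hex length>\r\n<data>\r\n" and Close emits the
// terminating zero-length chunk "0\r\n". Note that Close does not write the
// blank line that ends the trailer section; that is left to the caller.
func exampleChunkedWrite() (string, error) {
	var wire bytes.Buffer
	w := newChunkedWriter(&wire)
	if _, err := io.WriteString(w, "hello, "); err != nil {
		return "", err
	}
	if _, err := io.WriteString(w, "world!"); err != nil {
		return "", err
	}
	if err := w.Close(); err != nil {
		return "", err
	}
	return wire.String(), nil // "7\r\nhello, \r\n6\r\nworld!\r\n0\r\n"
}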