// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// The wire protocol for HTTP's "chunked" Transfer-Encoding.
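//
// For reference, a chunked body is a sequence of chunks, each encoded as the
// chunk size in hexadecimal, CRLF, the chunk data, and CRLF. A zero-length
// chunk, optional trailers, and a final CRLF end the body. The body "hello",
// for example, can be sent as:
//
//	5\r\n
//	hello\r\n
//	0\r\n
//	\r\n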

// Package internal contains HTTP internals shared by net/http and
// net/http/httputil.
package internal

import (
	"bufio"
	"bytes"
	"errors"
	"fmt"
	"io"
)

const maxLineLength = 4096 // assumed <= bufio.defaultBufSize

var ErrLineTooLong = errors.New("header line too long")

// NewChunkedReader returns a new chunkedReader that translates the data read from r
// out of HTTP "chunked" format before returning it.
// The chunkedReader returns io.EOF when the final 0-length chunk is read.
//
// NewChunkedReader is not needed by normal applications. The http package
// automatically decodes chunking when reading response bodies.
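//
// A minimal usage sketch (the literal chunked input below is only
// illustrative):
//
//	r := NewChunkedReader(strings.NewReader("5\r\nhello\r\n0\r\n"))
//	body, err := ioutil.ReadAll(r) // body == []byte("hello"), err == nil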
func NewChunkedReader(r io.Reader) io.Reader {
	br, ok := r.(*bufio.Reader)
	if !ok {
		br = bufio.NewReader(r)
	}
	return &chunkedReader{r: br}
}

type chunkedReader struct {
	r   *bufio.Reader
	n   uint64 // unread bytes in chunk
	err error
	buf [2]byte
}

// beginChunk reads the next chunk-size line and records the chunk length in
// cr.n. A zero length marks the end of the body and is reported as io.EOF.
func (cr *chunkedReader) beginChunk() {
	// chunk-size CRLF
	var line []byte
	line, cr.err = readLine(cr.r)
	if cr.err != nil {
		return
	}
	cr.n, cr.err = parseHexUint(line)
	if cr.err != nil {
		return
	}
	if cr.n == 0 {
		cr.err = io.EOF
	}
}

// chunkHeaderAvailable reports whether a complete chunk-size line (one ending
// in '\n') is already buffered in cr.r, so it can be read without blocking.
func (cr *chunkedReader) chunkHeaderAvailable() bool {
	n := cr.r.Buffered()
	if n > 0 {
		peek, _ := cr.r.Peek(n)
		return bytes.IndexByte(peek, '\n') >= 0
	}
	return false
}

func (cr *chunkedReader) Read(b []uint8) (n int, err error) {
	for cr.err == nil {
		if cr.n == 0 {
			if n > 0 && !cr.chunkHeaderAvailable() {
				// We've read enough. Don't potentially block
				// reading a new chunk header.
				break
			}
			cr.beginChunk()
			continue
		}
		if len(b) == 0 {
			break
		}
		rbuf := b
		if uint64(len(rbuf)) > cr.n {
			rbuf = rbuf[:cr.n]
		}
		var n0 int
		n0, cr.err = cr.r.Read(rbuf)
		n += n0
		b = b[n0:]
		cr.n -= uint64(n0)
		// If we're at the end of a chunk, read the next two
		// bytes to verify they are "\r\n".
		if cr.n == 0 && cr.err == nil {
			if _, cr.err = io.ReadFull(cr.r, cr.buf[:2]); cr.err == nil {
				if cr.buf[0] != '\r' || cr.buf[1] != '\n' {
					cr.err = errors.New("malformed chunked encoding")
				}
			}
		}
	}
	return n, cr.err
}

// readLine reads a line of bytes (up to '\n') from b.
// It gives up if the line exceeds maxLineLength.
// The returned bytes point into the bufio.Reader's internal buffer,
// so they are only valid until the next read from b.
func readLine(b *bufio.Reader) (p []byte, err error) {
	if p, err = b.ReadSlice('\n'); err != nil {
		// We always know when EOF is coming.
		// If the caller asked for a line, there should be a line.
		if err == io.EOF {
			err = io.ErrUnexpectedEOF
		} else if err == bufio.ErrBufferFull {
			err = ErrLineTooLong
		}
		return nil, err
	}
	if len(p) >= maxLineLength {
		return nil, ErrLineTooLong
	}
	return trimTrailingWhitespace(p), nil
}

func trimTrailingWhitespace(b []byte) []byte {
	for len(b) > 0 && isASCIISpace(b[len(b)-1]) {
		b = b[:len(b)-1]
	}
	return b
}

func isASCIISpace(b byte) bool {
	return b == ' ' || b == '\t' || b == '\n' || b == '\r'
}

// NewChunkedWriter returns a new chunkedWriter that translates writes into HTTP
// "chunked" format before writing them to w. Closing the returned chunkedWriter
// sends the final 0-length chunk that marks the end of the stream.
//
// NewChunkedWriter is not needed by normal applications. The http
// package adds chunking automatically if handlers don't set a
// Content-Length header. Using NewChunkedWriter inside a handler
// would result in double chunking or chunking with a Content-Length
// header, both of which are wrong.
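//
// A minimal usage sketch (writing to a bytes.Buffer for illustration):
//
//	var buf bytes.Buffer
//	w := NewChunkedWriter(&buf)
//	w.Write([]byte("hello"))
//	w.Close()
//	// buf.String() == "5\r\nhello\r\n0\r\n"; the CRLF that ends the
//	// chunked body is left for the caller to write after any trailers.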
func NewChunkedWriter(w io.Writer) io.WriteCloser {
	return &chunkedWriter{w}
}

// Writing to a chunkedWriter translates to writing in HTTP chunked
// Transfer-Encoding wire format to the underlying Wire writer.
type chunkedWriter struct {
	Wire io.Writer
}

// Write writes the contents of data as one chunk to Wire.
// NOTE: the corresponding chunk-writing procedure in Conn.Write has
// a bug since it does not check for success of io.WriteString.
func (cw *chunkedWriter) Write(data []byte) (n int, err error) {

	// Don't send 0-length data. It looks like EOF for chunked encoding.
	if len(data) == 0 {
		return 0, nil
	}

	if _, err = fmt.Fprintf(cw.Wire, "%x\r\n", len(data)); err != nil {
		return 0, err
	}
	if n, err = cw.Wire.Write(data); err != nil {
		return
	}
	if n != len(data) {
		err = io.ErrShortWrite
		return
	}
	if _, err = io.WriteString(cw.Wire, "\r\n"); err != nil {
		return
	}
	if bw, ok := cw.Wire.(*FlushAfterChunkWriter); ok {
		err = bw.Flush()
	}
	return
}

// Close writes the terminating 0-length chunk. It does not write the trailer
// section or the final CRLF; the caller is responsible for those.
func (cw *chunkedWriter) Close() error {
	_, err := io.WriteString(cw.Wire, "0\r\n")
	return err
}

// FlushAfterChunkWriter signals from the caller of NewChunkedWriter
// that each chunk should be followed by a flush. It is used by the
// http.Transport code to keep the buffering behavior for headers and
// trailers, but flush out chunks aggressively in the middle for
// request bodies which may be generated slowly. See Issue 6574.
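//
// A minimal sketch of opting in (conn stands for any underlying io.Writer,
// such as a network connection):
//
//	bw := bufio.NewWriter(conn)
//	cw := NewChunkedWriter(&FlushAfterChunkWriter{Writer: bw})
//	// each chunk written through cw is now flushed to conn immediately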
type FlushAfterChunkWriter struct {
	*bufio.Writer
}

// parseHexUint parses v as a hexadecimal chunk size; for example,
// []byte("1a") parses to 26. Any byte outside 0-9, a-f, and A-F
// (including a "0x" prefix) is an error.
func parseHexUint(v []byte) (n uint64, err error) {
	for _, b := range v {
		n <<= 4
		switch {
		case '0' <= b && b <= '9':
			b = b - '0'
		case 'a' <= b && b <= 'f':
			b = b - 'a' + 10
		case 'A' <= b && b <= 'F':
			b = b - 'A' + 10
		default:
			return 0, errors.New("invalid byte in chunk length")
		}
		n |= uint64(b)
	}
	return
}