// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// The wire protocol for HTTP's "chunked" Transfer-Encoding.
package internal

import (
	"bufio"
	"bytes"
	"errors"
	"fmt"
	"io"
)

const maxLineLength = 4096 // assumed <= bufio.defaultBufSize

var ErrLineTooLong = errors.New("header line too long")

// NewChunkedReader returns a new chunkedReader that translates the data read from r
// out of HTTP "chunked" format before returning it.
// The chunkedReader returns io.EOF when the final 0-length chunk is read.
//
// NewChunkedReader is not needed by normal applications. The http package
// automatically decodes chunking when reading response bodies.
func NewChunkedReader(r io.Reader) io.Reader {
	br, ok := r.(*bufio.Reader)
	if !ok {
		br = bufio.NewReader(r)
	}
	return &chunkedReader{r: br}
}

// chunkedReader decodes an HTTP chunked body read from r.
type chunkedReader struct {
	r        *bufio.Reader
	n        uint64 // unread bytes in chunk
	err      error
	buf      [2]byte
	checkEnd bool  // whether need to check for \r\n chunk footer
	excess   int64 // "excessive" chunk overhead, for malicious sender detection
}

// beginChunk reads the next chunk-size line, updates cr.n with the chunk
// length, and enforces the overhead budget that guards against senders
// who pad the stream with oversized headers or chunk extensions.
func (cr *chunkedReader) beginChunk() {
	// chunk-size CRLF
	var line []byte
	line, cr.err = readChunkLine(cr.r)
	if cr.err != nil {
		return
	}
	cr.excess += int64(len(line)) + 2 // header, plus \r\n after the chunk data
	line = trimTrailingWhitespace(line)
	line, cr.err = removeChunkExtension(line)
	if cr.err != nil {
		return
	}
	cr.n, cr.err = parseHexUint(line)
	if cr.err != nil {
		return
	}
	// A sender who sends one byte per chunk will send 5 bytes of overhead
	// for every byte of data. ("1\r\nX\r\n" to send "X".)
	// We want to allow this, since streaming a byte at a time can be legitimate.
	//
	// A sender can use chunk extensions to add arbitrary amounts of additional
	// data per byte read. ("1;very long extension\r\nX\r\n" to send "X".)
	// We don't want to disallow extensions (although we discard them),
	// but we also don't want to allow a sender to reduce the signal/noise ratio
	// arbitrarily.
	//
	// We track the amount of excess overhead read,
	// and produce an error if it grows too large.
	//
	// Currently, we say that we're willing to accept 16 bytes of overhead per chunk,
	// plus twice the amount of real data in the chunk.
	cr.excess -= 16 + (2 * int64(cr.n))
	cr.excess = max(cr.excess, 0)
	if cr.excess > 16*1024 {
		cr.err = errors.New("chunked encoding contains too much non-data")
	}
	if cr.n == 0 {
		cr.err = io.EOF
	}
}

// chunkHeaderAvailable reports whether a complete chunk-size line
// (terminated by '\n') is already buffered, so reading it won't block.
func (cr *chunkedReader) chunkHeaderAvailable() bool {
	n := cr.r.Buffered()
	if n > 0 {
		peek, _ := cr.r.Peek(n)
		return bytes.IndexByte(peek, '\n') >= 0
	}
	return false
}

// Read implements io.Reader, decoding chunked data from the underlying
// reader into b. It returns io.EOF after the final 0-length chunk.
func (cr *chunkedReader) Read(b []uint8) (n int, err error) {
	for cr.err == nil {
		if cr.checkEnd {
			if n > 0 && cr.r.Buffered() < 2 {
				// We have some data. Return early (per the io.Reader
				// contract) instead of potentially blocking while
				// reading more.
				break
			}
			if _, cr.err = io.ReadFull(cr.r, cr.buf[:2]); cr.err == nil {
				if string(cr.buf[:]) != "\r\n" {
					cr.err = errors.New("malformed chunked encoding")
					break
				}
			} else {
				if cr.err == io.EOF {
					cr.err = io.ErrUnexpectedEOF
				}
				break
			}
			cr.checkEnd = false
		}
		if cr.n == 0 {
			if n > 0 && !cr.chunkHeaderAvailable() {
				// We've read enough. Don't potentially block
				// reading a new chunk header.
				break
			}
			cr.beginChunk()
			continue
		}
		if len(b) == 0 {
			break
		}
		rbuf := b
		if uint64(len(rbuf)) > cr.n {
			rbuf = rbuf[:cr.n]
		}
		var n0 int
		n0, cr.err = cr.r.Read(rbuf)
		n += n0
		b = b[n0:]
		cr.n -= uint64(n0)
		// If we're at the end of a chunk, read the next two
		// bytes to verify they are "\r\n".
		if cr.n == 0 && cr.err == nil {
			cr.checkEnd = true
		} else if cr.err == io.EOF {
			cr.err = io.ErrUnexpectedEOF
		}
	}
	return n, cr.err
}

// Read a line of bytes (up to \n) from b.
// Give up if the line exceeds maxLineLength.
// The returned bytes are owned by the bufio.Reader
// so they are only valid until the next bufio read.
func readChunkLine(b *bufio.Reader) ([]byte, error) {
	p, err := b.ReadSlice('\n')
	if err != nil {
		// We always know when EOF is coming.
		// If the caller asked for a line, there should be a line.
		if err == io.EOF {
			err = io.ErrUnexpectedEOF
		} else if err == bufio.ErrBufferFull {
			err = ErrLineTooLong
		}
		return nil, err
	}
	if len(p) >= maxLineLength {
		return nil, ErrLineTooLong
	}
	return p, nil
}

// trimTrailingWhitespace strips trailing ASCII whitespace from b.
func trimTrailingWhitespace(b []byte) []byte {
	for len(b) > 0 && isASCIISpace(b[len(b)-1]) {
		b = b[:len(b)-1]
	}
	return b
}

// isASCIISpace reports whether b is an ASCII space character.
func isASCIISpace(b byte) bool {
	return b == ' ' || b == '\t' || b == '\n' || b == '\r'
}

var semi = []byte(";")

// removeChunkExtension removes any chunk-extension from p.
// For example,
//
//	"0" => "0"
//	"0;token" => "0"
//	"0;token=val" => "0"
//	`0;token="quoted string"` => "0"
func removeChunkExtension(p []byte) ([]byte, error) {
	p, _, _ = bytes.Cut(p, semi)
	// TODO: care about exact syntax of chunk extensions? We're
	// ignoring and stripping them anyway. For now just never
	// return an error.
	return p, nil
}

// NewChunkedWriter returns a new chunkedWriter that translates writes into HTTP
// "chunked" format before writing them to w. Closing the returned chunkedWriter
// sends the final 0-length chunk that marks the end of the stream but does
// not send the final CRLF that appears after trailers; trailers and the last
// CRLF must be written separately.
//
// NewChunkedWriter is not needed by normal applications. The http
// package adds chunking automatically if handlers don't set a
// Content-Length header. Using newChunkedWriter inside a handler
// would result in double chunking or chunking with a Content-Length
// length, both of which are wrong.
func NewChunkedWriter(w io.Writer) io.WriteCloser {
	return &chunkedWriter{w}
}

// Writing to chunkedWriter translates to writing in HTTP chunked Transfer
// Encoding wire format to the underlying Wire chunkedWriter.
type chunkedWriter struct {
	Wire io.Writer
}

// Write the contents of data as one chunk to Wire.
// NOTE: Note that the corresponding chunk-writing procedure in Conn.Write has
// a bug since it does not check for success of io.WriteString
func (cw *chunkedWriter) Write(data []byte) (n int, err error) {
	// Don't send 0-length data. It looks like EOF for chunked encoding.
	if len(data) == 0 {
		return 0, nil
	}
	if _, err = fmt.Fprintf(cw.Wire, "%x\r\n", len(data)); err != nil {
		return 0, err
	}
	if n, err = cw.Wire.Write(data); err != nil {
		return
	}
	if n != len(data) {
		err = io.ErrShortWrite
		return
	}
	if _, err = io.WriteString(cw.Wire, "\r\n"); err != nil {
		return
	}
	if bw, ok := cw.Wire.(*FlushAfterChunkWriter); ok {
		err = bw.Flush()
	}
	return
}

// Close sends the final 0-length chunk. It does not write the trailing
// CRLF that follows trailers; see the NewChunkedWriter doc comment.
func (cw *chunkedWriter) Close() error {
	_, err := io.WriteString(cw.Wire, "0\r\n")
	return err
}

// FlushAfterChunkWriter signals from the caller of NewChunkedWriter
// that each chunk should be followed by a flush. It is used by the
// http.Transport code to keep the buffering behavior for headers and
// trailers, but flush out chunks aggressively in the middle for
// request bodies which may be generated slowly. See Issue 6574.
type FlushAfterChunkWriter struct {
	*bufio.Writer
}

// parseHexUint parses v as a hexadecimal chunk length, rejecting
// non-hex bytes and values longer than 16 hex digits (64 bits).
func parseHexUint(v []byte) (n uint64, err error) {
	for i, b := range v {
		switch {
		case '0' <= b && b <= '9':
			b = b - '0'
		case 'a' <= b && b <= 'f':
			b = b - 'a' + 10
		case 'A' <= b && b <= 'F':
			b = b - 'A' + 10
		default:
			return 0, errors.New("invalid byte in chunk length")
		}
		if i == 16 {
			return 0, errors.New("http chunk length too large")
		}
		n <<= 4
		n |= uint64(b)
	}
	return
}
The pages are generated with Golds v0.6.7. (GOOS=linux GOARCH=amd64)
Golds is a Go 101 project developed by Tapir Liu.
PRs and bug reports are welcome and can be submitted to the issue list.
Please follow @Go100and1 (reachable from the left QR code) to get the latest news of Golds.