// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// HTTP server. See RFC 2616.

package http

import (
	"bufio"
	"bytes"
	"context"
	"crypto/tls"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"net"
	"net/textproto"
	"net/url"
	"os"
	"path"
	"runtime"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"golang_org/x/net/lex/httplex"
)

// Errors used by the HTTP server.
var (
	// ErrBodyNotAllowed is returned by ResponseWriter.Write calls
	// when the HTTP method or response code does not permit a
	// body.
	ErrBodyNotAllowed = errors.New("http: request method or response status code does not allow body")

	// ErrHijacked is returned by ResponseWriter.Write calls when
	// the underlying connection has been hijacked using the
	// Hijacker interface. A zero-byte write on a hijacked
	// connection will return ErrHijacked without any other side
	// effects.
	ErrHijacked = errors.New("http: connection has been hijacked")

	// ErrContentLength is returned by ResponseWriter.Write calls
	// when a Handler set a Content-Length response header with a
	// declared size and then attempted to write more bytes than
	// declared.
	ErrContentLength = errors.New("http: wrote more than the declared Content-Length")

	// Deprecated: ErrWriteAfterFlush is no longer used.
	ErrWriteAfterFlush = errors.New("unused")
)

// A Handler responds to an HTTP request.
//
// ServeHTTP should write reply headers and data to the ResponseWriter
// and then return. Returning signals that the request is finished; it
// is not valid to use the ResponseWriter or read from the
// Request.Body after or concurrently with the completion of the
// ServeHTTP call.
//
// Depending on the HTTP client software, HTTP protocol version, and
// any intermediaries between the client and the Go server, it may not
// be possible to read from the Request.Body after writing to the
// ResponseWriter. Cautious handlers should read the Request.Body
// first, and then reply.
//
// Except for reading the body, handlers should not modify the
// provided Request.
//
// If ServeHTTP panics, the server (the caller of ServeHTTP) assumes
// that the effect of the panic was isolated to the active request.
// It recovers the panic, logs a stack trace to the server error log,
// and either closes the network connection or sends an HTTP/2
// RST_STREAM, depending on the HTTP protocol. To abort a handler so
// the client sees an interrupted response but the server doesn't log
// an error, panic with the value ErrAbortHandler.
type Handler interface {
	ServeHTTP(ResponseWriter, *Request)
}
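
// The following is an illustrative sketch, not part of the original file: a
// minimal Handler implementation showing the "read the body first, then
// reply" pattern recommended in the comment above. The names echoHandler and
// maxEchoBytes are hypothetical.
type echoHandler struct{}

func (echoHandler) ServeHTTP(w ResponseWriter, r *Request) {
	const maxEchoBytes = 1 << 20
	// Read the request body before writing the response, per the
	// guidance in the Handler documentation.
	body, err := ioutil.ReadAll(io.LimitReader(r.Body, maxEchoBytes))
	if err != nil {
		Error(w, "error reading body", StatusBadRequest)
		return
	}
	w.Header().Set("Content-Type", "text/plain; charset=utf-8")
	w.WriteHeader(StatusOK)
	w.Write(body)
}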

// A ResponseWriter interface is used by an HTTP handler to
// construct an HTTP response.
//
// A ResponseWriter may not be used after the Handler.ServeHTTP method
// has returned.
type ResponseWriter interface {
	// Header returns the header map that will be sent by
	// WriteHeader. The Header map also is the mechanism with which
	// Handlers can set HTTP trailers.
	//
	// Changing the header map after a call to WriteHeader (or
	// Write) has no effect unless the modified headers are
	// trailers.
	//
	// There are two ways to set Trailers. The preferred way is to
	// predeclare in the headers which trailers you will later
	// send by setting the "Trailer" header to the names of the
	// trailer keys which will come later. In this case, those
	// keys of the Header map are treated as if they were
	// trailers. See the example. The second way, for trailer
	// keys not known to the Handler until after the first Write,
	// is to prefix the Header map keys with the TrailerPrefix
	// constant value. See TrailerPrefix.
	//
	// To suppress implicit response headers (such as "Date"), set
	// their value to nil.
	Header() Header

	// Write writes the data to the connection as part of an HTTP reply.
	//
	// If WriteHeader has not yet been called, Write calls
	// WriteHeader(http.StatusOK) before writing the data. If the Header
	// does not contain a Content-Type line, Write adds a Content-Type set
	// to the result of passing the initial 512 bytes of written data to
	// DetectContentType.
	//
	// Depending on the HTTP protocol version and the client, calling
	// Write or WriteHeader may prevent future reads on the
	// Request.Body. For HTTP/1.x requests, handlers should read any
	// needed request body data before writing the response. Once the
	// headers have been flushed (due to either an explicit Flusher.Flush
	// call or writing enough data to trigger a flush), the request body
	// may be unavailable. For HTTP/2 requests, the Go HTTP server permits
	// handlers to continue to read the request body while concurrently
	// writing the response. However, such behavior may not be supported
	// by all HTTP/2 clients. Handlers should read before writing if
	// possible to maximize compatibility.
	Write([]byte) (int, error)

	// WriteHeader sends an HTTP response header with the provided
	// status code.
	//
	// If WriteHeader is not called explicitly, the first call to Write
	// will trigger an implicit WriteHeader(http.StatusOK).
	// Thus explicit calls to WriteHeader are mainly used to
	// send error codes.
	//
	// The provided code must be a valid HTTP 1xx-5xx status code.
	// Only one header may be written. Go does not currently
	// support sending user-defined 1xx informational headers,
	// with the exception of the 100-continue response header that the
	// Server sends automatically when the Request.Body is read.
	WriteHeader(statusCode int)
}
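
// Illustrative sketch (not part of the original file) of the two ways to set
// trailers described in the Header documentation above. The handler name
// trailerExampleHandler and the header values are hypothetical.
func trailerExampleHandler(w ResponseWriter, r *Request) {
	// Way 1: predeclare the trailer before the first Write by naming it in
	// the "Trailer" header; the key can then be set at any time.
	w.Header().Set("Trailer", "X-Checksum")

	// Way 2: for trailers not known until after the first Write, prefix the
	// key with TrailerPrefix; the prefix is stripped when the trailer is sent.
	w.Write([]byte("body bytes\n"))
	w.Header().Set("X-Checksum", "2a6df1")             // predeclared trailer
	w.Header().Set(TrailerPrefix+"X-Elapsed-Ms", "3") // late-declared trailer
}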

// The Flusher interface is implemented by ResponseWriters that allow
// an HTTP handler to flush buffered data to the client.
//
// The default HTTP/1.x and HTTP/2 ResponseWriter implementations
// support Flusher, but ResponseWriter wrappers may not. Handlers
// should always test for this ability at runtime.
//
// Note that even for ResponseWriters that support Flush,
// if the client is connected through an HTTP proxy,
// the buffered data may not reach the client until the response
// completes.
type Flusher interface {
	// Flush sends any buffered data to the client.
	Flush()
}

// The Hijacker interface is implemented by ResponseWriters that allow
// an HTTP handler to take over the connection.
//
// The default ResponseWriter for HTTP/1.x connections supports
// Hijacker, but HTTP/2 connections intentionally do not.
// ResponseWriter wrappers may also not support Hijacker. Handlers
// should always test for this ability at runtime.
type Hijacker interface {
	// Hijack lets the caller take over the connection.
	// After a call to Hijack the HTTP server library
	// will not do anything else with the connection.
	//
	// It becomes the caller's responsibility to manage
	// and close the connection.
	//
	// The returned net.Conn may have read or write deadlines
	// already set, depending on the configuration of the
	// Server. It is the caller's responsibility to set
	// or clear those deadlines as needed.
	//
	// The returned bufio.Reader may contain unprocessed buffered
	// data from the client.
	//
	// After a call to Hijack, the original Request.Body must
	// not be used.
	Hijack() (net.Conn, *bufio.ReadWriter, error)
}
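
// Illustrative sketch (not part of the original file): testing at runtime for
// the optional Flusher ability, as the comments above advise, since
// ResponseWriter wrappers may not provide it. The handler name
// streamExampleHandler and the tick loop are hypothetical.
func streamExampleHandler(w ResponseWriter, r *Request) {
	// The same type-assertion pattern applies to Hijacker (which HTTP/2
	// connections intentionally do not implement) and to CloseNotifier.
	flusher, ok := w.(Flusher)
	if !ok {
		Error(w, "streaming unsupported", StatusInternalServerError)
		return
	}
	for i := 0; i < 3; i++ {
		fmt.Fprintf(w, "tick %d\n", i)
		flusher.Flush() // push buffered data to the client now
		time.Sleep(time.Second)
	}
}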

// The CloseNotifier interface is implemented by ResponseWriters which
// allow detecting when the underlying connection has gone away.
//
// This mechanism can be used to cancel long operations on the server
// if the client has disconnected before the response is ready.
type CloseNotifier interface {
	// CloseNotify returns a channel that receives at most a
	// single value (true) when the client connection has gone
	// away.
	//
	// CloseNotify may wait to notify until Request.Body has been
	// fully read.
	//
	// After the Handler has returned, there is no guarantee
	// that the channel receives a value.
	//
	// If the protocol is HTTP/1.1 and CloseNotify is called while
	// processing an idempotent request (such as a GET) while
	// HTTP/1.1 pipelining is in use, the arrival of a subsequent
	// pipelined request may cause a value to be sent on the
	// returned channel. In practice HTTP/1.1 pipelining is not
	// enabled in browsers and not seen often in the wild. If this
	// is a problem, use HTTP/2 or only use CloseNotify on methods
	// such as POST.
	CloseNotify() <-chan bool
}

var (
	// ServerContextKey is a context key. It can be used in HTTP
	// handlers with context.WithValue to access the server that
	// started the handler. The associated value will be of
	// type *Server.
	ServerContextKey = &contextKey{"http-server"}

	// LocalAddrContextKey is a context key. It can be used in
	// HTTP handlers with context.WithValue to access the local
	// address the connection arrived on.
	// The associated value will be of type net.Addr.
	LocalAddrContextKey = &contextKey{"local-addr"}
)
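
// Illustrative sketch (not part of the original file): a handler that cancels
// a long-running operation when CloseNotify fires, and that reads the local
// listening address via LocalAddrContextKey, as described above. The name
// slowExampleHandler and the 5-second delay are hypothetical.
func slowExampleHandler(w ResponseWriter, r *Request) {
	laddr, _ := r.Context().Value(LocalAddrContextKey).(net.Addr)

	cn, ok := w.(CloseNotifier)
	if !ok {
		Error(w, "close notification unsupported", StatusInternalServerError)
		return
	}
	select {
	case <-time.After(5 * time.Second):
		fmt.Fprintf(w, "served on %v\n", laddr)
	case <-cn.CloseNotify():
		// Client went away; abandon the work without writing a response.
	}
}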

// A conn represents the server side of an HTTP connection.
type conn struct {
	// server is the server on which the connection arrived.
	// Immutable; never nil.
	server *Server

	// cancelCtx cancels the connection-level context.
	cancelCtx context.CancelFunc

	// rwc is the underlying network connection.
	// This is never wrapped by other types and is the value given out
	// to CloseNotifier callers. It is usually of type *net.TCPConn or
	// *tls.Conn.
	rwc net.Conn

	// remoteAddr is rwc.RemoteAddr().String(). It is not populated synchronously
	// inside the Listener's Accept goroutine, as some implementations block.
	// It is populated immediately inside the (*conn).serve goroutine.
	// This is the value of a Handler's (*Request).RemoteAddr.
	remoteAddr string

	// tlsState is the TLS connection state when using TLS.
	// nil means not TLS.
	tlsState *tls.ConnectionState

	// werr is set to the first write error to rwc.
	// It is set via checkConnErrorWriter{c}, where bufw writes.
	werr error

	// r is bufr's read source. It's a wrapper around rwc that provides
	// io.LimitedReader-style limiting (while reading request headers)
	// and functionality to support CloseNotifier. See *connReader docs.
	r *connReader

	// bufr reads from r.
	bufr *bufio.Reader

	// bufw writes to checkConnErrorWriter{c}, which populates werr on error.
	bufw *bufio.Writer

	// lastMethod is the method of the most recent request
	// on this connection, if any.
	lastMethod string

	curReq atomic.Value // of *response (which has a Request in it)

	curState atomic.Value // of ConnState

	// mu guards hijackedv
	mu sync.Mutex

	// hijackedv is whether this connection has been hijacked
	// by a Handler with the Hijacker interface.
	// It is guarded by mu.
	hijackedv bool
}

func (c *conn) hijacked() bool {
	c.mu.Lock()
	defer c.mu.Unlock()
	return c.hijackedv
}

// c.mu must be held.
func (c *conn) hijackLocked() (rwc net.Conn, buf *bufio.ReadWriter, err error) {
	if c.hijackedv {
		return nil, nil, ErrHijacked
	}
	c.r.abortPendingRead()

	c.hijackedv = true
	rwc = c.rwc
	rwc.SetDeadline(time.Time{})

	buf = bufio.NewReadWriter(c.bufr, bufio.NewWriter(rwc))
	if c.r.hasByte {
		if _, err := c.bufr.Peek(c.bufr.Buffered() + 1); err != nil {
			return nil, nil, fmt.Errorf("unexpected Peek failure reading buffered byte: %v", err)
		}
	}
	c.setState(rwc, StateHijacked)
	return
}

// This should be >= 512 bytes for DetectContentType,
// but otherwise it's somewhat arbitrary.
const bufferBeforeChunkingSize = 2048

// chunkWriter writes to a response's conn buffer, and is the writer
// wrapped by the response.bufw buffered writer.
//
// chunkWriter also is responsible for finalizing the Header, including
// conditionally setting the Content-Type and setting a Content-Length
// in cases where the handler's final output is smaller than the buffer
// size. It also conditionally adds chunk headers, when in chunking mode.
//
// See the comment above (*response).Write for the entire write flow.
type chunkWriter struct {
	res *response

	// header is either nil or a deep clone of res.handlerHeader
	// at the time of res.WriteHeader, if res.WriteHeader is
	// called and extra buffering is being done to calculate
	// Content-Type and/or Content-Length.
	header Header

	// wroteHeader tells whether the header's been written to "the
	// wire" (or rather: w.conn.buf). This is unlike
	// (*response).wroteHeader, which tells only whether it was
	// logically written.
	wroteHeader bool

	// set by the writeHeader method:
	chunking bool // using chunked transfer encoding for reply body
}

var (
	crlf       = []byte("\r\n")
	colonSpace = []byte(": ")
)

func (cw *chunkWriter) Write(p []byte) (n int, err error) {
	if !cw.wroteHeader {
		cw.writeHeader(p)
	}
	if cw.res.req.Method == "HEAD" {
		// Eat writes.
		return len(p), nil
	}
	if cw.chunking {
		_, err = fmt.Fprintf(cw.res.conn.bufw, "%x\r\n", len(p))
		if err != nil {
			cw.res.conn.rwc.Close()
			return
		}
	}
	n, err = cw.res.conn.bufw.Write(p)
	if cw.chunking && err == nil {
		_, err = cw.res.conn.bufw.Write(crlf)
	}
	if err != nil {
		cw.res.conn.rwc.Close()
	}
	return
}

func (cw *chunkWriter) flush() {
	if !cw.wroteHeader {
		cw.writeHeader(nil)
	}
	cw.res.conn.bufw.Flush()
}

func (cw *chunkWriter) close() {
	if !cw.wroteHeader {
		cw.writeHeader(nil)
	}
	if cw.chunking {
		bw := cw.res.conn.bufw // conn's bufio writer
		// zero chunk to mark EOF
		bw.WriteString("0\r\n")
		if trailers := cw.res.finalTrailers(); trailers != nil {
			trailers.Write(bw) // the writer handles noting errors
		}
		// final blank line after the trailers (whether
		// present or not)
		bw.WriteString("\r\n")
	}
}

// A response represents the server side of an HTTP response.
type response struct {
	conn             *conn
	req              *Request // request for this response
	reqBody          io.ReadCloser
	cancelCtx        context.CancelFunc // when ServeHTTP exits
	wroteHeader      bool               // reply header has been (logically) written
	wroteContinue    bool               // 100 Continue response was written
	wants10KeepAlive bool               // HTTP/1.0 w/ Connection "keep-alive"
	wantsClose       bool               // HTTP request has Connection "close"

	w  *bufio.Writer // buffers output in chunks to chunkWriter
	cw chunkWriter

	// handlerHeader is the Header that Handlers get access to,
	// which may be retained and mutated even after WriteHeader.
	// handlerHeader is copied into cw.header at WriteHeader
	// time, and privately mutated thereafter.
	handlerHeader Header
	calledHeader  bool // handler accessed handlerHeader via Header

	written       int64 // number of bytes written in body
	contentLength int64 // explicitly-declared Content-Length; or -1
	status        int   // status code passed to WriteHeader

	// close connection after this reply. set on request and
	// updated after response from handler if there's a
	// "Connection: keep-alive" response header and a
	// Content-Length.
	closeAfterReply bool

	// requestBodyLimitHit is set by requestTooLarge when
	// maxBytesReader hits its max size. It is checked in
	// WriteHeader, to make sure we don't consume the
	// remaining request body to try to advance to the next HTTP
	// request. Instead, when this is set, we stop reading
	// subsequent requests on this connection and stop reading
	// input from it.
	requestBodyLimitHit bool

	// trailers are the headers to be sent after the handler
	// finishes writing the body. This field is initialized from
	// the Trailer response header when the response header is
	// written.
	trailers []string

	handlerDone atomicBool // set true when the handler exits

	// Buffers for Date, Content-Length, and status code
	dateBuf   [len(TimeFormat)]byte
	clenBuf   [10]byte
	statusBuf [3]byte

	// closeNotifyCh is the channel returned by CloseNotify.
	// TODO(bradfitz): this is currently (for Go 1.8) always
	// non-nil. Make this lazily-created again as it used to be?
	closeNotifyCh  chan bool
	didCloseNotify int32 // atomic (only 0->1 winner should send)
}

// TrailerPrefix is a magic prefix for ResponseWriter.Header map keys
// that, if present, signals that the map entry is actually for
// the response trailers, and not the response headers. The prefix
// is stripped after the ServeHTTP call finishes and the values are
// sent in the trailers.
//
// This mechanism is intended only for trailers that are not known
// prior to the headers being written. If the set of trailers is fixed
// or known before the header is written, the normal Go trailers mechanism
// is preferred:
//    https://golang.org/pkg/net/http/#ResponseWriter
//    https://golang.org/pkg/net/http/#example_ResponseWriter_trailers
const TrailerPrefix = "Trailer:"

// finalTrailers is called after the Handler exits and returns a non-nil
// value if the Handler set any trailers.
func (w *response) finalTrailers() Header {
	var t Header
	for k, vv := range w.handlerHeader {
		if strings.HasPrefix(k, TrailerPrefix) {
			if t == nil {
				t = make(Header)
			}
			t[strings.TrimPrefix(k, TrailerPrefix)] = vv
		}
	}
	for _, k := range w.trailers {
		if t == nil {
			t = make(Header)
		}
		for _, v := range w.handlerHeader[k] {
			t.Add(k, v)
		}
	}
	return t
}

type atomicBool int32

func (b *atomicBool) isSet() bool { return atomic.LoadInt32((*int32)(b)) != 0 }
func (b *atomicBool) setTrue()    { atomic.StoreInt32((*int32)(b), 1) }

// declareTrailer is called for each Trailer header when the
// response header is written. It notes that a header will need to be
// written in the trailers at the end of the response.
func (w *response) declareTrailer(k string) {
	k = CanonicalHeaderKey(k)
	switch k {
	case "Transfer-Encoding", "Content-Length", "Trailer":
		// Forbidden by RFC 2616 14.40.
		return
	}
	w.trailers = append(w.trailers, k)
}

// requestTooLarge is called by maxBytesReader when too much input has
// been read from the client.
func (w *response) requestTooLarge() {
	w.closeAfterReply = true
	w.requestBodyLimitHit = true
	if !w.wroteHeader {
		w.Header().Set("Connection", "close")
	}
}

// needsSniff reports whether a Content-Type still needs to be sniffed.
func (w *response) needsSniff() bool {
	_, haveType := w.handlerHeader["Content-Type"]
	return !w.cw.wroteHeader && !haveType && w.written < sniffLen
}

// writerOnly hides an io.Writer value's optional ReadFrom method
// from io.Copy.
type writerOnly struct {
	io.Writer
}

func srcIsRegularFile(src io.Reader) (isRegular bool, err error) {
	switch v := src.(type) {
	case *os.File:
		fi, err := v.Stat()
		if err != nil {
			return false, err
		}
		return fi.Mode().IsRegular(), nil
	case *io.LimitedReader:
		return srcIsRegularFile(v.R)
	default:
		return
	}
}

// ReadFrom is here to optimize copying from an *os.File regular file
// to a *net.TCPConn with sendfile.
func (w *response) ReadFrom(src io.Reader) (n int64, err error) {
	// Our underlying w.conn.rwc is usually a *TCPConn (with its
	// own ReadFrom method). If not, or if our src isn't a regular
	// file, just fall back to the normal copy method.
	rf, ok := w.conn.rwc.(io.ReaderFrom)
	regFile, err := srcIsRegularFile(src)
	if err != nil {
		return 0, err
	}
	if !ok || !regFile {
		bufp := copyBufPool.Get().(*[]byte)
		defer copyBufPool.Put(bufp)
		return io.CopyBuffer(writerOnly{w}, src, *bufp)
	}

	// sendfile path:

	if !w.wroteHeader {
		w.WriteHeader(StatusOK)
	}

	if w.needsSniff() {
		n0, err := io.Copy(writerOnly{w}, io.LimitReader(src, sniffLen))
		n += n0
		if err != nil {
			return n, err
		}
	}

	w.w.Flush()  // get rid of any previous writes
	w.cw.flush() // make sure Header is written; flush data to rwc

	// Now that cw has been flushed, its chunking field is guaranteed initialized.
	if !w.cw.chunking && w.bodyAllowed() {
		n0, err := rf.ReadFrom(src)
		n += n0
		w.written += n0
		return n, err
	}

	n0, err := io.Copy(writerOnly{w}, src)
	n += n0
	return n, err
}

// debugServerConnections controls whether all server connections are wrapped
// with a verbose logging wrapper.
const debugServerConnections = false

// Create new connection from rwc.
func (srv *Server) newConn(rwc net.Conn) *conn {
	c := &conn{
		server: srv,
		rwc:    rwc,
	}
	if debugServerConnections {
		c.rwc = newLoggingConn("server", c.rwc)
	}
	return c
}

type readResult struct {
	n   int
	err error
	b   byte // byte read, if n == 1
}

// connReader is the io.Reader wrapper used by *conn. It combines a
// selectively-activated io.LimitedReader (to bound request header
// read sizes) with support for selectively keeping an io.Reader.Read
// call blocked in a background goroutine to wait for activity and
// trigger a CloseNotifier channel.
type connReader struct {
	conn *conn

	mu      sync.Mutex // guards following
	hasByte bool
	byteBuf [1]byte
	cond    *sync.Cond
	inRead  bool
	aborted bool  // set true before conn.rwc deadline is set to past
	remain  int64 // bytes remaining
}

func (cr *connReader) lock() {
	cr.mu.Lock()
	if cr.cond == nil {
		cr.cond = sync.NewCond(&cr.mu)
	}
}

func (cr *connReader) unlock() { cr.mu.Unlock() }

func (cr *connReader) startBackgroundRead() {
	cr.lock()
	defer cr.unlock()
	if cr.inRead {
		panic("invalid concurrent Body.Read call")
	}
	if cr.hasByte {
		return
	}
	cr.inRead = true
	cr.conn.rwc.SetReadDeadline(time.Time{})
	go cr.backgroundRead()
}

func (cr *connReader) backgroundRead() {
	n, err := cr.conn.rwc.Read(cr.byteBuf[:])
	cr.lock()
	if n == 1 {
		cr.hasByte = true
		// We were at EOF already (since we wouldn't be in a
		// background read otherwise), so this is a pipelined
		// HTTP request.
		cr.closeNotifyFromPipelinedRequest()
	}
	if ne, ok := err.(net.Error); ok && cr.aborted && ne.Timeout() {
		// Ignore this error. It's the expected error from
		// another goroutine calling abortPendingRead.
	} else if err != nil {
		cr.handleReadError(err)
	}
	cr.aborted = false
	cr.inRead = false
	cr.unlock()
	cr.cond.Broadcast()
}

func (cr *connReader) abortPendingRead() {
	cr.lock()
	defer cr.unlock()
	if !cr.inRead {
		return
	}
	cr.aborted = true
	cr.conn.rwc.SetReadDeadline(aLongTimeAgo)
	for cr.inRead {
		cr.cond.Wait()
	}
	cr.conn.rwc.SetReadDeadline(time.Time{})
}

func (cr *connReader) setReadLimit(remain int64) { cr.remain = remain }
func (cr *connReader) setInfiniteReadLimit()     { cr.remain = maxInt64 }
func (cr *connReader) hitReadLimit() bool        { return cr.remain <= 0 }

// may be called from multiple goroutines.
func (cr *connReader) handleReadError(err error) {
	cr.conn.cancelCtx()
	cr.closeNotify()
}

// closeNotifyFromPipelinedRequest simply calls closeNotify.
//
// This method wrapper is here for documentation. The callers are the
// cases where we send on the closenotify channel because of a
// pipelined HTTP request, per the previous Go behavior and
// documentation (that this "MAY" happen).
//
// TODO: consider changing this behavior and making context
// cancelation and closenotify work the same.
func (cr *connReader) closeNotifyFromPipelinedRequest() {
	cr.closeNotify()
}

// may be called from multiple goroutines.
func (cr *connReader) closeNotify() {
	res, _ := cr.conn.curReq.Load().(*response)
	if res != nil {
		if atomic.CompareAndSwapInt32(&res.didCloseNotify, 0, 1) {
			res.closeNotifyCh <- true
		}
	}
}

func (cr *connReader) Read(p []byte) (n int, err error) {
	cr.lock()
	if cr.inRead {
		cr.unlock()
		if cr.conn.hijacked() {
			panic("invalid Body.Read call. After hijacked, the original Request must not be used")
		}
		panic("invalid concurrent Body.Read call")
	}
	if cr.hitReadLimit() {
		cr.unlock()
		return 0, io.EOF
	}
	if len(p) == 0 {
		cr.unlock()
		return 0, nil
	}
	if int64(len(p)) > cr.remain {
		p = p[:cr.remain]
	}
	if cr.hasByte {
		p[0] = cr.byteBuf[0]
		cr.hasByte = false
		cr.unlock()
		return 1, nil
	}
	cr.inRead = true
	cr.unlock()
	n, err = cr.conn.rwc.Read(p)

	cr.lock()
	cr.inRead = false
	if err != nil {
		cr.handleReadError(err)
	}
	cr.remain -= int64(n)
	cr.unlock()

	cr.cond.Broadcast()
	return n, err
}

var (
	bufioReaderPool   sync.Pool
	bufioWriter2kPool sync.Pool
	bufioWriter4kPool sync.Pool
)

var copyBufPool = sync.Pool{
	New: func() interface{} {
		b := make([]byte, 32*1024)
		return &b
	},
}

func bufioWriterPool(size int) *sync.Pool {
	switch size {
	case 2 << 10:
		return &bufioWriter2kPool
	case 4 << 10:
		return &bufioWriter4kPool
	}
	return nil
}

func newBufioReader(r io.Reader) *bufio.Reader {
	if v := bufioReaderPool.Get(); v != nil {
		br := v.(*bufio.Reader)
		br.Reset(r)
		return br
	}
	// Note: if this reader size is ever changed, update
	// TestHandlerBodyClose's assumptions.
	return bufio.NewReader(r)
}

func putBufioReader(br *bufio.Reader) {
	br.Reset(nil)
	bufioReaderPool.Put(br)
}

func newBufioWriterSize(w io.Writer, size int) *bufio.Writer {
	pool := bufioWriterPool(size)
	if pool != nil {
		if v := pool.Get(); v != nil {
			bw := v.(*bufio.Writer)
			bw.Reset(w)
			return bw
		}
	}
	return bufio.NewWriterSize(w, size)
}

func putBufioWriter(bw *bufio.Writer) {
	bw.Reset(nil)
	if pool := bufioWriterPool(bw.Available()); pool != nil {
		pool.Put(bw)
	}
}

// DefaultMaxHeaderBytes is the maximum permitted size of the headers
// in an HTTP request.
// This can be overridden by setting Server.MaxHeaderBytes.
const DefaultMaxHeaderBytes = 1 << 20 // 1 MB

func (srv *Server) maxHeaderBytes() int {
	if srv.MaxHeaderBytes > 0 {
		return srv.MaxHeaderBytes
	}
	return DefaultMaxHeaderBytes
}

func (srv *Server) initialReadLimitSize() int64 {
	return int64(srv.maxHeaderBytes()) + 4096 // bufio slop
}

// wrapper around io.ReadCloser which on first read, sends an
// HTTP/1.1 100 Continue header
type expectContinueReader struct {
	resp       *response
	readCloser io.ReadCloser
	closed     bool
	sawEOF     bool
}

func (ecr *expectContinueReader) Read(p []byte) (n int, err error) {
	if ecr.closed {
		return 0, ErrBodyReadAfterClose
	}
	if !ecr.resp.wroteContinue && !ecr.resp.conn.hijacked() {
		ecr.resp.wroteContinue = true
		ecr.resp.conn.bufw.WriteString("HTTP/1.1 100 Continue\r\n\r\n")
		ecr.resp.conn.bufw.Flush()
	}
	n, err = ecr.readCloser.Read(p)
	if err == io.EOF {
		ecr.sawEOF = true
	}
	return
}

func (ecr *expectContinueReader) Close() error {
	ecr.closed = true
	return ecr.readCloser.Close()
}

// TimeFormat is the time format to use when generating times in HTTP
// headers. It is like time.RFC1123 but hard-codes GMT as the time
// zone. The time being formatted must be in UTC for Format to
// generate the correct format.
//
// For parsing this time format, see ParseTime.
const TimeFormat = "Mon, 02 Jan 2006 15:04:05 GMT"

// appendTime is a non-allocating version of []byte(t.UTC().Format(TimeFormat))
func appendTime(b []byte, t time.Time) []byte {
	const days = "SunMonTueWedThuFriSat"
	const months = "JanFebMarAprMayJunJulAugSepOctNovDec"

	t = t.UTC()
	yy, mm, dd := t.Date()
	hh, mn, ss := t.Clock()
	day := days[3*t.Weekday():]
	mon := months[3*(mm-1):]

	return append(b,
		day[0], day[1], day[2], ',', ' ',
		byte('0'+dd/10), byte('0'+dd%10), ' ',
		mon[0], mon[1], mon[2], ' ',
		byte('0'+yy/1000), byte('0'+(yy/100)%10), byte('0'+(yy/10)%10), byte('0'+yy%10), ' ',
		byte('0'+hh/10), byte('0'+hh%10), ':',
		byte('0'+mn/10), byte('0'+mn%10), ':',
		byte('0'+ss/10), byte('0'+ss%10), ' ',
		'G', 'M', 'T')
}

var errTooLarge = errors.New("http: request too large")
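
// Illustrative sketch (not part of the original file): the helper below shows
// the equivalence stated in the appendTime comment above, producing the same
// Date header text via both the non-allocating and the straightforward paths.
// The function name formatDateHeaderExample is hypothetical.
func formatDateHeaderExample(t time.Time) (fast, slow string) {
	// Non-allocating path used by the server when writing the Date header.
	fast = string(appendTime(make([]byte, 0, len(TimeFormat)), t))
	// Straightforward path; produces the same bytes for any time value.
	slow = t.UTC().Format(TimeFormat)
	return fast, slow
}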

// Read next request from connection.
func (c *conn) readRequest(ctx context.Context) (w *response, err error) {
	if c.hijacked() {
		return nil, ErrHijacked
	}

	var (
		wholeReqDeadline time.Time // or zero if none
		hdrDeadline      time.Time // or zero if none
	)
	t0 := time.Now()
	if d := c.server.readHeaderTimeout(); d != 0 {
		hdrDeadline = t0.Add(d)
	}
	if d := c.server.ReadTimeout; d != 0 {
		wholeReqDeadline = t0.Add(d)
	}
	c.rwc.SetReadDeadline(hdrDeadline)
	if d := c.server.WriteTimeout; d != 0 {
		defer func() {
			c.rwc.SetWriteDeadline(time.Now().Add(d))
		}()
	}

	c.r.setReadLimit(c.server.initialReadLimitSize())
	if c.lastMethod == "POST" {
		// RFC 2616 section 4.1 tolerance for old buggy clients.
		peek, _ := c.bufr.Peek(4) // ReadRequest will get err below
		c.bufr.Discard(numLeadingCRorLF(peek))
	}
	req, err := readRequest(c.bufr, keepHostHeader)
	if err != nil {
		if c.r.hitReadLimit() {
			return nil, errTooLarge
		}
		return nil, err
	}

	if !http1ServerSupportsRequest(req) {
		return nil, badRequestError("unsupported protocol version")
	}

	c.lastMethod = req.Method
	c.r.setInfiniteReadLimit()

	hosts, haveHost := req.Header["Host"]
	isH2Upgrade := req.isH2Upgrade()
	if req.ProtoAtLeast(1, 1) && (!haveHost || len(hosts) == 0) && !isH2Upgrade && req.Method != "CONNECT" {
		return nil, badRequestError("missing required Host header")
	}
	if len(hosts) > 1 {
		return nil, badRequestError("too many Host headers")
	}
	if len(hosts) == 1 && !httplex.ValidHostHeader(hosts[0]) {
		return nil, badRequestError("malformed Host header")
	}
	for k, vv := range req.Header {
		if !httplex.ValidHeaderFieldName(k) {
			return nil, badRequestError("invalid header name")
		}
		for _, v := range vv {
			if !httplex.ValidHeaderFieldValue(v) {
				return nil, badRequestError("invalid header value")
			}
		}
	}
	delete(req.Header, "Host")

	ctx, cancelCtx := context.WithCancel(ctx)
	req.ctx = ctx
	req.RemoteAddr = c.remoteAddr
	req.TLS = c.tlsState
	if body, ok := req.Body.(*body); ok {
		body.doEarlyClose = true
	}

	// Adjust the read deadline if necessary.
	if !hdrDeadline.Equal(wholeReqDeadline) {
		c.rwc.SetReadDeadline(wholeReqDeadline)
	}

	w = &response{
		conn:          c,
		cancelCtx:     cancelCtx,
		req:           req,
		reqBody:       req.Body,
		handlerHeader: make(Header),
		contentLength: -1,
		closeNotifyCh: make(chan bool, 1),

		// We populate these ahead of time so we're not
		// reading from req.Header after their Handler starts
		// and maybe mutates it (Issue 14940)
		wants10KeepAlive: req.wantsHttp10KeepAlive(),
		wantsClose:       req.wantsClose(),
	}
	if isH2Upgrade {
		w.closeAfterReply = true
	}
	w.cw.res = w
	w.w = newBufioWriterSize(&w.cw, bufferBeforeChunkingSize)
	return w, nil
}

// http1ServerSupportsRequest reports whether Go's HTTP/1.x server
// supports the given request.
func http1ServerSupportsRequest(req *Request) bool {
	if req.ProtoMajor == 1 {
		return true
	}
	// Accept "PRI * HTTP/2.0" upgrade requests, so Handlers can
	// wire up their own HTTP/2 upgrades.
	if req.ProtoMajor == 2 && req.ProtoMinor == 0 &&
		req.Method == "PRI" && req.RequestURI == "*" {
		return true
	}
	// Reject HTTP/0.x, and all other HTTP/2+ requests (which
	// aren't encoded in ASCII anyway).
	return false
}

func (w *response) Header() Header {
	if w.cw.header == nil && w.wroteHeader && !w.cw.wroteHeader {
		// Accessing the header between logically writing it
		// and physically writing it means we need to allocate
		// a clone to snapshot the logically written state.
		w.cw.header = w.handlerHeader.clone()
	}
	w.calledHeader = true
	return w.handlerHeader
}

// maxPostHandlerReadBytes is the max number of Request.Body bytes not
// consumed by a handler that the server will read from the client
// in order to keep a connection alive. If there are more bytes than
// this, the server, to be paranoid, instead sends a "Connection:
// close" response.
//
// This number is approximately what a typical machine's TCP buffer
// size is anyway. (if we have the bytes on the machine, we might as
// well read them)
const maxPostHandlerReadBytes = 256 << 10

func checkWriteHeaderCode(code int) {
	// Issue 22880: require valid WriteHeader status codes.
	// For now we only enforce that it's three digits.
	// In the future we might block things over 599 (600 and above aren't defined
	// at http://httpwg.org/specs/rfc7231.html#status.codes)
	// and we might block under 200 (once we have more mature 1xx support).
	// But for now any three digits.
	//
	// We used to send "HTTP/1.1 000 0" on the wire in responses but there's
	// no equivalent bogus thing we can realistically send in HTTP/2,
	// so we'll consistently panic instead and help people find their bugs
	// early. (We can't return an error from WriteHeader even if we wanted to.)
	if code < 100 || code > 999 {
		panic(fmt.Sprintf("invalid WriteHeader code %v", code))
	}
}

func (w *response) WriteHeader(code int) {
	if w.conn.hijacked() {
		w.conn.server.logf("http: response.WriteHeader on hijacked connection")
		return
	}
	if w.wroteHeader {
		w.conn.server.logf("http: multiple response.WriteHeader calls")
		return
	}
	checkWriteHeaderCode(code)
	w.wroteHeader = true
	w.status = code

	if w.calledHeader && w.cw.header == nil {
		w.cw.header = w.handlerHeader.clone()
	}

	if cl := w.handlerHeader.get("Content-Length"); cl != "" {
		v, err := strconv.ParseInt(cl, 10, 64)
		if err == nil && v >= 0 {
			w.contentLength = v
		} else {
			w.conn.server.logf("http: invalid Content-Length of %q", cl)
			w.handlerHeader.Del("Content-Length")
		}
	}
}

// extraHeader is the set of headers sometimes added by chunkWriter.writeHeader.
// This type is used to avoid extra allocations from cloning and/or populating
// the response Header map and all its 1-element slices.
type extraHeader struct {
	contentType      string
	connection       string
	transferEncoding string
	date             []byte // written if not nil
	contentLength    []byte // written if not nil
}

// Sorted the same as extraHeader.Write's loop.
var extraHeaderKeys = [][]byte{
	[]byte("Content-Type"),
	[]byte("Connection"),
	[]byte("Transfer-Encoding"),
}

var (
	headerContentLength = []byte("Content-Length: ")
	headerDate          = []byte("Date: ")
)
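
// Illustrative sketch (not part of the original file): a handler showing the
// intended use of WriteHeader for explicit status codes. Per
// checkWriteHeaderCode above, codes outside the 100-999 range make
// WriteHeader panic, so handlers should pass real three-digit HTTP codes.
// The name notFoundExampleHandler is hypothetical.
func notFoundExampleHandler(w ResponseWriter, r *Request) {
	// Set headers before WriteHeader; changes made afterwards are
	// ignored unless they are trailers.
	w.Header().Set("Content-Type", "text/plain; charset=utf-8")
	w.WriteHeader(StatusNotFound) // explicit error code
	io.WriteString(w, "nothing to see here\n")
}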

// Write writes the headers described in h to w.
//
// This method has a value receiver, despite the somewhat large size
// of h, because it prevents an allocation. The escape analysis isn't
// smart enough to realize this function doesn't mutate h.
func (h extraHeader) Write(w *bufio.Writer) {
	if h.date != nil {
		w.Write(headerDate)
		w.Write(h.date)
		w.Write(crlf)
	}
	if h.contentLength != nil {
		w.Write(headerContentLength)
		w.Write(h.contentLength)
		w.Write(crlf)
	}
	for i, v := range []string{h.contentType, h.connection, h.transferEncoding} {
		if v != "" {
			w.Write(extraHeaderKeys[i])
			w.Write(colonSpace)
			w.WriteString(v)
			w.Write(crlf)
		}
	}
}

// writeHeader finalizes the header sent to the client and writes it
// to cw.res.conn.bufw.
//
// p is not written by writeHeader, but is the first chunk of the body
// that will be written. It is sniffed for a Content-Type if none is
// set explicitly. It's also used to set the Content-Length, if the
// total body size was small and the handler has already finished
// running.
func (cw *chunkWriter) writeHeader(p []byte) {
	if cw.wroteHeader {
		return
	}
	cw.wroteHeader = true

	w := cw.res
	keepAlivesEnabled := w.conn.server.doKeepAlives()
	isHEAD := w.req.Method == "HEAD"

	// header is written out to w.conn.buf below. Depending on the
	// state of the handler, we either own the map or not. If we
	// don't own it, the exclude map is created lazily for
	// WriteSubset to remove headers. The setHeader struct holds
	// headers we need to add.
	header := cw.header
	owned := header != nil
	if !owned {
		header = w.handlerHeader
	}
	var excludeHeader map[string]bool
	delHeader := func(key string) {
		if owned {
			header.Del(key)
			return
		}
		if _, ok := header[key]; !ok {
			return
		}
		if excludeHeader == nil {
			excludeHeader = make(map[string]bool)
		}
		excludeHeader[key] = true
	}
	var setHeader extraHeader

	// Don't write out the fake "Trailer:foo" keys. See TrailerPrefix.
	trailers := false
	for k := range cw.header {
		if strings.HasPrefix(k, TrailerPrefix) {
			if excludeHeader == nil {
				excludeHeader = make(map[string]bool)
			}
			excludeHeader[k] = true
			trailers = true
		}
	}
	for _, v := range cw.header["Trailer"] {
		trailers = true
		foreachHeaderElement(v, cw.res.declareTrailer)
	}

	te := header.get("Transfer-Encoding")
	hasTE := te != ""

	// If the handler is done but never sent a Content-Length
	// response header and this is our first (and last) write, set
	// it, even to zero. This helps HTTP/1.0 clients keep their
	// "keep-alive" connections alive.
	// Exceptions: 304/204/1xx responses never get Content-Length, and if
	// it was a HEAD request, we don't know the difference between
	// 0 actual bytes and 0 bytes because the handler noticed it
	// was a HEAD request and chose not to write anything. So for
	// HEAD, the handler should either write the Content-Length or
	// write non-zero bytes. If it's actually 0 bytes and the
	// handler never looked at the Request.Method, we just don't
	// send a Content-Length header.
	// Further, we don't send an automatic Content-Length if they
	// set a Transfer-Encoding, because they're generally incompatible.
	if w.handlerDone.isSet() && !trailers && !hasTE && bodyAllowedForStatus(w.status) && header.get("Content-Length") == "" && (!isHEAD || len(p) > 0) {
		w.contentLength = int64(len(p))
		setHeader.contentLength = strconv.AppendInt(cw.res.clenBuf[:0], int64(len(p)), 10)
	}

	// If this was an HTTP/1.0 request with keep-alive and we sent a
	// Content-Length back, we can make this a keep-alive response ...
	if w.wants10KeepAlive && keepAlivesEnabled {
		sentLength := header.get("Content-Length") != ""
		if sentLength && header.get("Connection") == "keep-alive" {
			w.closeAfterReply = false
		}
	}

	// Check for an explicit (and valid) Content-Length header.
	hasCL := w.contentLength != -1

	if w.wants10KeepAlive && (isHEAD || hasCL || !bodyAllowedForStatus(w.status)) {
		_, connectionHeaderSet := header["Connection"]
		if !connectionHeaderSet {
			setHeader.connection = "keep-alive"
		}
	} else if !w.req.ProtoAtLeast(1, 1) || w.wantsClose {
		w.closeAfterReply = true
	}

	if header.get("Connection") == "close" || !keepAlivesEnabled {
		w.closeAfterReply = true
	}

	// If the client wanted a 100-continue but we never sent it to
	// them (or, more strictly: we never finished reading their
	// request body), don't reuse this connection because it's now
	// in an unknown state: we might be sending this response at
	// the same time the client is now sending its request body
	// after a timeout. (Some HTTP clients send Expect:
	// 100-continue but knowing that some servers don't support
	// it, the clients set a timer and send the body later anyway)
	// If we haven't seen EOF, we can't skip over the unread body
	// because we don't know if the next bytes on the wire will be
	// the body-following-the-timer or the subsequent request.
	// See Issue 11549.
	if ecr, ok := w.req.Body.(*expectContinueReader); ok && !ecr.sawEOF {
		w.closeAfterReply = true
	}

	// Per RFC 2616, we should consume the request body before
	// replying, if the handler hasn't already done so. But we
	// don't want to do an unbounded amount of reading here for
	// DoS reasons, so we only try up to a threshold.
	// TODO(bradfitz): where does RFC 2616 say that? See Issue 15527
	// about HTTP/1.x Handlers concurrently reading and writing, like
	// HTTP/2 handlers can do. Maybe this code should be relaxed?
	if w.req.ContentLength != 0 && !w.closeAfterReply {
		var discard, tooBig bool

		switch bdy := w.req.Body.(type) {
		case *expectContinueReader:
			if bdy.resp.wroteContinue {
				discard = true
			}
		case *body:
			bdy.mu.Lock()
			switch {
			case bdy.closed:
				if !bdy.sawEOF {
					// Body was closed in handler with non-EOF error.
					w.closeAfterReply = true
				}
			case bdy.unreadDataSizeLocked() >= maxPostHandlerReadBytes:
				tooBig = true
			default:
				discard = true
			}
			bdy.mu.Unlock()
		default:
			discard = true
		}

		if discard {
			_, err := io.CopyN(ioutil.Discard, w.reqBody, maxPostHandlerReadBytes+1)
			switch err {
			case nil:
				// There must be even more data left over.
				tooBig = true
			case ErrBodyReadAfterClose:
				// Body was already consumed and closed.
			case io.EOF:
				// The remaining body was just consumed, close it.
				err = w.reqBody.Close()
				if err != nil {
					w.closeAfterReply = true
				}
			default:
				// Some other kind of error occurred, like a read timeout, or
				// corrupt chunked encoding. In any case, whatever remains
				// on the wire must not be parsed as another HTTP request.
				w.closeAfterReply = true
			}
		}

		if tooBig {
			w.requestTooLarge()
			delHeader("Connection")
			setHeader.connection = "close"
		}
	}

	code := w.status
	if bodyAllowedForStatus(code) {
		// If no content type, apply sniffing algorithm to body.
		_, haveType := header["Content-Type"]
		if !haveType && !hasTE && len(p) > 0 {
			setHeader.contentType = DetectContentType(p)
		}
	} else {
		for _, k := range suppressedHeaders(code) {
			delHeader(k)
		}
	}

	if _, ok := header["Date"]; !ok {
		setHeader.date = appendTime(cw.res.dateBuf[:0], time.Now())
	}

	if hasCL && hasTE && te != "identity" {
		// TODO: return an error if WriteHeader gets a return parameter
		// For now just ignore the Content-Length.
		w.conn.server.logf("http: WriteHeader called with both Transfer-Encoding of %q and a Content-Length of %d",
			te, w.contentLength)
		delHeader("Content-Length")
		hasCL = false
	}

	if w.req.Method == "HEAD" || !bodyAllowedForStatus(code) {
		// do nothing
	} else if code == StatusNoContent {
		delHeader("Transfer-Encoding")
	} else if hasCL {
		delHeader("Transfer-Encoding")
	} else if w.req.ProtoAtLeast(1, 1) {
		// HTTP/1.1 or greater: Transfer-Encoding has been set to identity, and no
		// content-length has been provided. The connection must be closed after the
		// reply is written, and no chunking is to be done. This is the setup
		// recommended in the Server-Sent Events candidate recommendation 11,
		// section 8.
		if hasTE && te == "identity" {
			cw.chunking = false
			w.closeAfterReply = true
		} else {
			// HTTP/1.1 or greater: use chunked transfer encoding
			// to avoid closing the connection at EOF.
			cw.chunking = true
			setHeader.transferEncoding = "chunked"
			if hasTE && te == "chunked" {
				// We will send the chunked Transfer-Encoding header later.
				delHeader("Transfer-Encoding")
			}
		}
	} else {
		// HTTP version < 1.1: cannot do chunked transfer
		// encoding and we don't know the Content-Length so
		// signal EOF by closing connection.
		w.closeAfterReply = true
		delHeader("Transfer-Encoding") // in case already set
	}

	// Cannot use Content-Length with non-identity Transfer-Encoding.
	if cw.chunking {
		delHeader("Content-Length")
	}
	if !w.req.ProtoAtLeast(1, 0) {
		return
	}

	if w.closeAfterReply && (!keepAlivesEnabled || !hasToken(cw.header.get("Connection"), "close")) {
		delHeader("Connection")
		if w.req.ProtoAtLeast(1, 1) {
			setHeader.connection = "close"
		}
	}

	writeStatusLine(w.conn.bufw, w.req.ProtoAtLeast(1, 1), code, w.statusBuf[:])
	cw.header.WriteSubset(w.conn.bufw, excludeHeader)
	setHeader.Write(w.conn.bufw)
	w.conn.bufw.Write(crlf)
}

// foreachHeaderElement splits v according to the "#rule" construction
// in RFC 2616 section 2.1 and calls fn for each non-empty element.
func foreachHeaderElement(v string, fn func(string)) {
	v = textproto.TrimString(v)
	if v == "" {
		return
	}
	if !strings.Contains(v, ",") {
		fn(v)
		return
	}
	for _, f := range strings.Split(v, ",") {
		if f = textproto.TrimString(f); f != "" {
			fn(f)
		}
	}
}

// writeStatusLine writes an HTTP/1.x Status-Line (RFC 2616 Section 6.1)
// to bw. is11 is whether the HTTP request is HTTP/1.1. false means HTTP/1.0.
// code is the response status code.
// scratch is an optional scratch buffer. If it has at least capacity 3, it's used.
func writeStatusLine(bw *bufio.Writer, is11 bool, code int, scratch []byte) {
	if is11 {
		bw.WriteString("HTTP/1.1 ")
	} else {
		bw.WriteString("HTTP/1.0 ")
	}
	if text, ok := statusText[code]; ok {
		bw.Write(strconv.AppendInt(scratch[:0], int64(code), 10))
		bw.WriteByte(' ')
		bw.WriteString(text)
		bw.WriteString("\r\n")
	} else {
		// don't worry about performance
		fmt.Fprintf(bw, "%03d status code %d\r\n", code, code)
	}
}

// bodyAllowed reports whether a Write is allowed for this response type.
// It's illegal to call this before the header has been flushed.
func (w *response) bodyAllowed() bool {
	if !w.wroteHeader {
		panic("")
	}
	return bodyAllowedForStatus(w.status)
}

// The Life Of A Write is like this:
//
// Handler starts. No header has been sent. The handler can either
// write a header, or just start writing. Writing before sending a header
// sends an implicitly empty 200 OK header.
//
// If the handler didn't declare a Content-Length up front, we either
// go into chunking mode or, if the handler finishes running before
// the chunking buffer size, we compute a Content-Length and send that
// in the header instead.
//
// Likewise, if the handler didn't set a Content-Type, we sniff that
// from the initial chunk of output.
//
// The Writers are wired together like:
//
// 1. *response (the ResponseWriter) ->
// 2. (*response).w, a *bufio.Writer of bufferBeforeChunkingSize bytes
// 3. chunkWriter.Writer (whose writeHeader finalizes Content-Length/Type)
//    and which writes the chunk headers, if needed.
// 4. conn.buf, a bufio.Writer of default (4kB) bytes, writing to ->
// 5. checkConnErrorWriter{c}, which notes any non-nil error on Write
//    and populates c.werr with it if so. but otherwise writes to:
// 6. the rwc, the net.Conn.
//
// TODO(bradfitz): short-circuit some of the buffering when the
// initial header contains both a Content-Type and Content-Length.
// Also short-circuit in (1) when the header's been sent and not in
// chunking mode, writing directly to (4) instead, if (2) has no
// buffered data. More generally, we could short-circuit from (1) to
// (3) even in chunking mode if the write size from (1) is over some
// threshold and nothing is in (2). The answer might be mostly making
// bufferBeforeChunkingSize smaller and having bufio's fast-paths deal
// with this instead.
func (w *response) Write(data []byte) (n int, err error) {
	return w.write(len(data), data, "")
}

func (w *response) WriteString(data string) (n int, err error) {
	return w.write(len(data), nil, data)
}
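
// Illustrative sketch (not part of the original file), tied to the "Life Of A
// Write" comment above: a handler whose entire output fits inside
// bufferBeforeChunkingSize and which returns before flushing, so the server
// computes a Content-Length and sniffs a Content-Type (likely text/html here)
// instead of using chunked encoding. The name smallBodyExampleHandler is
// hypothetical.
func smallBodyExampleHandler(w ResponseWriter, r *Request) {
	// No WriteHeader, no Content-Type, no Content-Length: the first Write
	// implies a 200 OK, and because the handler returns before the
	// 2048-byte buffer fills, the response goes out with a computed
	// Content-Length rather than chunked transfer encoding.
	io.WriteString(w, "<h1>hello, world</h1>")
}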

// either dataB or dataS is non-zero.
func (w *response) write(lenData int, dataB []byte, dataS string) (n int, err error) {
	if w.conn.hijacked() {
		if lenData > 0 {
			w.conn.server.logf("http: response.Write on hijacked connection")
		}
		return 0, ErrHijacked
	}
	if !w.wroteHeader {
		w.WriteHeader(StatusOK)
	}
	if lenData == 0 {
		return 0, nil
	}
	if !w.bodyAllowed() {
		return 0, ErrBodyNotAllowed
	}

	w.written += int64(lenData) // ignoring errors, for errorKludge
	if w.contentLength != -1 && w.written > w.contentLength {
		return 0, ErrContentLength
	}
	if dataB != nil {
		return w.w.Write(dataB)
	} else {
		return w.w.WriteString(dataS)
	}
}

func (w *response) finishRequest() {
	w.handlerDone.setTrue()

	if !w.wroteHeader {
		w.WriteHeader(StatusOK)
	}

	w.w.Flush()
	putBufioWriter(w.w)
	w.cw.close()
	w.conn.bufw.Flush()

	w.conn.r.abortPendingRead()

	// Close the body (regardless of w.closeAfterReply) so we can
	// re-use its bufio.Reader later safely.
	w.reqBody.Close()

	if w.req.MultipartForm != nil {
		w.req.MultipartForm.RemoveAll()
	}
}

// shouldReuseConnection reports whether the underlying TCP connection can be reused.
// It must only be called after the handler is done executing.
func (w *response) shouldReuseConnection() bool {
	if w.closeAfterReply {
		// The request or something set while executing the
		// handler indicated we shouldn't reuse this
		// connection.
		return false
	}

	if w.req.Method != "HEAD" && w.contentLength != -1 && w.bodyAllowed() && w.contentLength != w.written {
		// Did not write enough. Avoid getting out of sync.
		return false
	}

	// There was some error writing to the underlying connection
	// during the request, so don't re-use this conn.
	if w.conn.werr != nil {
		return false
	}

	if w.closedRequestBodyEarly() {
		return false
	}

	return true
}

func (w *response) closedRequestBodyEarly() bool {
	body, ok := w.req.Body.(*body)
	return ok && body.didEarlyClose()
}

func (w *response) Flush() {
	if !w.wroteHeader {
		w.WriteHeader(StatusOK)
	}
	w.w.Flush()
	w.cw.flush()
}

func (c *conn) finalFlush() {
	if c.bufr != nil {
		// Steal the bufio.Reader (~4KB worth of memory) and its associated
		// reader for a future connection.
		putBufioReader(c.bufr)
		c.bufr = nil
	}

	if c.bufw != nil {
		c.bufw.Flush()
		// Steal the bufio.Writer (~4KB worth of memory) and its associated
		// writer for a future connection.
		putBufioWriter(c.bufw)
		c.bufw = nil
	}
}

// Close the connection.
func (c *conn) close() {
	c.finalFlush()
	c.rwc.Close()
}

// rstAvoidanceDelay is the amount of time we sleep after closing the
// write side of a TCP connection before closing the entire socket.
// By sleeping, we increase the chances that the client sees our FIN
// and processes its final data before they process the subsequent RST
// from closing a connection with known unread data.
// This RST seems to occur mostly on BSD systems. (And Windows?)
// This timeout is somewhat arbitrary (~latency around the planet).
const rstAvoidanceDelay = 500 * time.Millisecond

type closeWriter interface {
	CloseWrite() error
}

var _ closeWriter = (*net.TCPConn)(nil)

// closeWrite flushes any outstanding data and sends a FIN packet (if
// client is connected via TCP), signalling that we're done. We then
// pause for a bit, hoping the client processes it before any
// subsequent RST.
//
// See https://golang.org/issue/3595
func (c *conn) closeWriteAndWait() {
	c.finalFlush()
	if tcp, ok := c.rwc.(closeWriter); ok {
		tcp.CloseWrite()
	}
	time.Sleep(rstAvoidanceDelay)
}

// validNPN reports whether the proto is not a blacklisted Next
// Protocol Negotiation protocol. Empty and built-in protocol types
// are blacklisted and can't be overridden with alternate
// implementations.
func validNPN(proto string) bool {
	switch proto {
	case "", "http/1.1", "http/1.0":
		return false
	}
	return true
}

func (c *conn) setState(nc net.Conn, state ConnState) {
	srv := c.server
	switch state {
	case StateNew:
		srv.trackConn(c, true)
	case StateHijacked, StateClosed:
		srv.trackConn(c, false)
	}
	c.curState.Store(connStateInterface[state])
	if hook := srv.ConnState; hook != nil {
		hook(nc, state)
	}
}

// connStateInterface is an array of the interface{} versions of
// ConnState values, so we can use them in atomic.Values later without
// paying the cost of shoving their integers in an interface{}.
var connStateInterface = [...]interface{}{
	StateNew:      StateNew,
	StateActive:   StateActive,
	StateIdle:     StateIdle,
	StateHijacked: StateHijacked,
	StateClosed:   StateClosed,
}

// badRequestError is a literal string (used by the server in HTML,
// unescaped) to tell the user why their request was bad. It should
// be plain text without user info or other embedded errors.
type badRequestError string

func (e badRequestError) Error() string { return "Bad Request: " + string(e) }

// ErrAbortHandler is a sentinel panic value to abort a handler.
// While any panic from ServeHTTP aborts the response to the client,
// panicking with ErrAbortHandler also suppresses logging of a stack
// trace to the server's error log.
var ErrAbortHandler = errors.New("net/http: abort Handler")

// isCommonNetReadError reports whether err is a common error
// encountered during reading a request off the network when the
// client has gone away or had its read fail somehow. This is used to
// determine which logs are interesting enough to log about.
func isCommonNetReadError(err error) bool {
	if err == io.EOF {
		return true
	}
	if neterr, ok := err.(net.Error); ok && neterr.Timeout() {
		return true
	}
	if oe, ok := err.(*net.OpError); ok && oe.Op == "read" {
		return true
	}
	return false
}
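
// Illustrative sketch (not part of the original file): aborting a handler with
// ErrAbortHandler, as described in the Handler and ErrAbortHandler comments.
// The client sees an interrupted response, but no stack trace is written to
// the server's error log. The name abortExampleHandler and the trigger header
// are hypothetical.
func abortExampleHandler(w ResponseWriter, r *Request) {
	if r.Header.Get("X-Broken") != "" { // hypothetical trigger condition
		// Stop serving this request without logging a panic.
		panic(ErrAbortHandler)
	}
	io.WriteString(w, "ok\n")
}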
1719 func (c *conn) serve(ctx context.Context) { 1720 c.remoteAddr = c.rwc.RemoteAddr().String() 1721 ctx = context.WithValue(ctx, LocalAddrContextKey, c.rwc.LocalAddr()) 1722 defer func() { 1723 if err := recover(); err != nil && err != ErrAbortHandler { 1724 const size = 64 << 10 1725 buf := make([]byte, size) 1726 buf = buf[:runtime.Stack(buf, false)] 1727 c.server.logf("http: panic serving %v: %v\n%s", c.remoteAddr, err, buf) 1728 } 1729 if !c.hijacked() { 1730 c.close() 1731 c.setState(c.rwc, StateClosed) 1732 } 1733 }() 1734 1735 if tlsConn, ok := c.rwc.(*tls.Conn); ok { 1736 if d := c.server.ReadTimeout; d != 0 { 1737 c.rwc.SetReadDeadline(time.Now().Add(d)) 1738 } 1739 if d := c.server.WriteTimeout; d != 0 { 1740 c.rwc.SetWriteDeadline(time.Now().Add(d)) 1741 } 1742 if err := tlsConn.Handshake(); err != nil { 1743 c.server.logf("http: TLS handshake error from %s: %v", c.rwc.RemoteAddr(), err) 1744 return 1745 } 1746 c.tlsState = new(tls.ConnectionState) 1747 *c.tlsState = tlsConn.ConnectionState() 1748 if proto := c.tlsState.NegotiatedProtocol; validNPN(proto) { 1749 if fn := c.server.TLSNextProto[proto]; fn != nil { 1750 h := initNPNRequest{tlsConn, serverHandler{c.server}} 1751 fn(c.server, tlsConn, h) 1752 } 1753 return 1754 } 1755 } 1756 1757 // HTTP/1.x from here on. 1758 1759 ctx, cancelCtx := context.WithCancel(ctx) 1760 c.cancelCtx = cancelCtx 1761 defer cancelCtx() 1762 1763 c.r = &connReader{conn: c} 1764 c.bufr = newBufioReader(c.r) 1765 c.bufw = newBufioWriterSize(checkConnErrorWriter{c}, 4<<10) 1766 1767 for { 1768 w, err := c.readRequest(ctx) 1769 if c.r.remain != c.server.initialReadLimitSize() { 1770 // If we read any bytes off the wire, we're active. 1771 c.setState(c.rwc, StateActive) 1772 } 1773 if err != nil { 1774 const errorHeaders = "\r\nContent-Type: text/plain; charset=utf-8\r\nConnection: close\r\n\r\n" 1775 1776 if err == errTooLarge { 1777 // Their HTTP client may or may not be 1778 // able to read this if we're 1779 // responding to them and hanging up 1780 // while they're still writing their 1781 // request. Undefined behavior. 1782 const publicErr = "431 Request Header Fields Too Large" 1783 fmt.Fprintf(c.rwc, "HTTP/1.1 "+publicErr+errorHeaders+publicErr) 1784 c.closeWriteAndWait() 1785 return 1786 } 1787 if isCommonNetReadError(err) { 1788 return // don't reply 1789 } 1790 1791 publicErr := "400 Bad Request" 1792 if v, ok := err.(badRequestError); ok { 1793 publicErr = publicErr + ": " + string(v) 1794 } 1795 1796 fmt.Fprintf(c.rwc, "HTTP/1.1 "+publicErr+errorHeaders+publicErr) 1797 return 1798 } 1799 1800 // Expect 100 Continue support 1801 req := w.req 1802 if req.expectsContinue() { 1803 if req.ProtoAtLeast(1, 1) && req.ContentLength != 0 { 1804 // Wrap the Body reader with one that replies on the connection 1805 req.Body = &expectContinueReader{readCloser: req.Body, resp: w} 1806 } 1807 } else if req.Header.get("Expect") != "" { 1808 w.sendExpectationFailed() 1809 return 1810 } 1811 1812 c.curReq.Store(w) 1813 1814 if requestBodyRemains(req.Body) { 1815 registerOnHitEOF(req.Body, w.conn.r.startBackgroundRead) 1816 } else { 1817 if w.conn.bufr.Buffered() > 0 { 1818 w.conn.r.closeNotifyFromPipelinedRequest() 1819 } 1820 w.conn.r.startBackgroundRead() 1821 } 1822 1823 // HTTP cannot have multiple simultaneous active requests.[*] 1824 // Until the server replies to this request, it can't read another, 1825 // so we might as well run the handler in this goroutine. 1826 // [*] Not strictly true: HTTP pipelining. 
We could let them all process 1827 // in parallel even if their responses need to be serialized. 1828 // But we're not going to implement HTTP pipelining because it 1829 // was never deployed in the wild and the answer is HTTP/2. 1830 serverHandler{c.server}.ServeHTTP(w, w.req) 1831 w.cancelCtx() 1832 if c.hijacked() { 1833 return 1834 } 1835 w.finishRequest() 1836 if !w.shouldReuseConnection() { 1837 if w.requestBodyLimitHit || w.closedRequestBodyEarly() { 1838 c.closeWriteAndWait() 1839 } 1840 return 1841 } 1842 c.setState(c.rwc, StateIdle) 1843 c.curReq.Store((*response)(nil)) 1844 1845 if !w.conn.server.doKeepAlives() { 1846 // We're in shutdown mode. We might've replied 1847 // to the user without "Connection: close" and 1848 // they might think they can send another 1849 // request, but such is life with HTTP/1.1. 1850 return 1851 } 1852 1853 if d := c.server.idleTimeout(); d != 0 { 1854 c.rwc.SetReadDeadline(time.Now().Add(d)) 1855 if _, err := c.bufr.Peek(4); err != nil { 1856 return 1857 } 1858 } 1859 c.rwc.SetReadDeadline(time.Time{}) 1860 } 1861 } 1862 1863 func (w *response) sendExpectationFailed() { 1864 // TODO(bradfitz): let ServeHTTP handlers handle 1865 // requests with non-standard expectation[s]? Seems 1866 // theoretical at best, and doesn't fit into the 1867 // current ServeHTTP model anyway. We'd need to 1868 // make the ResponseWriter an optional 1869 // "ExpectReplier" interface or something. 1870 // 1871 // For now we'll just obey RFC 2616 14.20 which says 1872 // "If a server receives a request containing an 1873 // Expect field that includes an expectation- 1874 // extension that it does not support, it MUST 1875 // respond with a 417 (Expectation Failed) status." 1876 w.Header().Set("Connection", "close") 1877 w.WriteHeader(StatusExpectationFailed) 1878 w.finishRequest() 1879 } 1880 1881 // Hijack implements the Hijacker.Hijack method. Our response is both a ResponseWriter 1882 // and a Hijacker. 1883 func (w *response) Hijack() (rwc net.Conn, buf *bufio.ReadWriter, err error) { 1884 if w.handlerDone.isSet() { 1885 panic("net/http: Hijack called after ServeHTTP finished") 1886 } 1887 if w.wroteHeader { 1888 w.cw.flush() 1889 } 1890 1891 c := w.conn 1892 c.mu.Lock() 1893 defer c.mu.Unlock() 1894 1895 // Release the bufioWriter that writes to the chunk writer, it is not 1896 // used after a connection has been hijacked. 1897 rwc, buf, err = c.hijackLocked() 1898 if err == nil { 1899 putBufioWriter(w.w) 1900 w.w = nil 1901 } 1902 return rwc, buf, err 1903 } 1904 1905 func (w *response) CloseNotify() <-chan bool { 1906 if w.handlerDone.isSet() { 1907 panic("net/http: CloseNotify called after ServeHTTP finished") 1908 } 1909 return w.closeNotifyCh 1910 } 1911 1912 func registerOnHitEOF(rc io.ReadCloser, fn func()) { 1913 switch v := rc.(type) { 1914 case *expectContinueReader: 1915 registerOnHitEOF(v.readCloser, fn) 1916 case *body: 1917 v.registerOnHitEOF(fn) 1918 default: 1919 panic("unexpected type " + fmt.Sprintf("%T", rc)) 1920 } 1921 } 1922 1923 // requestBodyRemains reports whether future calls to Read 1924 // on rc might yield more data. 
1925 func requestBodyRemains(rc io.ReadCloser) bool { 1926 if rc == NoBody { 1927 return false 1928 } 1929 switch v := rc.(type) { 1930 case *expectContinueReader: 1931 return requestBodyRemains(v.readCloser) 1932 case *body: 1933 return v.bodyRemains() 1934 default: 1935 panic("unexpected type " + fmt.Sprintf("%T", rc)) 1936 } 1937 } 1938 1939 // The HandlerFunc type is an adapter to allow the use of 1940 // ordinary functions as HTTP handlers. If f is a function 1941 // with the appropriate signature, HandlerFunc(f) is a 1942 // Handler that calls f. 1943 type HandlerFunc func(ResponseWriter, *Request) 1944 1945 // ServeHTTP calls f(w, r). 1946 func (f HandlerFunc) ServeHTTP(w ResponseWriter, r *Request) { 1947 f(w, r) 1948 } 1949 1950 // Helper handlers 1951 1952 // Error replies to the request with the specified error message and HTTP code. 1953 // It does not otherwise end the request; the caller should ensure no further 1954 // writes are done to w. 1955 // The error message should be plain text. 1956 func Error(w ResponseWriter, error string, code int) { 1957 w.Header().Set("Content-Type", "text/plain; charset=utf-8") 1958 w.Header().Set("X-Content-Type-Options", "nosniff") 1959 w.WriteHeader(code) 1960 fmt.Fprintln(w, error) 1961 } 1962 1963 // NotFound replies to the request with an HTTP 404 not found error. 1964 func NotFound(w ResponseWriter, r *Request) { Error(w, "404 page not found", StatusNotFound) } 1965 1966 // NotFoundHandler returns a simple request handler 1967 // that replies to each request with a ``404 page not found'' reply. 1968 func NotFoundHandler() Handler { return HandlerFunc(NotFound) } 1969 1970 // StripPrefix returns a handler that serves HTTP requests 1971 // by removing the given prefix from the request URL's Path 1972 // and invoking the handler h. StripPrefix handles a 1973 // request for a path that doesn't begin with prefix by 1974 // replying with an HTTP 404 not found error. 1975 func StripPrefix(prefix string, h Handler) Handler { 1976 if prefix == "" { 1977 return h 1978 } 1979 return HandlerFunc(func(w ResponseWriter, r *Request) { 1980 if p := strings.TrimPrefix(r.URL.Path, prefix); len(p) < len(r.URL.Path) { 1981 r2 := new(Request) 1982 *r2 = *r 1983 r2.URL = new(url.URL) 1984 *r2.URL = *r.URL 1985 r2.URL.Path = p 1986 h.ServeHTTP(w, r2) 1987 } else { 1988 NotFound(w, r) 1989 } 1990 }) 1991 } 1992 1993 // Redirect replies to the request with a redirect to url, 1994 // which may be a path relative to the request path. 1995 // 1996 // The provided code should be in the 3xx range and is usually 1997 // StatusMovedPermanently, StatusFound or StatusSeeOther. 1998 func Redirect(w ResponseWriter, r *Request, url string, code int) { 1999 // parseURL is just url.Parse (url is shadowed for godoc). 2000 if u, err := parseURL(url); err == nil { 2001 // If url was relative, make absolute by 2002 // combining with request path. 2003 // The browser would probably do this for us, 2004 // but doing it ourselves is more reliable. 2005 2006 // NOTE(rsc): RFC 2616 says that the Location 2007 // line must be an absolute URI, like 2008 // "http://www.google.com/redirect/", 2009 // not a path like "/redirect/". 2010 // Unfortunately, we don't know what to 2011 // put in the host name section to get the 2012 // client to connect to us again, so we can't 2013 // know the right absolute URI to send back. 2014 // Because of this problem, no one pays attention 2015 // to the RFC; they all send back just a new path. 2016 // So do we. 
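// For example (illustrative values, not part of the original note): with
// r.URL.Path == "/a/b", the call
//
//	Redirect(w, r, "c?x=1", StatusFound)
//
// resolves the relative target against the request's directory and sends
// "Location: /a/c?x=1"; a trailing slash on the target is preserved.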
2017 if u.Scheme == "" && u.Host == "" {
2018 oldpath := r.URL.Path
2019 if oldpath == "" { // should not happen, but avoid a crash if it does
2020 oldpath = "/"
2021 }
2022
2023 // no leading http://server
2024 if url == "" || url[0] != '/' {
2025 // make relative path absolute
2026 olddir, _ := path.Split(oldpath)
2027 url = olddir + url
2028 }
2029
2030 var query string
2031 if i := strings.Index(url, "?"); i != -1 {
2032 url, query = url[:i], url[i:]
2033 }
2034
2035 // clean up but preserve trailing slash
2036 trailing := strings.HasSuffix(url, "/")
2037 url = path.Clean(url)
2038 if trailing && !strings.HasSuffix(url, "/") {
2039 url += "/"
2040 }
2041 url += query
2042 }
2043 }
2044
2045 w.Header().Set("Location", hexEscapeNonASCII(url))
2046 if r.Method == "GET" || r.Method == "HEAD" {
2047 w.Header().Set("Content-Type", "text/html; charset=utf-8")
2048 }
2049 w.WriteHeader(code)
2050
2051 // RFC 2616 recommends that a short note "SHOULD" be included in the
2052 // response because older user agents may not understand 301/307.
2053 // Shouldn't send the response for POST or HEAD; that leaves GET.
2054 if r.Method == "GET" {
2055 note := "<a href=\"" + htmlEscape(url) + "\">" + statusText[code] + "</a>.\n"
2056 fmt.Fprintln(w, note)
2057 }
2058 }
2059
2060 // parseURL is just url.Parse. It exists only so that url.Parse can be called
2061 // in places where url is shadowed for godoc. See https://golang.org/cl/49930.
2062 var parseURL = url.Parse
2063
2064 var htmlReplacer = strings.NewReplacer(
2065 "&", "&amp;",
2066 "<", "&lt;",
2067 ">", "&gt;",
2068 // "&#34;" is shorter than "&quot;".
2069 `"`, "&#34;",
2070 // "&#39;" is shorter than "&apos;" and apos was not in HTML until HTML5.
2071 "'", "&#39;",
2072 )
2073
2074 func htmlEscape(s string) string {
2075 return htmlReplacer.Replace(s)
2076 }
2077
2078 // Redirect to a fixed URL
2079 type redirectHandler struct {
2080 url string
2081 code int
2082 }
2083
2084 func (rh *redirectHandler) ServeHTTP(w ResponseWriter, r *Request) {
2085 Redirect(w, r, rh.url, rh.code)
2086 }
2087
2088 // RedirectHandler returns a request handler that redirects
2089 // each request it receives to the given url using the given
2090 // status code.
2091 //
2092 // The provided code should be in the 3xx range and is usually
2093 // StatusMovedPermanently, StatusFound or StatusSeeOther.
2094 func RedirectHandler(url string, code int) Handler {
2095 return &redirectHandler{url, code}
2096 }
2097
2098 // ServeMux is an HTTP request multiplexer.
2099 // It matches the URL of each incoming request against a list of registered
2100 // patterns and calls the handler for the pattern that
2101 // most closely matches the URL.
2102 //
2103 // Patterns name fixed, rooted paths, like "/favicon.ico",
2104 // or rooted subtrees, like "/images/" (note the trailing slash).
2105 // Longer patterns take precedence over shorter ones, so that
2106 // if there are handlers registered for both "/images/"
2107 // and "/images/thumbnails/", the latter handler will be
2108 // called for paths beginning "/images/thumbnails/" and the
2109 // former will receive requests for any other paths in the
2110 // "/images/" subtree.
2111 //
2112 // Note that since a pattern ending in a slash names a rooted subtree,
2113 // the pattern "/" matches all paths not matched by other registered
2114 // patterns, not just the URL with Path == "/".
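//
// For example (an illustrative sketch; the handler names are assumptions),
// given the registrations
//
//	mux := http.NewServeMux()
//	mux.Handle("/images/", imagesHandler)
//	mux.Handle("/images/thumbnails/", thumbnailHandler)
//	mux.Handle("/", rootHandler)
//
// a request for "/images/logo.png" is dispatched to imagesHandler, a
// request for "/images/thumbnails/t1.png" to thumbnailHandler, and any
// other path to rootHandler.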
2115 // 2116 // If a subtree has been registered and a request is received naming the 2117 // subtree root without its trailing slash, ServeMux redirects that 2118 // request to the subtree root (adding the trailing slash). This behavior can 2119 // be overridden with a separate registration for the path without 2120 // the trailing slash. For example, registering "/images/" causes ServeMux 2121 // to redirect a request for "/images" to "/images/", unless "/images" has 2122 // been registered separately. 2123 // 2124 // Patterns may optionally begin with a host name, restricting matches to 2125 // URLs on that host only. Host-specific patterns take precedence over 2126 // general patterns, so that a handler might register for the two patterns 2127 // "/codesearch" and "codesearch.google.com/" without also taking over 2128 // requests for "http://www.google.com/". 2129 // 2130 // ServeMux also takes care of sanitizing the URL request path, 2131 // redirecting any request containing . or .. elements or repeated slashes 2132 // to an equivalent, cleaner URL. 2133 type ServeMux struct { 2134 mu sync.RWMutex 2135 m map[string]muxEntry 2136 hosts bool // whether any patterns contain hostnames 2137 } 2138 2139 type muxEntry struct { 2140 h Handler 2141 pattern string 2142 } 2143 2144 // NewServeMux allocates and returns a new ServeMux. 2145 func NewServeMux() *ServeMux { return new(ServeMux) } 2146 2147 // DefaultServeMux is the default ServeMux used by Serve. 2148 var DefaultServeMux = &defaultServeMux 2149 2150 var defaultServeMux ServeMux 2151 2152 // Does path match pattern? 2153 func pathMatch(pattern, path string) bool { 2154 if len(pattern) == 0 { 2155 // should not happen 2156 return false 2157 } 2158 n := len(pattern) 2159 if pattern[n-1] != '/' { 2160 return pattern == path 2161 } 2162 return len(path) >= n && path[0:n] == pattern 2163 } 2164 2165 // Return the canonical path for p, eliminating . and .. elements. 2166 func cleanPath(p string) string { 2167 if p == "" { 2168 return "/" 2169 } 2170 if p[0] != '/' { 2171 p = "/" + p 2172 } 2173 np := path.Clean(p) 2174 // path.Clean removes trailing slash except for root; 2175 // put the trailing slash back if necessary. 2176 if p[len(p)-1] == '/' && np != "/" { 2177 np += "/" 2178 } 2179 return np 2180 } 2181 2182 // stripHostPort returns h without any trailing ":<port>". 2183 func stripHostPort(h string) string { 2184 // If no port on host, return unchanged 2185 if strings.IndexByte(h, ':') == -1 { 2186 return h 2187 } 2188 host, _, err := net.SplitHostPort(h) 2189 if err != nil { 2190 return h // on error, return unchanged 2191 } 2192 return host 2193 } 2194 2195 // Find a handler on a handler map given a path string. 2196 // Most-specific (longest) pattern wins. 2197 func (mux *ServeMux) match(path string) (h Handler, pattern string) { 2198 // Check for exact match first. 2199 v, ok := mux.m[path] 2200 if ok { 2201 return v.h, v.pattern 2202 } 2203 2204 // Check for longest valid match. 2205 var n = 0 2206 for k, v := range mux.m { 2207 if !pathMatch(k, path) { 2208 continue 2209 } 2210 if h == nil || len(k) > n { 2211 n = len(k) 2212 h = v.h 2213 pattern = v.pattern 2214 } 2215 } 2216 return 2217 } 2218 2219 // redirectToPathSlash determines if the given path needs appending "/" to it. 2220 // This occurs when a handler for path + "/" was already registered, but 2221 // not for path itself. If the path needs appending to, it creates a new 2222 // URL, setting the path to u.Path + "/" and returning true to indicate so. 
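//
// A rough sketch of the intended behavior (hypothetical values): with only
// the pattern "/tree/" registered, redirectToPathSlash("example.com", "/tree", u)
// returns a URL whose Path is "/tree/" (u's RawQuery is carried over) and
// reports true; if "/tree" itself is registered, it returns u unchanged and
// reports false.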
2223 func (mux *ServeMux) redirectToPathSlash(host, path string, u *url.URL) (*url.URL, bool) { 2224 if !mux.shouldRedirect(host, path) { 2225 return u, false 2226 } 2227 path = path + "/" 2228 u = &url.URL{Path: path, RawQuery: u.RawQuery} 2229 return u, true 2230 } 2231 2232 // shouldRedirect reports whether the given path and host should be redirected to 2233 // path+"/". This should happen if a handler is registered for path+"/" but 2234 // not path -- see comments at ServeMux. 2235 func (mux *ServeMux) shouldRedirect(host, path string) bool { 2236 p := []string{path, host + path} 2237 2238 for _, c := range p { 2239 if _, exist := mux.m[c]; exist { 2240 return false 2241 } 2242 } 2243 2244 n := len(path) 2245 if n == 0 { 2246 return false 2247 } 2248 for _, c := range p { 2249 if _, exist := mux.m[c+"/"]; exist { 2250 return path[n-1] != '/' 2251 } 2252 } 2253 2254 return false 2255 } 2256 2257 // Handler returns the handler to use for the given request, 2258 // consulting r.Method, r.Host, and r.URL.Path. It always returns 2259 // a non-nil handler. If the path is not in its canonical form, the 2260 // handler will be an internally-generated handler that redirects 2261 // to the canonical path. If the host contains a port, it is ignored 2262 // when matching handlers. 2263 // 2264 // The path and host are used unchanged for CONNECT requests. 2265 // 2266 // Handler also returns the registered pattern that matches the 2267 // request or, in the case of internally-generated redirects, 2268 // the pattern that will match after following the redirect. 2269 // 2270 // If there is no registered handler that applies to the request, 2271 // Handler returns a ``page not found'' handler and an empty pattern. 2272 func (mux *ServeMux) Handler(r *Request) (h Handler, pattern string) { 2273 2274 // CONNECT requests are not canonicalized. 2275 if r.Method == "CONNECT" { 2276 // If r.URL.Path is /tree and its handler is not registered, 2277 // the /tree -> /tree/ redirect applies to CONNECT requests 2278 // but the path canonicalization does not. 2279 if u, ok := mux.redirectToPathSlash(r.URL.Host, r.URL.Path, r.URL); ok { 2280 return RedirectHandler(u.String(), StatusMovedPermanently), u.Path 2281 } 2282 2283 return mux.handler(r.Host, r.URL.Path) 2284 } 2285 2286 // All other requests have any port stripped and path cleaned 2287 // before passing to mux.handler. 2288 host := stripHostPort(r.Host) 2289 path := cleanPath(r.URL.Path) 2290 2291 // If the given path is /tree and its handler is not registered, 2292 // redirect for /tree/. 2293 if u, ok := mux.redirectToPathSlash(host, path, r.URL); ok { 2294 return RedirectHandler(u.String(), StatusMovedPermanently), u.Path 2295 } 2296 2297 if path != r.URL.Path { 2298 _, pattern = mux.handler(host, path) 2299 url := *r.URL 2300 url.Path = path 2301 return RedirectHandler(url.String(), StatusMovedPermanently), pattern 2302 } 2303 2304 return mux.handler(host, r.URL.Path) 2305 } 2306 2307 // handler is the main implementation of Handler. 2308 // The path is known to be in canonical form, except for CONNECT methods. 
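//
// Lookup order, sketched with assumed registrations: for host "example.com"
// and path "/foo", handler first matches against "example.com/foo" (only
// when host-specific patterns have been registered), then against "/foo",
// and finally falls back to NotFoundHandler with an empty pattern.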
2309 func (mux *ServeMux) handler(host, path string) (h Handler, pattern string) { 2310 mux.mu.RLock() 2311 defer mux.mu.RUnlock() 2312 2313 // Host-specific pattern takes precedence over generic ones 2314 if mux.hosts { 2315 h, pattern = mux.match(host + path) 2316 } 2317 if h == nil { 2318 h, pattern = mux.match(path) 2319 } 2320 if h == nil { 2321 h, pattern = NotFoundHandler(), "" 2322 } 2323 return 2324 } 2325 2326 // ServeHTTP dispatches the request to the handler whose 2327 // pattern most closely matches the request URL. 2328 func (mux *ServeMux) ServeHTTP(w ResponseWriter, r *Request) { 2329 if r.RequestURI == "*" { 2330 if r.ProtoAtLeast(1, 1) { 2331 w.Header().Set("Connection", "close") 2332 } 2333 w.WriteHeader(StatusBadRequest) 2334 return 2335 } 2336 h, _ := mux.Handler(r) 2337 h.ServeHTTP(w, r) 2338 } 2339 2340 // Handle registers the handler for the given pattern. 2341 // If a handler already exists for pattern, Handle panics. 2342 func (mux *ServeMux) Handle(pattern string, handler Handler) { 2343 mux.mu.Lock() 2344 defer mux.mu.Unlock() 2345 2346 if pattern == "" { 2347 panic("http: invalid pattern") 2348 } 2349 if handler == nil { 2350 panic("http: nil handler") 2351 } 2352 if _, exist := mux.m[pattern]; exist { 2353 panic("http: multiple registrations for " + pattern) 2354 } 2355 2356 if mux.m == nil { 2357 mux.m = make(map[string]muxEntry) 2358 } 2359 mux.m[pattern] = muxEntry{h: handler, pattern: pattern} 2360 2361 if pattern[0] != '/' { 2362 mux.hosts = true 2363 } 2364 } 2365 2366 // HandleFunc registers the handler function for the given pattern. 2367 func (mux *ServeMux) HandleFunc(pattern string, handler func(ResponseWriter, *Request)) { 2368 mux.Handle(pattern, HandlerFunc(handler)) 2369 } 2370 2371 // Handle registers the handler for the given pattern 2372 // in the DefaultServeMux. 2373 // The documentation for ServeMux explains how patterns are matched. 2374 func Handle(pattern string, handler Handler) { DefaultServeMux.Handle(pattern, handler) } 2375 2376 // HandleFunc registers the handler function for the given pattern 2377 // in the DefaultServeMux. 2378 // The documentation for ServeMux explains how patterns are matched. 2379 func HandleFunc(pattern string, handler func(ResponseWriter, *Request)) { 2380 DefaultServeMux.HandleFunc(pattern, handler) 2381 } 2382 2383 // Serve accepts incoming HTTP connections on the listener l, 2384 // creating a new service goroutine for each. The service goroutines 2385 // read requests and then call handler to reply to them. 2386 // Handler is typically nil, in which case the DefaultServeMux is used. 2387 func Serve(l net.Listener, handler Handler) error { 2388 srv := &Server{Handler: handler} 2389 return srv.Serve(l) 2390 } 2391 2392 // ServeTLS accepts incoming HTTPS connections on the listener l, 2393 // creating a new service goroutine for each. The service goroutines 2394 // read requests and then call handler to reply to them. 2395 // 2396 // Handler is typically nil, in which case the DefaultServeMux is used. 2397 // 2398 // Additionally, files containing a certificate and matching private key 2399 // for the server must be provided. If the certificate is signed by a 2400 // certificate authority, the certFile should be the concatenation 2401 // of the server's certificate, any intermediates, and the CA's certificate. 
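//
// A minimal usage sketch (the address and certificate file names are
// assumptions):
//
//	ln, err := net.Listen("tcp", ":8443")
//	if err != nil {
//		log.Fatal(err)
//	}
//	log.Fatal(http.ServeTLS(ln, nil, "cert.pem", "key.pem"))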
2402 func ServeTLS(l net.Listener, handler Handler, certFile, keyFile string) error { 2403 srv := &Server{Handler: handler} 2404 return srv.ServeTLS(l, certFile, keyFile) 2405 } 2406 2407 // A Server defines parameters for running an HTTP server. 2408 // The zero value for Server is a valid configuration. 2409 type Server struct { 2410 Addr string // TCP address to listen on, ":http" if empty 2411 Handler Handler // handler to invoke, http.DefaultServeMux if nil 2412 2413 // TLSConfig optionally provides a TLS configuration for use 2414 // by ServeTLS and ListenAndServeTLS. Note that this value is 2415 // cloned by ServeTLS and ListenAndServeTLS, so it's not 2416 // possible to modify the configuration with methods like 2417 // tls.Config.SetSessionTicketKeys. To use 2418 // SetSessionTicketKeys, use Server.Serve with a TLS Listener 2419 // instead. 2420 TLSConfig *tls.Config 2421 2422 // ReadTimeout is the maximum duration for reading the entire 2423 // request, including the body. 2424 // 2425 // Because ReadTimeout does not let Handlers make per-request 2426 // decisions on each request body's acceptable deadline or 2427 // upload rate, most users will prefer to use 2428 // ReadHeaderTimeout. It is valid to use them both. 2429 ReadTimeout time.Duration 2430 2431 // ReadHeaderTimeout is the amount of time allowed to read 2432 // request headers. The connection's read deadline is reset 2433 // after reading the headers and the Handler can decide what 2434 // is considered too slow for the body. 2435 ReadHeaderTimeout time.Duration 2436 2437 // WriteTimeout is the maximum duration before timing out 2438 // writes of the response. It is reset whenever a new 2439 // request's header is read. Like ReadTimeout, it does not 2440 // let Handlers make decisions on a per-request basis. 2441 WriteTimeout time.Duration 2442 2443 // IdleTimeout is the maximum amount of time to wait for the 2444 // next request when keep-alives are enabled. If IdleTimeout 2445 // is zero, the value of ReadTimeout is used. If both are 2446 // zero, ReadHeaderTimeout is used. 2447 IdleTimeout time.Duration 2448 2449 // MaxHeaderBytes controls the maximum number of bytes the 2450 // server will read parsing the request header's keys and 2451 // values, including the request line. It does not limit the 2452 // size of the request body. 2453 // If zero, DefaultMaxHeaderBytes is used. 2454 MaxHeaderBytes int 2455 2456 // TLSNextProto optionally specifies a function to take over 2457 // ownership of the provided TLS connection when an NPN/ALPN 2458 // protocol upgrade has occurred. The map key is the protocol 2459 // name negotiated. The Handler argument should be used to 2460 // handle HTTP requests and will initialize the Request's TLS 2461 // and RemoteAddr if not already set. The connection is 2462 // automatically closed when the function returns. 2463 // If TLSNextProto is not nil, HTTP/2 support is not enabled 2464 // automatically. 2465 TLSNextProto map[string]func(*Server, *tls.Conn, Handler) 2466 2467 // ConnState specifies an optional callback function that is 2468 // called when a client connection changes state. See the 2469 // ConnState type and associated constants for details. 2470 ConnState func(net.Conn, ConnState) 2471 2472 // ErrorLog specifies an optional logger for errors accepting 2473 // connections, unexpected behavior from handlers, and 2474 // underlying FileSystem errors. 2475 // If nil, logging is done via the log package's standard logger. 
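// For example, a sketch of routing server errors to a dedicated logger
// (the prefix and destination are assumptions):
//
//	srv := &http.Server{
//		Addr:     ":8080",
//		ErrorLog: log.New(os.Stderr, "http: ", log.LstdFlags),
//	}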
2476 ErrorLog *log.Logger 2477 2478 disableKeepAlives int32 // accessed atomically. 2479 inShutdown int32 // accessed atomically (non-zero means we're in Shutdown) 2480 nextProtoOnce sync.Once // guards setupHTTP2_* init 2481 nextProtoErr error // result of http2.ConfigureServer if used 2482 2483 mu sync.Mutex 2484 listeners map[net.Listener]struct{} 2485 activeConn map[*conn]struct{} 2486 doneChan chan struct{} 2487 onShutdown []func() 2488 } 2489 2490 func (s *Server) getDoneChan() <-chan struct{} { 2491 s.mu.Lock() 2492 defer s.mu.Unlock() 2493 return s.getDoneChanLocked() 2494 } 2495 2496 func (s *Server) getDoneChanLocked() chan struct{} { 2497 if s.doneChan == nil { 2498 s.doneChan = make(chan struct{}) 2499 } 2500 return s.doneChan 2501 } 2502 2503 func (s *Server) closeDoneChanLocked() { 2504 ch := s.getDoneChanLocked() 2505 select { 2506 case <-ch: 2507 // Already closed. Don't close again. 2508 default: 2509 // Safe to close here. We're the only closer, guarded 2510 // by s.mu. 2511 close(ch) 2512 } 2513 } 2514 2515 // Close immediately closes all active net.Listeners and any 2516 // connections in state StateNew, StateActive, or StateIdle. For a 2517 // graceful shutdown, use Shutdown. 2518 // 2519 // Close does not attempt to close (and does not even know about) 2520 // any hijacked connections, such as WebSockets. 2521 // 2522 // Close returns any error returned from closing the Server's 2523 // underlying Listener(s). 2524 func (srv *Server) Close() error { 2525 srv.mu.Lock() 2526 defer srv.mu.Unlock() 2527 srv.closeDoneChanLocked() 2528 err := srv.closeListenersLocked() 2529 for c := range srv.activeConn { 2530 c.rwc.Close() 2531 delete(srv.activeConn, c) 2532 } 2533 return err 2534 } 2535 2536 // shutdownPollInterval is how often we poll for quiescence 2537 // during Server.Shutdown. This is lower during tests, to 2538 // speed up tests. 2539 // Ideally we could find a solution that doesn't involve polling, 2540 // but which also doesn't have a high runtime cost (and doesn't 2541 // involve any contentious mutexes), but that is left as an 2542 // exercise for the reader. 2543 var shutdownPollInterval = 500 * time.Millisecond 2544 2545 // Shutdown gracefully shuts down the server without interrupting any 2546 // active connections. Shutdown works by first closing all open 2547 // listeners, then closing all idle connections, and then waiting 2548 // indefinitely for connections to return to idle and then shut down. 2549 // If the provided context expires before the shutdown is complete, 2550 // Shutdown returns the context's error, otherwise it returns any 2551 // error returned from closing the Server's underlying Listener(s). 2552 // 2553 // When Shutdown is called, Serve, ListenAndServe, and 2554 // ListenAndServeTLS immediately return ErrServerClosed. Make sure the 2555 // program doesn't exit and waits instead for Shutdown to return. 2556 // 2557 // Shutdown does not attempt to close nor wait for hijacked 2558 // connections such as WebSockets. The caller of Shutdown should 2559 // separately notify such long-lived connections of shutdown and wait 2560 // for them to close, if desired. See RegisterOnShutdown for a way to 2561 // register shutdown notification functions. 
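//
// A minimal graceful-shutdown sketch (the signal handling and variable
// names below are assumptions, not part of this package):
//
//	srv := &http.Server{Addr: ":8080"}
//	idleConnsClosed := make(chan struct{})
//	go func() {
//		sigint := make(chan os.Signal, 1)
//		signal.Notify(sigint, os.Interrupt)
//		<-sigint // wait for interrupt, then begin draining
//		if err := srv.Shutdown(context.Background()); err != nil {
//			log.Printf("HTTP server Shutdown: %v", err)
//		}
//		close(idleConnsClosed)
//	}()
//	if err := srv.ListenAndServe(); err != http.ErrServerClosed {
//		log.Fatalf("HTTP server ListenAndServe: %v", err)
//	}
//	<-idleConnsClosed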
2562 func (srv *Server) Shutdown(ctx context.Context) error { 2563 atomic.AddInt32(&srv.inShutdown, 1) 2564 defer atomic.AddInt32(&srv.inShutdown, -1) 2565 2566 srv.mu.Lock() 2567 lnerr := srv.closeListenersLocked() 2568 srv.closeDoneChanLocked() 2569 for _, f := range srv.onShutdown { 2570 go f() 2571 } 2572 srv.mu.Unlock() 2573 2574 ticker := time.NewTicker(shutdownPollInterval) 2575 defer ticker.Stop() 2576 for { 2577 if srv.closeIdleConns() { 2578 return lnerr 2579 } 2580 select { 2581 case <-ctx.Done(): 2582 return ctx.Err() 2583 case <-ticker.C: 2584 } 2585 } 2586 } 2587 2588 // RegisterOnShutdown registers a function to call on Shutdown. 2589 // This can be used to gracefully shutdown connections that have 2590 // undergone NPN/ALPN protocol upgrade or that have been hijacked. 2591 // This function should start protocol-specific graceful shutdown, 2592 // but should not wait for shutdown to complete. 2593 func (srv *Server) RegisterOnShutdown(f func()) { 2594 srv.mu.Lock() 2595 srv.onShutdown = append(srv.onShutdown, f) 2596 srv.mu.Unlock() 2597 } 2598 2599 // closeIdleConns closes all idle connections and reports whether the 2600 // server is quiescent. 2601 func (s *Server) closeIdleConns() bool { 2602 s.mu.Lock() 2603 defer s.mu.Unlock() 2604 quiescent := true 2605 for c := range s.activeConn { 2606 st, ok := c.curState.Load().(ConnState) 2607 if !ok || st != StateIdle { 2608 quiescent = false 2609 continue 2610 } 2611 c.rwc.Close() 2612 delete(s.activeConn, c) 2613 } 2614 return quiescent 2615 } 2616 2617 func (s *Server) closeListenersLocked() error { 2618 var err error 2619 for ln := range s.listeners { 2620 if cerr := ln.Close(); cerr != nil && err == nil { 2621 err = cerr 2622 } 2623 delete(s.listeners, ln) 2624 } 2625 return err 2626 } 2627 2628 // A ConnState represents the state of a client connection to a server. 2629 // It's used by the optional Server.ConnState hook. 2630 type ConnState int 2631 2632 const ( 2633 // StateNew represents a new connection that is expected to 2634 // send a request immediately. Connections begin at this 2635 // state and then transition to either StateActive or 2636 // StateClosed. 2637 StateNew ConnState = iota 2638 2639 // StateActive represents a connection that has read 1 or more 2640 // bytes of a request. The Server.ConnState hook for 2641 // StateActive fires before the request has entered a handler 2642 // and doesn't fire again until the request has been 2643 // handled. After the request is handled, the state 2644 // transitions to StateClosed, StateHijacked, or StateIdle. 2645 // For HTTP/2, StateActive fires on the transition from zero 2646 // to one active request, and only transitions away once all 2647 // active requests are complete. That means that ConnState 2648 // cannot be used to do per-request work; ConnState only notes 2649 // the overall state of the connection. 2650 StateActive 2651 2652 // StateIdle represents a connection that has finished 2653 // handling a request and is in the keep-alive state, waiting 2654 // for a new request. Connections transition from StateIdle 2655 // to either StateActive or StateClosed. 2656 StateIdle 2657 2658 // StateHijacked represents a hijacked connection. 2659 // This is a terminal state. It does not transition to StateClosed. 2660 StateHijacked 2661 2662 // StateClosed represents a closed connection. 2663 // This is a terminal state. Hijacked connections do not 2664 // transition to StateClosed. 
2665 StateClosed 2666 ) 2667 2668 var stateName = map[ConnState]string{ 2669 StateNew: "new", 2670 StateActive: "active", 2671 StateIdle: "idle", 2672 StateHijacked: "hijacked", 2673 StateClosed: "closed", 2674 } 2675 2676 func (c ConnState) String() string { 2677 return stateName[c] 2678 } 2679 2680 // serverHandler delegates to either the server's Handler or 2681 // DefaultServeMux and also handles "OPTIONS *" requests. 2682 type serverHandler struct { 2683 srv *Server 2684 } 2685 2686 func (sh serverHandler) ServeHTTP(rw ResponseWriter, req *Request) { 2687 handler := sh.srv.Handler 2688 if handler == nil { 2689 handler = DefaultServeMux 2690 } 2691 if req.RequestURI == "*" && req.Method == "OPTIONS" { 2692 handler = globalOptionsHandler{} 2693 } 2694 handler.ServeHTTP(rw, req) 2695 } 2696 2697 // ListenAndServe listens on the TCP network address srv.Addr and then 2698 // calls Serve to handle requests on incoming connections. 2699 // Accepted connections are configured to enable TCP keep-alives. 2700 // If srv.Addr is blank, ":http" is used. 2701 // ListenAndServe always returns a non-nil error. 2702 func (srv *Server) ListenAndServe() error { 2703 addr := srv.Addr 2704 if addr == "" { 2705 addr = ":http" 2706 } 2707 ln, err := net.Listen("tcp", addr) 2708 if err != nil { 2709 return err 2710 } 2711 return srv.Serve(tcpKeepAliveListener{ln.(*net.TCPListener)}) 2712 } 2713 2714 var testHookServerServe func(*Server, net.Listener) // used if non-nil 2715 2716 // shouldDoServeHTTP2 reports whether Server.Serve should configure 2717 // automatic HTTP/2. (which sets up the srv.TLSNextProto map) 2718 func (srv *Server) shouldConfigureHTTP2ForServe() bool { 2719 if srv.TLSConfig == nil { 2720 // Compatibility with Go 1.6: 2721 // If there's no TLSConfig, it's possible that the user just 2722 // didn't set it on the http.Server, but did pass it to 2723 // tls.NewListener and passed that listener to Serve. 2724 // So we should configure HTTP/2 (to set up srv.TLSNextProto) 2725 // in case the listener returns an "h2" *tls.Conn. 2726 return true 2727 } 2728 // The user specified a TLSConfig on their http.Server. 2729 // In this, case, only configure HTTP/2 if their tls.Config 2730 // explicitly mentions "h2". Otherwise http2.ConfigureServer 2731 // would modify the tls.Config to add it, but they probably already 2732 // passed this tls.Config to tls.NewListener. And if they did, 2733 // it's too late anyway to fix it. It would only be potentially racy. 2734 // See Issue 15908. 2735 return strSliceContains(srv.TLSConfig.NextProtos, http2NextProtoTLS) 2736 } 2737 2738 // ErrServerClosed is returned by the Server's Serve, ServeTLS, ListenAndServe, 2739 // and ListenAndServeTLS methods after a call to Shutdown or Close. 2740 var ErrServerClosed = errors.New("http: Server closed") 2741 2742 // Serve accepts incoming connections on the Listener l, creating a 2743 // new service goroutine for each. The service goroutines read requests and 2744 // then call srv.Handler to reply to them. 2745 // 2746 // For HTTP/2 support, srv.TLSConfig should be initialized to the 2747 // provided listener's TLS Config before calling Serve. If 2748 // srv.TLSConfig is non-nil and doesn't include the string "h2" in 2749 // Config.NextProtos, HTTP/2 support is not enabled. 2750 // 2751 // Serve always returns a non-nil error. After Shutdown or Close, the 2752 // returned error is ErrServerClosed. 
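//
// A short usage sketch (the listener address and handler are assumptions):
//
//	ln, err := net.Listen("tcp", "127.0.0.1:8080")
//	if err != nil {
//		log.Fatal(err)
//	}
//	srv := &http.Server{Handler: myHandler} // myHandler is assumed
//	log.Fatal(srv.Serve(ln))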
2753 func (srv *Server) Serve(l net.Listener) error { 2754 defer l.Close() 2755 if fn := testHookServerServe; fn != nil { 2756 fn(srv, l) 2757 } 2758 var tempDelay time.Duration // how long to sleep on accept failure 2759 2760 if err := srv.setupHTTP2_Serve(); err != nil { 2761 return err 2762 } 2763 2764 srv.trackListener(l, true) 2765 defer srv.trackListener(l, false) 2766 2767 baseCtx := context.Background() // base is always background, per Issue 16220 2768 ctx := context.WithValue(baseCtx, ServerContextKey, srv) 2769 for { 2770 rw, e := l.Accept() 2771 if e != nil { 2772 select { 2773 case <-srv.getDoneChan(): 2774 return ErrServerClosed 2775 default: 2776 } 2777 if ne, ok := e.(net.Error); ok && ne.Temporary() { 2778 if tempDelay == 0 { 2779 tempDelay = 5 * time.Millisecond 2780 } else { 2781 tempDelay *= 2 2782 } 2783 if max := 1 * time.Second; tempDelay > max { 2784 tempDelay = max 2785 } 2786 srv.logf("http: Accept error: %v; retrying in %v", e, tempDelay) 2787 time.Sleep(tempDelay) 2788 continue 2789 } 2790 return e 2791 } 2792 tempDelay = 0 2793 c := srv.newConn(rw) 2794 c.setState(c.rwc, StateNew) // before Serve can return 2795 go c.serve(ctx) 2796 } 2797 } 2798 2799 // ServeTLS accepts incoming connections on the Listener l, creating a 2800 // new service goroutine for each. The service goroutines read requests and 2801 // then call srv.Handler to reply to them. 2802 // 2803 // Additionally, files containing a certificate and matching private key for 2804 // the server must be provided if neither the Server's TLSConfig.Certificates 2805 // nor TLSConfig.GetCertificate are populated.. If the certificate is signed by 2806 // a certificate authority, the certFile should be the concatenation of the 2807 // server's certificate, any intermediates, and the CA's certificate. 2808 // 2809 // For HTTP/2 support, srv.TLSConfig should be initialized to the 2810 // provided listener's TLS Config before calling ServeTLS. If 2811 // srv.TLSConfig is non-nil and doesn't include the string "h2" in 2812 // Config.NextProtos, HTTP/2 support is not enabled. 2813 // 2814 // ServeTLS always returns a non-nil error. After Shutdown or Close, the 2815 // returned error is ErrServerClosed. 2816 func (srv *Server) ServeTLS(l net.Listener, certFile, keyFile string) error { 2817 // Setup HTTP/2 before srv.Serve, to initialize srv.TLSConfig 2818 // before we clone it and create the TLS Listener. 
2819 if err := srv.setupHTTP2_ServeTLS(); err != nil { 2820 return err 2821 } 2822 2823 config := cloneTLSConfig(srv.TLSConfig) 2824 if !strSliceContains(config.NextProtos, "http/1.1") { 2825 config.NextProtos = append(config.NextProtos, "http/1.1") 2826 } 2827 2828 configHasCert := len(config.Certificates) > 0 || config.GetCertificate != nil 2829 if !configHasCert || certFile != "" || keyFile != "" { 2830 var err error 2831 config.Certificates = make([]tls.Certificate, 1) 2832 config.Certificates[0], err = tls.LoadX509KeyPair(certFile, keyFile) 2833 if err != nil { 2834 return err 2835 } 2836 } 2837 2838 tlsListener := tls.NewListener(l, config) 2839 return srv.Serve(tlsListener) 2840 } 2841 2842 func (s *Server) trackListener(ln net.Listener, add bool) { 2843 s.mu.Lock() 2844 defer s.mu.Unlock() 2845 if s.listeners == nil { 2846 s.listeners = make(map[net.Listener]struct{}) 2847 } 2848 if add { 2849 // If the *Server is being reused after a previous 2850 // Close or Shutdown, reset its doneChan: 2851 if len(s.listeners) == 0 && len(s.activeConn) == 0 { 2852 s.doneChan = nil 2853 } 2854 s.listeners[ln] = struct{}{} 2855 } else { 2856 delete(s.listeners, ln) 2857 } 2858 } 2859 2860 func (s *Server) trackConn(c *conn, add bool) { 2861 s.mu.Lock() 2862 defer s.mu.Unlock() 2863 if s.activeConn == nil { 2864 s.activeConn = make(map[*conn]struct{}) 2865 } 2866 if add { 2867 s.activeConn[c] = struct{}{} 2868 } else { 2869 delete(s.activeConn, c) 2870 } 2871 } 2872 2873 func (s *Server) idleTimeout() time.Duration { 2874 if s.IdleTimeout != 0 { 2875 return s.IdleTimeout 2876 } 2877 return s.ReadTimeout 2878 } 2879 2880 func (s *Server) readHeaderTimeout() time.Duration { 2881 if s.ReadHeaderTimeout != 0 { 2882 return s.ReadHeaderTimeout 2883 } 2884 return s.ReadTimeout 2885 } 2886 2887 func (s *Server) doKeepAlives() bool { 2888 return atomic.LoadInt32(&s.disableKeepAlives) == 0 && !s.shuttingDown() 2889 } 2890 2891 func (s *Server) shuttingDown() bool { 2892 return atomic.LoadInt32(&s.inShutdown) != 0 2893 } 2894 2895 // SetKeepAlivesEnabled controls whether HTTP keep-alives are enabled. 2896 // By default, keep-alives are always enabled. Only very 2897 // resource-constrained environments or servers in the process of 2898 // shutting down should disable them. 2899 func (srv *Server) SetKeepAlivesEnabled(v bool) { 2900 if v { 2901 atomic.StoreInt32(&srv.disableKeepAlives, 0) 2902 return 2903 } 2904 atomic.StoreInt32(&srv.disableKeepAlives, 1) 2905 2906 // Close idle HTTP/1 conns: 2907 srv.closeIdleConns() 2908 2909 // Close HTTP/2 conns, as soon as they become idle, but reset 2910 // the chan so future conns (if the listener is still active) 2911 // still work and don't get a GOAWAY immediately, before their 2912 // first request: 2913 srv.mu.Lock() 2914 defer srv.mu.Unlock() 2915 srv.closeDoneChanLocked() // closes http2 conns 2916 srv.doneChan = nil 2917 } 2918 2919 func (s *Server) logf(format string, args ...interface{}) { 2920 if s.ErrorLog != nil { 2921 s.ErrorLog.Printf(format, args...) 2922 } else { 2923 log.Printf(format, args...) 2924 } 2925 } 2926 2927 // logf prints to the ErrorLog of the *Server associated with request r 2928 // via ServerContextKey. If there's no associated server, or if ErrorLog 2929 // is nil, logging is done via the log package's standard logger. 2930 func logf(r *Request, format string, args ...interface{}) { 2931 s, _ := r.Context().Value(ServerContextKey).(*Server) 2932 if s != nil && s.ErrorLog != nil { 2933 s.ErrorLog.Printf(format, args...) 
2934 } else { 2935 log.Printf(format, args...) 2936 } 2937 } 2938 2939 // ListenAndServe listens on the TCP network address addr 2940 // and then calls Serve with handler to handle requests 2941 // on incoming connections. 2942 // Accepted connections are configured to enable TCP keep-alives. 2943 // Handler is typically nil, in which case the DefaultServeMux is 2944 // used. 2945 // 2946 // A trivial example server is: 2947 // 2948 // package main 2949 // 2950 // import ( 2951 // "io" 2952 // "net/http" 2953 // "log" 2954 // ) 2955 // 2956 // // hello world, the web server 2957 // func HelloServer(w http.ResponseWriter, req *http.Request) { 2958 // io.WriteString(w, "hello, world!\n") 2959 // } 2960 // 2961 // func main() { 2962 // http.HandleFunc("/hello", HelloServer) 2963 // log.Fatal(http.ListenAndServe(":12345", nil)) 2964 // } 2965 // 2966 // ListenAndServe always returns a non-nil error. 2967 func ListenAndServe(addr string, handler Handler) error { 2968 server := &Server{Addr: addr, Handler: handler} 2969 return server.ListenAndServe() 2970 } 2971 2972 // ListenAndServeTLS acts identically to ListenAndServe, except that it 2973 // expects HTTPS connections. Additionally, files containing a certificate and 2974 // matching private key for the server must be provided. If the certificate 2975 // is signed by a certificate authority, the certFile should be the concatenation 2976 // of the server's certificate, any intermediates, and the CA's certificate. 2977 // 2978 // A trivial example server is: 2979 // 2980 // import ( 2981 // "log" 2982 // "net/http" 2983 // ) 2984 // 2985 // func handler(w http.ResponseWriter, req *http.Request) { 2986 // w.Header().Set("Content-Type", "text/plain") 2987 // w.Write([]byte("This is an example server.\n")) 2988 // } 2989 // 2990 // func main() { 2991 // http.HandleFunc("/", handler) 2992 // log.Printf("About to listen on 10443. Go to https://127.0.0.1:10443/") 2993 // err := http.ListenAndServeTLS(":10443", "cert.pem", "key.pem", nil) 2994 // log.Fatal(err) 2995 // } 2996 // 2997 // One can use generate_cert.go in crypto/tls to generate cert.pem and key.pem. 2998 // 2999 // ListenAndServeTLS always returns a non-nil error. 3000 func ListenAndServeTLS(addr, certFile, keyFile string, handler Handler) error { 3001 server := &Server{Addr: addr, Handler: handler} 3002 return server.ListenAndServeTLS(certFile, keyFile) 3003 } 3004 3005 // ListenAndServeTLS listens on the TCP network address srv.Addr and 3006 // then calls Serve to handle requests on incoming TLS connections. 3007 // Accepted connections are configured to enable TCP keep-alives. 3008 // 3009 // Filenames containing a certificate and matching private key for the 3010 // server must be provided if neither the Server's TLSConfig.Certificates 3011 // nor TLSConfig.GetCertificate are populated. If the certificate is 3012 // signed by a certificate authority, the certFile should be the 3013 // concatenation of the server's certificate, any intermediates, and 3014 // the CA's certificate. 3015 // 3016 // If srv.Addr is blank, ":https" is used. 3017 // 3018 // ListenAndServeTLS always returns a non-nil error. 
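//
// A brief sketch (the certificate files are assumptions): when
// srv.TLSConfig already carries a certificate, the file arguments may be
// left empty:
//
//	cert, err := tls.LoadX509KeyPair("cert.pem", "key.pem")
//	if err != nil {
//		log.Fatal(err)
//	}
//	srv := &http.Server{
//		Addr:      ":8443",
//		TLSConfig: &tls.Config{Certificates: []tls.Certificate{cert}},
//	}
//	log.Fatal(srv.ListenAndServeTLS("", ""))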
3019 func (srv *Server) ListenAndServeTLS(certFile, keyFile string) error { 3020 addr := srv.Addr 3021 if addr == "" { 3022 addr = ":https" 3023 } 3024 3025 ln, err := net.Listen("tcp", addr) 3026 if err != nil { 3027 return err 3028 } 3029 3030 defer ln.Close() 3031 3032 return srv.ServeTLS(tcpKeepAliveListener{ln.(*net.TCPListener)}, certFile, keyFile) 3033 } 3034 3035 // setupHTTP2_ServeTLS conditionally configures HTTP/2 on 3036 // srv and returns whether there was an error setting it up. If it is 3037 // not configured for policy reasons, nil is returned. 3038 func (srv *Server) setupHTTP2_ServeTLS() error { 3039 srv.nextProtoOnce.Do(srv.onceSetNextProtoDefaults) 3040 return srv.nextProtoErr 3041 } 3042 3043 // setupHTTP2_Serve is called from (*Server).Serve and conditionally 3044 // configures HTTP/2 on srv using a more conservative policy than 3045 // setupHTTP2_ServeTLS because Serve may be called 3046 // concurrently. 3047 // 3048 // The tests named TestTransportAutomaticHTTP2* and 3049 // TestConcurrentServerServe in server_test.go demonstrate some 3050 // of the supported use cases and motivations. 3051 func (srv *Server) setupHTTP2_Serve() error { 3052 srv.nextProtoOnce.Do(srv.onceSetNextProtoDefaults_Serve) 3053 return srv.nextProtoErr 3054 } 3055 3056 func (srv *Server) onceSetNextProtoDefaults_Serve() { 3057 if srv.shouldConfigureHTTP2ForServe() { 3058 srv.onceSetNextProtoDefaults() 3059 } 3060 } 3061 3062 // onceSetNextProtoDefaults configures HTTP/2, if the user hasn't 3063 // configured otherwise. (by setting srv.TLSNextProto non-nil) 3064 // It must only be called via srv.nextProtoOnce (use srv.setupHTTP2_*). 3065 func (srv *Server) onceSetNextProtoDefaults() { 3066 if strings.Contains(os.Getenv("GODEBUG"), "http2server=0") { 3067 return 3068 } 3069 // Enable HTTP/2 by default if the user hasn't otherwise 3070 // configured their TLSNextProto map. 3071 if srv.TLSNextProto == nil { 3072 conf := &http2Server{ 3073 NewWriteScheduler: func() http2WriteScheduler { return http2NewPriorityWriteScheduler(nil) }, 3074 } 3075 srv.nextProtoErr = http2ConfigureServer(srv, conf) 3076 } 3077 } 3078 3079 // TimeoutHandler returns a Handler that runs h with the given time limit. 3080 // 3081 // The new Handler calls h.ServeHTTP to handle each request, but if a 3082 // call runs for longer than its time limit, the handler responds with 3083 // a 503 Service Unavailable error and the given message in its body. 3084 // (If msg is empty, a suitable default message will be sent.) 3085 // After such a timeout, writes by h to its ResponseWriter will return 3086 // ErrHandlerTimeout. 3087 // 3088 // TimeoutHandler buffers all Handler writes to memory and does not 3089 // support the Hijacker or Flusher interfaces. 3090 func TimeoutHandler(h Handler, dt time.Duration, msg string) Handler { 3091 return &timeoutHandler{ 3092 handler: h, 3093 body: msg, 3094 dt: dt, 3095 } 3096 } 3097 3098 // ErrHandlerTimeout is returned on ResponseWriter Write calls 3099 // in handlers which have timed out. 3100 var ErrHandlerTimeout = errors.New("http: Handler timeout") 3101 3102 type timeoutHandler struct { 3103 handler Handler 3104 body string 3105 dt time.Duration 3106 3107 // When set, no context will be created and this context will 3108 // be used instead. 
3109 testContext context.Context 3110 } 3111 3112 func (h *timeoutHandler) errorBody() string { 3113 if h.body != "" { 3114 return h.body 3115 } 3116 return "<html><head><title>Timeout</title></head><body><h1>Timeout</h1></body></html>" 3117 } 3118 3119 func (h *timeoutHandler) ServeHTTP(w ResponseWriter, r *Request) { 3120 ctx := h.testContext 3121 if ctx == nil { 3122 var cancelCtx context.CancelFunc 3123 ctx, cancelCtx = context.WithTimeout(r.Context(), h.dt) 3124 defer cancelCtx() 3125 } 3126 r = r.WithContext(ctx) 3127 done := make(chan struct{}) 3128 tw := &timeoutWriter{ 3129 w: w, 3130 h: make(Header), 3131 } 3132 panicChan := make(chan interface{}, 1) 3133 go func() { 3134 defer func() { 3135 if p := recover(); p != nil { 3136 panicChan <- p 3137 } 3138 }() 3139 h.handler.ServeHTTP(tw, r) 3140 close(done) 3141 }() 3142 select { 3143 case p := <-panicChan: 3144 panic(p) 3145 case <-done: 3146 tw.mu.Lock() 3147 defer tw.mu.Unlock() 3148 dst := w.Header() 3149 for k, vv := range tw.h { 3150 dst[k] = vv 3151 } 3152 if !tw.wroteHeader { 3153 tw.code = StatusOK 3154 } 3155 w.WriteHeader(tw.code) 3156 w.Write(tw.wbuf.Bytes()) 3157 case <-ctx.Done(): 3158 tw.mu.Lock() 3159 defer tw.mu.Unlock() 3160 w.WriteHeader(StatusServiceUnavailable) 3161 io.WriteString(w, h.errorBody()) 3162 tw.timedOut = true 3163 return 3164 } 3165 } 3166 3167 type timeoutWriter struct { 3168 w ResponseWriter 3169 h Header 3170 wbuf bytes.Buffer 3171 3172 mu sync.Mutex 3173 timedOut bool 3174 wroteHeader bool 3175 code int 3176 } 3177 3178 func (tw *timeoutWriter) Header() Header { return tw.h } 3179 3180 func (tw *timeoutWriter) Write(p []byte) (int, error) { 3181 tw.mu.Lock() 3182 defer tw.mu.Unlock() 3183 if tw.timedOut { 3184 return 0, ErrHandlerTimeout 3185 } 3186 if !tw.wroteHeader { 3187 tw.writeHeader(StatusOK) 3188 } 3189 return tw.wbuf.Write(p) 3190 } 3191 3192 func (tw *timeoutWriter) WriteHeader(code int) { 3193 checkWriteHeaderCode(code) 3194 tw.mu.Lock() 3195 defer tw.mu.Unlock() 3196 if tw.timedOut || tw.wroteHeader { 3197 return 3198 } 3199 tw.writeHeader(code) 3200 } 3201 3202 func (tw *timeoutWriter) writeHeader(code int) { 3203 tw.wroteHeader = true 3204 tw.code = code 3205 } 3206 3207 // tcpKeepAliveListener sets TCP keep-alive timeouts on accepted 3208 // connections. It's used by ListenAndServe and ListenAndServeTLS so 3209 // dead TCP connections (e.g. closing laptop mid-download) eventually 3210 // go away. 3211 type tcpKeepAliveListener struct { 3212 *net.TCPListener 3213 } 3214 3215 func (ln tcpKeepAliveListener) Accept() (net.Conn, error) { 3216 tc, err := ln.AcceptTCP() 3217 if err != nil { 3218 return nil, err 3219 } 3220 tc.SetKeepAlive(true) 3221 tc.SetKeepAlivePeriod(3 * time.Minute) 3222 return tc, nil 3223 } 3224 3225 // globalOptionsHandler responds to "OPTIONS *" requests. 3226 type globalOptionsHandler struct{} 3227 3228 func (globalOptionsHandler) ServeHTTP(w ResponseWriter, r *Request) { 3229 w.Header().Set("Content-Length", "0") 3230 if r.ContentLength != 0 { 3231 // Read up to 4KB of OPTIONS body (as mentioned in the 3232 // spec as being reserved for future use), but anything 3233 // over that is considered a waste of server resources 3234 // (or an attack) and we abort and close the connection, 3235 // courtesy of MaxBytesReader's EOF behavior. 3236 mb := MaxBytesReader(w, r.Body, 4<<10) 3237 io.Copy(ioutil.Discard, mb) 3238 } 3239 } 3240 3241 // initNPNRequest is an HTTP handler that initializes certain 3242 // uninitialized fields in its *Request. 
Such partially-initialized 3243 // Requests come from NPN protocol handlers. 3244 type initNPNRequest struct { 3245 c *tls.Conn 3246 h serverHandler 3247 } 3248 3249 func (h initNPNRequest) ServeHTTP(rw ResponseWriter, req *Request) { 3250 if req.TLS == nil { 3251 req.TLS = &tls.ConnectionState{} 3252 *req.TLS = h.c.ConnectionState() 3253 } 3254 if req.Body == nil { 3255 req.Body = NoBody 3256 } 3257 if req.RemoteAddr == "" { 3258 req.RemoteAddr = h.c.RemoteAddr().String() 3259 } 3260 h.h.ServeHTTP(rw, req) 3261 } 3262 3263 // loggingConn is used for debugging. 3264 type loggingConn struct { 3265 name string 3266 net.Conn 3267 } 3268 3269 var ( 3270 uniqNameMu sync.Mutex 3271 uniqNameNext = make(map[string]int) 3272 ) 3273 3274 func newLoggingConn(baseName string, c net.Conn) net.Conn { 3275 uniqNameMu.Lock() 3276 defer uniqNameMu.Unlock() 3277 uniqNameNext[baseName]++ 3278 return &loggingConn{ 3279 name: fmt.Sprintf("%s-%d", baseName, uniqNameNext[baseName]), 3280 Conn: c, 3281 } 3282 } 3283 3284 func (c *loggingConn) Write(p []byte) (n int, err error) { 3285 log.Printf("%s.Write(%d) = ....", c.name, len(p)) 3286 n, err = c.Conn.Write(p) 3287 log.Printf("%s.Write(%d) = %d, %v", c.name, len(p), n, err) 3288 return 3289 } 3290 3291 func (c *loggingConn) Read(p []byte) (n int, err error) { 3292 log.Printf("%s.Read(%d) = ....", c.name, len(p)) 3293 n, err = c.Conn.Read(p) 3294 log.Printf("%s.Read(%d) = %d, %v", c.name, len(p), n, err) 3295 return 3296 } 3297 3298 func (c *loggingConn) Close() (err error) { 3299 log.Printf("%s.Close() = ...", c.name) 3300 err = c.Conn.Close() 3301 log.Printf("%s.Close() = %v", c.name, err) 3302 return 3303 } 3304 3305 // checkConnErrorWriter writes to c.rwc and records any write errors to c.werr. 3306 // It only contains one field (and a pointer field at that), so it 3307 // fits in an interface value without an extra allocation. 3308 type checkConnErrorWriter struct { 3309 c *conn 3310 } 3311 3312 func (w checkConnErrorWriter) Write(p []byte) (n int, err error) { 3313 n, err = w.c.rwc.Write(p) 3314 if err != nil && w.c.werr == nil { 3315 w.c.werr = err 3316 w.c.cancelCtx() 3317 } 3318 return 3319 } 3320 3321 func numLeadingCRorLF(v []byte) (n int) { 3322 for _, b := range v { 3323 if b == '\r' || b == '\n' { 3324 n++ 3325 continue 3326 } 3327 break 3328 } 3329 return 3330 3331 } 3332 3333 func strSliceContains(ss []string, s string) bool { 3334 for _, v := range ss { 3335 if v == s { 3336 return true 3337 } 3338 } 3339 return false 3340 } 3341
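// A short usage sketch for TimeoutHandler (defined above); the handler,
// duration, and message here are illustrative assumptions:
//
//	slow := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
//		time.Sleep(2 * time.Second)
//		io.WriteString(w, "done")
//	})
//	http.Handle("/slow", http.TimeoutHandler(slow, time.Second, "request timed out"))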