Update golang dependencies

Manuel de Brito Fontes 2017-05-20 20:11:38 -04:00
parent c5e30973e5
commit 9ddf98769a
1009 changed files with 175867 additions and 50378 deletions


@@ -10,12 +10,14 @@ import (
"bufio"
"bytes"
"compress/gzip"
"crypto/rand"
"crypto/tls"
"errors"
"fmt"
"io"
"io/ioutil"
"log"
"math"
"net"
"net/http"
"sort"
@@ -25,6 +27,7 @@ import (
"time"
"golang.org/x/net/http2/hpack"
"golang.org/x/net/idna"
"golang.org/x/net/lex/httplex"
)
@@ -148,27 +151,32 @@ type ClientConn struct {
readerDone chan struct{} // closed on error
readerErr error // set before readerDone is closed
mu sync.Mutex // guards following
cond *sync.Cond // hold mu; broadcast on flow/closed changes
flow flow // our conn-level flow control quota (cs.flow is per stream)
inflow flow // peer's conn-level flow control
closed bool
goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received
goAwayDebug string // goAway frame's debug data, retained as a string
streams map[uint32]*clientStream // client-initiated
nextStreamID uint32
bw *bufio.Writer
br *bufio.Reader
fr *Framer
lastActive time.Time
idleTimeout time.Duration // or 0 for never
idleTimer *time.Timer
// Settings from peer:
mu sync.Mutex // guards following
cond *sync.Cond // hold mu; broadcast on flow/closed changes
flow flow // our conn-level flow control quota (cs.flow is per stream)
inflow flow // peer's conn-level flow control
closed bool
wantSettingsAck bool // we sent a SETTINGS frame and haven't heard back
goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received
goAwayDebug string // goAway frame's debug data, retained as a string
streams map[uint32]*clientStream // client-initiated
nextStreamID uint32
pings map[[8]byte]chan struct{} // in flight ping data to notification channel
bw *bufio.Writer
br *bufio.Reader
fr *Framer
lastActive time.Time
// Settings from peer: (also guarded by mu)
maxFrameSize uint32
maxConcurrentStreams uint32
initialWindowSize uint32
hbuf bytes.Buffer // HPACK encoder writes into this
henc *hpack.Encoder
freeBuf [][]byte
hbuf bytes.Buffer // HPACK encoder writes into this
henc *hpack.Encoder
freeBuf [][]byte
wmu sync.Mutex // held while writing; acquire AFTER mu if holding both
werr error // first write error that has occurred
@@ -183,6 +191,7 @@ type clientStream struct {
ID uint32
resc chan resAndError
bufPipe pipe // buffered pipe with the flow-controlled response payload
startedWrite bool // started request body write; guarded by cc.mu
requestedGzip bool
on100 func() // optional code to run if get a 100 continue response
@@ -191,6 +200,7 @@ type clientStream struct {
bytesRemain int64 // -1 means unknown; owned by transportResponseBody.Read
readErr error // sticky read error; owned by transportResponseBody.Read
stopReqBody error // if non-nil, stop writing req body; guarded by cc.mu
didReset bool // whether we sent a RST_STREAM to the server; guarded by cc.mu
peerReset chan struct{} // closed on peer reset
resetErr error // populated before peerReset is closed
@@ -218,15 +228,26 @@ func (cs *clientStream) awaitRequestCancel(req *http.Request) {
}
select {
case <-req.Cancel:
cs.cancelStream()
cs.bufPipe.CloseWithError(errRequestCanceled)
cs.cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
case <-ctx.Done():
cs.cancelStream()
cs.bufPipe.CloseWithError(ctx.Err())
cs.cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
case <-cs.done:
}
}
func (cs *clientStream) cancelStream() {
cs.cc.mu.Lock()
didReset := cs.didReset
cs.didReset = true
cs.cc.mu.Unlock()
if !didReset {
cs.cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
}
}
// checkResetOrDone reports any error sent in a RST_STREAM frame by the
// server, or errStreamClosed if the stream is complete.
func (cs *clientStream) checkResetOrDone() error {
@@ -283,14 +304,22 @@ func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
// authorityAddr takes a given authority (a host/IP, or host:port / ip:port)
// and returns a host:port. The port 443 is added if needed.
func authorityAddr(scheme string, authority string) (addr string) {
if _, _, err := net.SplitHostPort(authority); err == nil {
return authority
host, port, err := net.SplitHostPort(authority)
if err != nil { // authority didn't have a port
port = "443"
if scheme == "http" {
port = "80"
}
host = authority
}
port := "443"
if scheme == "http" {
port = "80"
if a, err := idna.ToASCII(host); err == nil {
host = a
}
return net.JoinHostPort(authority, port)
// IPv6 address literal, without a port:
if strings.HasPrefix(host, "[") && strings.HasSuffix(host, "]") {
return host + ":" + port
}
return net.JoinHostPort(host, port)
}
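// Illustrative sketch (editorial, not part of this change): with the logic
// above, authorityAddr normalizes authorities as follows:
//
//	authorityAddr("https", "example.com")      == "example.com:443"
//	authorityAddr("http", "example.com")       == "example.com:80"
//	authorityAddr("https", "example.com:8443") == "example.com:8443"
//	authorityAddr("https", "[::1]")            == "[::1]:443"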
// RoundTripOpt is like RoundTrip, but takes options.
@@ -308,8 +337,10 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res
}
traceGotConn(req, cc)
res, err := cc.RoundTrip(req)
if shouldRetryRequest(req, err) {
continue
if err != nil {
if req, err = shouldRetryRequest(req, err); err == nil {
continue
}
}
if err != nil {
t.vlogf("RoundTrip failure: %v", err)
@@ -331,15 +362,44 @@ func (t *Transport) CloseIdleConnections() {
var (
errClientConnClosed = errors.New("http2: client conn is closed")
errClientConnUnusable = errors.New("http2: client conn not usable")
errClientConnGotGoAway = errors.New("http2: Transport received Server's graceful shutdown GOAWAY")
errClientConnGotGoAwayAfterSomeReqBody = errors.New("http2: Transport received Server's graceful shutdown GOAWAY; some request body already written")
)
func shouldRetryRequest(req *http.Request, err error) bool {
// TODO: retry GET requests (no bodies) more aggressively, if shutdown
// before response.
return err == errClientConnUnusable
// shouldRetryRequest is called by RoundTrip when a request fails to get
// response headers. It is always called with a non-nil error.
// It returns either a request to retry (either the same request, or a
// modified clone), or an error if the request can't be replayed.
func shouldRetryRequest(req *http.Request, err error) (*http.Request, error) {
switch err {
default:
return nil, err
case errClientConnUnusable, errClientConnGotGoAway:
return req, nil
case errClientConnGotGoAwayAfterSomeReqBody:
// If the Body is nil (or http.NoBody), it's safe to reuse
// this request and its Body.
if req.Body == nil || reqBodyIsNoBody(req.Body) {
return req, nil
}
// Otherwise we depend on the Request having its GetBody
// func defined.
getBody := reqGetBody(req) // Go 1.8: getBody = req.GetBody
if getBody == nil {
return nil, errors.New("http2: Transport: peer server initiated graceful shutdown after some of Request.Body was written; define Request.GetBody to avoid this error")
}
body, err := getBody()
if err != nil {
return nil, err
}
newReq := *req
newReq.Body = body
return &newReq, nil
}
}
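// Editorial sketch (not part of this change): a caller can make a request
// body replayable for the errClientConnGotGoAwayAfterSomeReqBody case above
// by defining Request.GetBody (Go 1.8+). http.NewRequest already sets GetBody
// for common in-memory readers; it is spelled out here only for illustration,
// and the URL and payload are placeholders.
func exampleReplayableRequest() (*http.Request, error) {
	payload := []byte(`{"hello":"world"}`)
	req, err := http.NewRequest("POST", "https://example.com/v1/items", bytes.NewReader(payload))
	if err != nil {
		return nil, err
	}
	req.GetBody = func() (io.ReadCloser, error) {
		// Hand the Transport a fresh reader over the same bytes on retry.
		return ioutil.NopCloser(bytes.NewReader(payload)), nil
	}
	return req, nil
}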
func (t *Transport) dialClientConn(addr string) (*ClientConn, error) {
func (t *Transport) dialClientConn(addr string, singleUse bool) (*ClientConn, error) {
host, _, err := net.SplitHostPort(addr)
if err != nil {
return nil, err
@@ -348,13 +408,13 @@ func (t *Transport) dialClientConn(addr string) (*ClientConn, error) {
if err != nil {
return nil, err
}
return t.NewClientConn(tconn)
return t.newClientConn(tconn, singleUse)
}
func (t *Transport) newTLSConfig(host string) *tls.Config {
cfg := new(tls.Config)
if t.TLSClientConfig != nil {
*cfg = *t.TLSClientConfig
*cfg = *cloneTLSConfig(t.TLSClientConfig)
}
if !strSliceContains(cfg.NextProtos, NextProtoTLS) {
cfg.NextProtos = append([]string{NextProtoTLS}, cfg.NextProtos...)
@@ -409,14 +469,10 @@ func (t *Transport) expectContinueTimeout() time.Duration {
}
func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) {
if VerboseLogs {
t.vlogf("http2: Transport creating client conn to %v", c.RemoteAddr())
}
if _, err := c.Write(clientPreface); err != nil {
t.vlogf("client preface write error: %v", err)
return nil, err
}
return t.newClientConn(c, false)
}
func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) {
cc := &ClientConn{
t: t,
tconn: c,
@@ -426,7 +482,18 @@ func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) {
initialWindowSize: 65535, // spec default
maxConcurrentStreams: 1000, // "infinite", per spec. 1000 seems good enough.
streams: make(map[uint32]*clientStream),
singleUse: singleUse,
wantSettingsAck: true,
pings: make(map[[8]byte]chan struct{}),
}
if d := t.idleConnTimeout(); d != 0 {
cc.idleTimeout = d
cc.idleTimer = time.AfterFunc(d, cc.onIdleTimeout)
}
if VerboseLogs {
t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr())
}
cc.cond = sync.NewCond(&cc.mu)
cc.flow.add(int32(initialWindowSize))
@@ -454,6 +521,8 @@ func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) {
if max := t.maxHeaderListSize(); max != 0 {
initialSettings = append(initialSettings, Setting{ID: SettingMaxHeaderListSize, Val: max})
}
cc.bw.Write(clientPreface)
cc.fr.WriteSettings(initialSettings...)
cc.fr.WriteWindowUpdate(0, transportDefaultConnFlow)
cc.inflow.add(transportDefaultConnFlow + initialWindowSize)
@@ -462,33 +531,6 @@ func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) {
return nil, cc.werr
}
// Read the obligatory SETTINGS frame
f, err := cc.fr.ReadFrame()
if err != nil {
return nil, err
}
sf, ok := f.(*SettingsFrame)
if !ok {
return nil, fmt.Errorf("expected settings frame, got: %T", f)
}
cc.fr.WriteSettingsAck()
cc.bw.Flush()
sf.ForeachSetting(func(s Setting) error {
switch s.ID {
case SettingMaxFrameSize:
cc.maxFrameSize = s.Val
case SettingMaxConcurrentStreams:
cc.maxConcurrentStreams = s.Val
case SettingInitialWindowSize:
cc.initialWindowSize = s.Val
default:
// TODO(bradfitz): handle more; at least SETTINGS_HEADER_TABLE_SIZE?
t.vlogf("Unhandled Setting: %v", s)
}
return nil
})
go cc.readLoop()
return cc, nil
}
@@ -507,6 +549,15 @@ func (cc *ClientConn) setGoAway(f *GoAwayFrame) {
if old != nil && old.ErrCode != ErrCodeNo {
cc.goAway.ErrCode = old.ErrCode
}
last := f.LastStreamID
for streamID, cs := range cc.streams {
if streamID > last {
select {
case cs.resc <- resAndError{err: errClientConnGotGoAway}:
default:
}
}
}
}
func (cc *ClientConn) CanTakeNewRequest() bool {
@@ -521,7 +572,17 @@ func (cc *ClientConn) canTakeNewRequestLocked() bool {
}
return cc.goAway == nil && !cc.closed &&
int64(len(cc.streams)+1) < int64(cc.maxConcurrentStreams) &&
cc.nextStreamID < 2147483647
cc.nextStreamID < math.MaxInt32
}
// onIdleTimeout is called from a time.AfterFunc goroutine. It will
// only be called when we're idle, but because we're coming from a new
// goroutine, there could be a new request coming in at the same time,
// so this simply calls the synchronized closeIfIdle to shut down this
// connection. The timer could just call closeIfIdle, but this is more
// clear.
func (cc *ClientConn) onIdleTimeout() {
cc.closeIfIdle()
}
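// Editorial sketch (not part of this change): the idle timer armed in
// newClientConn is driven by Transport.idleConnTimeout(), which is assumed
// here to mirror http.Transport.IdleConnTimeout when HTTP/2 is enabled on an
// existing *http.Transport via ConfigureTransport.
func exampleClientWithIdleTimeout() (*http.Client, error) {
	t1 := &http.Transport{IdleConnTimeout: 90 * time.Second}
	if err := ConfigureTransport(t1); err != nil {
		return nil, err
	}
	return &http.Client{Transport: t1}, nil
}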
func (cc *ClientConn) closeIfIdle() {
@@ -531,9 +592,13 @@ func (cc *ClientConn) closeIfIdle() {
return
}
cc.closed = true
nextID := cc.nextStreamID
// TODO: do clients send GOAWAY too? maybe? Just Close:
cc.mu.Unlock()
if VerboseLogs {
cc.vlogf("http2: Transport closing idle conn %p (forSingleUse=%v, maxStream=%v)", cc, cc.singleUse, nextID-2)
}
cc.tconn.Close()
}
@@ -593,8 +658,6 @@ func commaSeparatedTrailers(req *http.Request) (string, error) {
}
if len(keys) > 0 {
sort.Strings(keys)
// TODO: could do better allocation-wise here, but trailers are rare,
// so being lazy for now.
return strings.Join(keys, ","), nil
}
return "", nil
@@ -616,48 +679,37 @@ func (cc *ClientConn) responseHeaderTimeout() time.Duration {
// Certain headers are special-cased as okay but not transmitted later.
func checkConnHeaders(req *http.Request) error {
if v := req.Header.Get("Upgrade"); v != "" {
return errors.New("http2: invalid Upgrade request header")
return fmt.Errorf("http2: invalid Upgrade request header: %q", req.Header["Upgrade"])
}
if v := req.Header.Get("Transfer-Encoding"); (v != "" && v != "chunked") || len(req.Header["Transfer-Encoding"]) > 1 {
return errors.New("http2: invalid Transfer-Encoding request header")
if vv := req.Header["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") {
return fmt.Errorf("http2: invalid Transfer-Encoding request header: %q", vv)
}
if v := req.Header.Get("Connection"); (v != "" && v != "close" && v != "keep-alive") || len(req.Header["Connection"]) > 1 {
return errors.New("http2: invalid Connection request header")
if vv := req.Header["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "close" && vv[0] != "keep-alive") {
return fmt.Errorf("http2: invalid Connection request header: %q", vv)
}
return nil
}
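// Illustrative sketch (editorial): outcomes of the checks above. Values that
// pass are still special-cased as okay but not transmitted later.
//
//	Upgrade: websocket         -> error
//	Transfer-Encoding: chunked -> ok (dropped before sending)
//	Transfer-Encoding: gzip    -> error
//	Connection: close          -> ok (dropped before sending)
//	Connection: keep-alive     -> ok (dropped before sending)
//	Connection: upgrade        -> error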
func bodyAndLength(req *http.Request) (body io.Reader, contentLen int64) {
body = req.Body
if body == nil {
return nil, 0
// actualContentLength returns a sanitized version of
// req.ContentLength, where 0 actually means zero (not unknown) and -1
// means unknown.
func actualContentLength(req *http.Request) int64 {
if req.Body == nil {
return 0
}
if req.ContentLength != 0 {
return req.Body, req.ContentLength
return req.ContentLength
}
// We have a body but a zero content length. Test to see if
// it's actually zero or just unset.
var buf [1]byte
n, rerr := io.ReadFull(body, buf[:])
if rerr != nil && rerr != io.EOF {
return errorReader{rerr}, -1
}
if n == 1 {
// Oh, guess there is data in this Body Reader after all.
// The ContentLength field just wasn't set.
// Stitch the Body back together again, re-attaching our
// consumed byte.
return io.MultiReader(bytes.NewReader(buf[:]), body), -1
}
// Body is actually zero bytes.
return nil, 0
return -1
}
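// Illustrative sketch (editorial): values produced by the rules above.
//
//	req.Body == nil                          -> 0  (definitely empty)
//	req.Body != nil, req.ContentLength == 25 -> 25
//	req.Body != nil, req.ContentLength == 0  -> -1 (length unknown)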
func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
if err := checkConnHeaders(req); err != nil {
return nil, err
}
if cc.idleTimer != nil {
cc.idleTimer.Stop()
}
trailers, err := commaSeparatedTrailers(req)
if err != nil {
@@ -665,9 +717,6 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
}
hasTrailers := trailers != ""
body, contentLen := bodyAndLength(req)
hasBody := body != nil
cc.mu.Lock()
cc.lastActive = time.Now()
if cc.closed || !cc.canTakeNewRequestLocked() {
@@ -675,6 +724,10 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
return nil, errClientConnUnusable
}
body := req.Body
hasBody := body != nil
contentLen := actualContentLength(req)
// TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere?
var requestedGzip bool
if !cc.t.disableCompression() &&
@@ -747,30 +800,41 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
bodyWritten := false
ctx := reqContext(req)
handleReadLoopResponse := func(re resAndError) (*http.Response, error) {
res := re.res
if re.err != nil || res.StatusCode > 299 {
// On error or status code 3xx, 4xx, 5xx, etc., abort any
// ongoing write, assuming that the server doesn't care
// about our request body. If the server replied with 1xx or
// 2xx, however, then assume the server DOES potentially
// want our body (e.g. full-duplex streaming:
// golang.org/issue/13444). If it turns out the server
// doesn't, they'll RST_STREAM us soon enough. This is a
// heuristic to avoid adding knobs to Transport. Hopefully
// we can keep it.
bodyWriter.cancel()
cs.abortRequestBodyWrite(errStopReqBodyWrite)
}
if re.err != nil {
if re.err == errClientConnGotGoAway {
cc.mu.Lock()
if cs.startedWrite {
re.err = errClientConnGotGoAwayAfterSomeReqBody
}
cc.mu.Unlock()
}
cc.forgetStreamID(cs.ID)
return nil, re.err
}
res.Request = req
res.TLS = cc.tlsState
return res, nil
}
for {
select {
case re := <-readLoopResCh:
res := re.res
if re.err != nil || res.StatusCode > 299 {
// On error or status code 3xx, 4xx, 5xx, etc., abort any
// ongoing write, assuming that the server doesn't care
// about our request body. If the server replied with 1xx or
// 2xx, however, then assume the server DOES potentially
// want our body (e.g. full-duplex streaming:
// golang.org/issue/13444). If it turns out the server
// doesn't, they'll RST_STREAM us soon enough. This is a
// heuristic to avoid adding knobs to Transport. Hopefully
// we can keep it.
bodyWriter.cancel()
cs.abortRequestBodyWrite(errStopReqBodyWrite)
}
if re.err != nil {
cc.forgetStreamID(cs.ID)
return nil, re.err
}
res.Request = req
res.TLS = cc.tlsState
return res, nil
return handleReadLoopResponse(re)
case <-respHeaderTimer:
cc.forgetStreamID(cs.ID)
if !hasBody || bodyWritten {
@@ -804,6 +868,12 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
// forgetStreamID.
return nil, cs.resetErr
case err := <-bodyWriter.resc:
// Prefer the read loop's response, if available. Issue 16102.
select {
case re := <-readLoopResCh:
return handleReadLoopResponse(re)
default:
}
if err != nil {
return nil, err
}
@@ -908,10 +978,11 @@ func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) (
err = cc.fr.WriteData(cs.ID, sentEnd, data)
if err == nil {
// TODO(bradfitz): this flush is for latency, not bandwidth.
// Most requests won't need this. Make this opt-in or opt-out?
// Use some heuristic on the body type? Nagle-like timers?
// Based on 'n'? Only last chunk of this for loop, unless flow control
// tokens are low? For now, always:
// Most requests won't need this. Make this opt-in or
// opt-out? Use some heuristic on the body type? Nagle-like
// timers? Based on 'n'? Only last chunk of this for loop,
// unless flow control tokens are low? For now, always.
// If we change this, see comment below.
err = cc.bw.Flush()
}
cc.wmu.Unlock()
@@ -921,28 +992,33 @@ func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) (
}
}
cc.wmu.Lock()
if !sentEnd {
var trls []byte
if hasTrailers {
cc.mu.Lock()
trls = cc.encodeTrailers(req)
cc.mu.Unlock()
}
if sentEnd {
// Already sent END_STREAM (which implies we have no
// trailers) and flushed, because currently all
// WriteData frames above get a flush. So we're done.
return nil
}
// Avoid forgetting to send an END_STREAM if the encoded
// trailers are 0 bytes. Both results produce an END_STREAM.
if len(trls) > 0 {
err = cc.writeHeaders(cs.ID, true, trls)
} else {
err = cc.fr.WriteData(cs.ID, true, nil)
}
var trls []byte
if hasTrailers {
cc.mu.Lock()
defer cc.mu.Unlock()
trls = cc.encodeTrailers(req)
}
cc.wmu.Lock()
defer cc.wmu.Unlock()
// Two ways to send END_STREAM: either with trailers, or
// with an empty DATA frame.
if len(trls) > 0 {
err = cc.writeHeaders(cs.ID, true, trls)
} else {
err = cc.fr.WriteData(cs.ID, true, nil)
}
if ferr := cc.bw.Flush(); ferr != nil && err == nil {
err = ferr
}
cc.wmu.Unlock()
return err
}
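// Editorial sketch (not part of this change): a request whose trailers the
// code above would send in a final HEADERS frame after the body. The trailer
// name is a placeholder; its value may be filled in while the body is written.
func exampleRequestWithTrailers(body io.Reader) (*http.Request, error) {
	req, err := http.NewRequest("PUT", "https://example.com/upload", body)
	if err != nil {
		return nil, err
	}
	req.Trailer = http.Header{"X-Checksum": nil}
	return req, nil
}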
@@ -995,6 +1071,26 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail
if host == "" {
host = req.URL.Host
}
host, err := httplex.PunycodeHostPort(host)
if err != nil {
return nil, err
}
var path string
if req.Method != "CONNECT" {
path = req.URL.RequestURI()
if !validPseudoPath(path) {
orig := path
path = strings.TrimPrefix(path, req.URL.Scheme+"://"+host)
if !validPseudoPath(path) {
if req.URL.Opaque != "" {
return nil, fmt.Errorf("invalid request :path %q from URL.Opaque = %q", orig, req.URL.Opaque)
} else {
return nil, fmt.Errorf("invalid request :path %q", orig)
}
}
}
}
// Check for any invalid headers and return an error before we
// potentially pollute our hpack state. (We want to be able to
@@ -1018,8 +1114,8 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail
cc.writeHeader(":authority", host)
cc.writeHeader(":method", req.Method)
if req.Method != "CONNECT" {
cc.writeHeader(":path", req.URL.RequestURI())
cc.writeHeader(":scheme", "https")
cc.writeHeader(":path", path)
cc.writeHeader(":scheme", req.URL.Scheme)
}
if trailers != "" {
cc.writeHeader("trailer", trailers)
@@ -1146,6 +1242,9 @@ func (cc *ClientConn) streamByID(id uint32, andRemove bool) *clientStream {
if andRemove && cs != nil && !cc.closed {
cc.lastActive = time.Now()
delete(cc.streams, id)
if len(cc.streams) == 0 && cc.idleTimer != nil {
cc.idleTimer.Reset(cc.idleTimeout)
}
close(cs.done)
cc.cond.Broadcast() // wake up checkResetOrDone via clientStream.awaitFlowControl
}
@@ -1188,27 +1287,37 @@ func (e GoAwayError) Error() string {
e.LastStreamID, e.ErrCode, e.DebugData)
}
func isEOFOrNetReadError(err error) bool {
if err == io.EOF {
return true
}
ne, ok := err.(*net.OpError)
return ok && ne.Op == "read"
}
func (rl *clientConnReadLoop) cleanup() {
cc := rl.cc
defer cc.tconn.Close()
defer cc.t.connPool().MarkDead(cc)
defer close(cc.readerDone)
if cc.idleTimer != nil {
cc.idleTimer.Stop()
}
// Close any response bodies if the server closes prematurely.
// TODO: also do this if we've written the headers but not
// gotten a response yet.
err := cc.readerErr
cc.mu.Lock()
if err == io.EOF {
if cc.goAway != nil {
err = GoAwayError{
LastStreamID: cc.goAway.LastStreamID,
ErrCode: cc.goAway.ErrCode,
DebugData: cc.goAwayDebug,
}
} else {
err = io.ErrUnexpectedEOF
if cc.goAway != nil && isEOFOrNetReadError(err) {
err = GoAwayError{
LastStreamID: cc.goAway.LastStreamID,
ErrCode: cc.goAway.ErrCode,
DebugData: cc.goAwayDebug,
}
} else if err == io.EOF {
err = io.ErrUnexpectedEOF
}
for _, cs := range rl.activeRes {
cs.bufPipe.CloseWithError(err)
@@ -1228,15 +1337,20 @@ func (rl *clientConnReadLoop) cleanup() {
func (rl *clientConnReadLoop) run() error {
cc := rl.cc
rl.closeWhenIdle = cc.t.disableKeepAlives() || cc.singleUse
gotReply := false // ever saw a reply
gotReply := false // ever saw a HEADERS reply
gotSettings := false
for {
f, err := cc.fr.ReadFrame()
if err != nil {
cc.vlogf("Transport readFrame error: (%T) %v", err, err)
cc.vlogf("http2: Transport readFrame error on conn %p: (%T) %v", cc, err, err)
}
if se, ok := err.(StreamError); ok {
if cs := cc.streamByID(se.StreamID, true /*ended; remove it*/); cs != nil {
rl.endStreamError(cs, cc.fr.errDetail)
cs.cc.writeStreamReset(cs.ID, se.Code, err)
if se.Cause == nil {
se.Cause = cc.fr.errDetail
}
rl.endStreamError(cs, se)
}
continue
} else if err != nil {
@@ -1245,6 +1359,13 @@ func (rl *clientConnReadLoop) run() error {
if VerboseLogs {
cc.vlogf("http2: Transport received %s", summarizeFrame(f))
}
if !gotSettings {
if _, ok := f.(*SettingsFrame); !ok {
cc.logf("protocol error: received %T before a SETTINGS frame", f)
return ConnectionError(ErrCodeProtocol)
}
gotSettings = true
}
maybeIdle := false // whether frame might transition us to idle
switch f := f.(type) {
@@ -1273,6 +1394,9 @@ func (rl *clientConnReadLoop) run() error {
cc.logf("Transport: unhandled response frame type %T", f)
}
if err != nil {
if VerboseLogs {
cc.vlogf("http2: Transport conn %p received error from processing frame %v: %v", cc, summarizeFrame(f), err)
}
return err
}
if rl.closeWhenIdle && gotReply && maybeIdle && len(rl.activeRes) == 0 {
@@ -1522,10 +1646,27 @@ var errClosedResponseBody = errors.New("http2: response body closed")
func (b transportResponseBody) Close() error {
cs := b.cs
if cs.bufPipe.Err() != io.EOF {
// TODO: write test for this
cs.cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
cc := cs.cc
serverSentStreamEnd := cs.bufPipe.Err() == io.EOF
unread := cs.bufPipe.Len()
if unread > 0 || !serverSentStreamEnd {
cc.mu.Lock()
cc.wmu.Lock()
if !serverSentStreamEnd {
cc.fr.WriteRSTStream(cs.ID, ErrCodeCancel)
}
// Return connection-level flow control.
if unread > 0 {
cc.inflow.add(int32(unread))
cc.fr.WriteWindowUpdate(0, uint32(unread))
}
cc.bw.Flush()
cc.wmu.Unlock()
cc.mu.Unlock()
}
cs.bufPipe.BreakWithError(errClosedResponseBody)
return nil
}
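// Editorial note (sketch): for example, if a caller closes the body with
// 16384 buffered-but-unread bytes and no END_STREAM seen yet, the code above
// sends RST_STREAM(CANCEL) plus WINDOW_UPDATE(stream 0, 16384), so the unread
// data is not permanently deducted from the connection-level receive window.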
@@ -1533,6 +1674,7 @@ func (b transportResponseBody) Close() error {
func (rl *clientConnReadLoop) processData(f *DataFrame) error {
cc := rl.cc
cs := cc.streamByID(f.StreamID, f.StreamEnded())
data := f.Data()
if cs == nil {
cc.mu.Lock()
neverSent := cc.nextStreamID
@@ -1546,10 +1688,22 @@ func (rl *clientConnReadLoop) processData(f *DataFrame) error {
// TODO: be stricter here? only silently ignore things which
// we canceled, but not things which were closed normally
// by the peer? Tough without accumulating too much state.
// But at least return their flow control:
if f.Length > 0 {
cc.mu.Lock()
cc.inflow.add(int32(f.Length))
cc.mu.Unlock()
cc.wmu.Lock()
cc.fr.WriteWindowUpdate(0, uint32(f.Length))
cc.bw.Flush()
cc.wmu.Unlock()
}
return nil
}
if data := f.Data(); len(data) > 0 {
if cs.bufPipe.b == nil {
if f.Length > 0 {
if len(data) > 0 && cs.bufPipe.b == nil {
// Data frame after it's already closed?
cc.logf("http2: Transport received DATA frame for closed stream; closing connection")
return ConnectionError(ErrCodeProtocol)
@@ -1557,17 +1711,31 @@ func (rl *clientConnReadLoop) processData(f *DataFrame) error {
// Check connection-level flow control.
cc.mu.Lock()
if cs.inflow.available() >= int32(len(data)) {
cs.inflow.take(int32(len(data)))
if cs.inflow.available() >= int32(f.Length) {
cs.inflow.take(int32(f.Length))
} else {
cc.mu.Unlock()
return ConnectionError(ErrCodeFlowControl)
}
// Return any padded flow control now, since we won't
// refund it later on body reads.
if pad := int32(f.Length) - int32(len(data)); pad > 0 {
cs.inflow.add(pad)
cc.inflow.add(pad)
cc.wmu.Lock()
cc.fr.WriteWindowUpdate(0, uint32(pad))
cc.fr.WriteWindowUpdate(cs.ID, uint32(pad))
cc.bw.Flush()
cc.wmu.Unlock()
}
didReset := cs.didReset
cc.mu.Unlock()
if _, err := cs.bufPipe.Write(data); err != nil {
rl.endStreamError(cs, err)
return err
if len(data) > 0 && !didReset {
if _, err := cs.bufPipe.Write(data); err != nil {
rl.endStreamError(cs, err)
return err
}
}
}
@@ -1593,9 +1761,14 @@ func (rl *clientConnReadLoop) endStreamError(cs *clientStream, err error) {
}
cs.bufPipe.closeWithErrorAndCode(err, code)
delete(rl.activeRes, cs.ID)
if cs.req.Close || cs.req.Header.Get("Connection") == "close" {
if isConnectionCloseRequest(cs.req) {
rl.closeWhenIdle = true
}
select {
case cs.resc <- resAndError{err: err}:
default:
}
}
func (cs *clientStream) copyTrailers() {
@@ -1623,18 +1796,39 @@ func (rl *clientConnReadLoop) processSettings(f *SettingsFrame) error {
cc := rl.cc
cc.mu.Lock()
defer cc.mu.Unlock()
return f.ForeachSetting(func(s Setting) error {
if f.IsAck() {
if cc.wantSettingsAck {
cc.wantSettingsAck = false
return nil
}
return ConnectionError(ErrCodeProtocol)
}
err := f.ForeachSetting(func(s Setting) error {
switch s.ID {
case SettingMaxFrameSize:
cc.maxFrameSize = s.Val
case SettingMaxConcurrentStreams:
cc.maxConcurrentStreams = s.Val
case SettingInitialWindowSize:
// TODO: error if this is too large.
// Values above the maximum flow-control
// window size of 2^31-1 MUST be treated as a
// connection error (Section 5.4.1) of type
// FLOW_CONTROL_ERROR.
if s.Val > math.MaxInt32 {
return ConnectionError(ErrCodeFlowControl)
}
// TODO: adjust flow control of still-open
// Adjust flow control of currently-open
// streams by the difference of the old initial
// window size and this one.
delta := int32(s.Val) - int32(cc.initialWindowSize)
for _, cs := range cc.streams {
cs.flow.add(delta)
}
cc.cond.Broadcast()
cc.initialWindowSize = s.Val
default:
// TODO(bradfitz): handle more settings? SETTINGS_HEADER_TABLE_SIZE probably.
@@ -1642,6 +1836,16 @@ func (rl *clientConnReadLoop) processSettings(f *SettingsFrame) error {
}
return nil
})
if err != nil {
return err
}
cc.wmu.Lock()
defer cc.wmu.Unlock()
cc.fr.WriteSettingsAck()
cc.bw.Flush()
return cc.werr
}
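// Editorial note (worked example): if the peer raises
// SETTINGS_INITIAL_WINDOW_SIZE from the default 65535 to 131070, the delta
// above is +65535 and every open stream's send window grows by that amount;
// a decrease can push windows negative until WINDOW_UPDATE frames catch up,
// which is the behavior RFC 7540 section 6.9.2 calls for.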
func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error {
@@ -1678,7 +1882,7 @@ func (rl *clientConnReadLoop) processResetStream(f *RSTStreamFrame) error {
// which closes this, so there
// isn't a race.
default:
err := StreamError{cs.ID, f.ErrCode}
err := streamError(cs.ID, f.ErrCode)
cs.resetErr = err
close(cs.peerReset)
cs.bufPipe.CloseWithError(err)
@@ -1688,10 +1892,56 @@ func (rl *clientConnReadLoop) processResetStream(f *RSTStreamFrame) error {
return nil
}
// Ping sends a PING frame to the server and waits for the ack.
// Public implementation is in go17.go and not_go17.go
func (cc *ClientConn) ping(ctx contextContext) error {
c := make(chan struct{})
// Generate a random payload
var p [8]byte
for {
if _, err := rand.Read(p[:]); err != nil {
return err
}
cc.mu.Lock()
// check for dup before insert
if _, found := cc.pings[p]; !found {
cc.pings[p] = c
cc.mu.Unlock()
break
}
cc.mu.Unlock()
}
cc.wmu.Lock()
if err := cc.fr.WritePing(false, p); err != nil {
cc.wmu.Unlock()
return err
}
if err := cc.bw.Flush(); err != nil {
cc.wmu.Unlock()
return err
}
cc.wmu.Unlock()
select {
case <-c:
return nil
case <-ctx.Done():
return ctx.Err()
case <-cc.readerDone:
// connection closed
return cc.readerErr
}
}
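// Editorial sketch (not part of this change): go17.go is expected to expose
// this as Ping(ctx context.Context) error. A caller holding a *ClientConn
// could health-check the connection roughly like so (context import assumed;
// this file itself uses the contextContext shim):
//
//	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
//	defer cancel()
//	if err := cc.Ping(ctx); err != nil {
//		// Treat the connection as unhealthy and let it be retired.
//	}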
func (rl *clientConnReadLoop) processPing(f *PingFrame) error {
if f.IsAck() {
// 6.7 PING: " An endpoint MUST NOT respond to PING frames
// containing this flag."
cc := rl.cc
cc.mu.Lock()
defer cc.mu.Unlock()
// If ack, notify listener if any
if c, ok := cc.pings[f.Data]; ok {
close(c)
delete(cc.pings, f.Data)
}
return nil
}
cc := rl.cc
@@ -1715,8 +1965,10 @@ func (rl *clientConnReadLoop) processPushPromise(f *PushPromiseFrame) error {
}
func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, err error) {
// TODO: do something with err? send it as a debug frame to the peer?
// But that's only in GOAWAY. Invent a new frame type? Is there one already?
// TODO: map err to more interesting error codes, once the
// HTTP community comes up with some. But currently for
// RST_STREAM there's no equivalent to GOAWAY frame's debug
// data, and the error codes are all pretty vague ("cancel").
cc.wmu.Lock()
cc.fr.WriteRSTStream(streamID, code)
cc.bw.Flush()
@@ -1811,6 +2063,9 @@ func (t *Transport) getBodyWriterState(cs *clientStream, body io.Reader) (s body
resc := make(chan error, 1)
s.resc = resc
s.fn = func() {
cs.cc.mu.Lock()
cs.startedWrite = true
cs.cc.mu.Unlock()
resc <- cs.writeRequestBody(body, cs.req.Body)
}
s.delay = t.expectContinueTimeout()
@@ -1866,3 +2121,9 @@ func (s bodyWriterState) scheduleBodyWrite() {
s.timer.Reset(s.delay)
}
}
// isConnectionCloseRequest reports whether req should use its own
// connection for a single request and then close the connection.
func isConnectionCloseRequest(req *http.Request) bool {
return req.Close || httplex.HeaderValuesContainsToken(req.Header["Connection"], "close")
}
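// Illustrative sketch (editorial): requests the helper above reports as true,
// which makes the read loop close this connection once it goes idle.
//
//	req.Close = true                           -> true
//	req.Header.Set("Connection", "close")      -> true
//	req.Header.Set("Connection", "foo, close") -> true (token match)
//	req.Header.Set("Connection", "keep-alive") -> false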