Source file src/net/http/internal/http2/server.go

     1  // Copyright 2014 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // TODO: turn off the serve goroutine when idle, so
     6  // an idle conn only has the readFrames goroutine active. (which could
     7  // also be optimized probably to pin less memory in crypto/tls). This
     8  // would involve tracking when the serve goroutine is active (atomic
     9  // int32 read/CAS probably?) and starting it up when frames arrive,
    10  // and shutting it down when all handlers exit. the occasional PING
    11  // packets could use time.AfterFunc to call sc.wakeStartServeLoop()
    12  // (which is a no-op if already running) and then queue the PING write
    13  // as normal. The serve loop would then exit in most cases (if no
    14  // Handlers running) and not be woken up again until the PING packet
    15  // returns.
    16  
    17  // TODO (maybe): add a mechanism for Handlers to going into
    18  // half-closed-local mode (rw.(io.Closer) test?) but not exit their
    19  // handler, and continue to be able to read from the
    20  // Request.Body. This would be a somewhat semantic change from HTTP/1
    21  // (or at least what we expose in net/http), so I'd probably want to
    22  // add it there too. For now, this package says that returning from
    23  // the Handler ServeHTTP function means you're both done reading and
    24  // done writing, without a way to stop just one or the other.
    25  
    26  package http2
    27  
    28  import (
    29  	"bufio"
    30  	"bytes"
    31  	"context"
    32  	"crypto/rand"
    33  	"crypto/tls"
    34  	"errors"
    35  	"fmt"
    36  	"io"
    37  	"log"
    38  	"math"
    39  	"net"
    40  	"net/http/internal"
    41  	"net/http/internal/httpcommon"
    42  	"net/textproto"
    43  	"net/url"
    44  	"os"
    45  	"reflect"
    46  	"runtime"
    47  	"strconv"
    48  	"strings"
    49  	"sync"
    50  	"time"
    51  
    52  	"golang.org/x/net/http/httpguts"
    53  	"golang.org/x/net/http2/hpack"
    54  )
    55  
const (
	// prefaceTimeout is how long we wait for the client connection preface.
	prefaceTimeout        = 10 * time.Second
	firstSettingsTimeout  = 2 * time.Second // should be in-flight with preface anyway
	handlerChunkWriteSize = 4 << 10         // buffered write size between Handler and the frame writer
	defaultMaxStreams     = 250             // TODO: make this 100 as the GFE seems to?

	// maxQueuedControlFrames is the maximum number of control frames like
	// SETTINGS, PING and RST_STREAM that will be queued for writing before
	// the connection is closed to prevent memory exhaustion attacks.
	maxQueuedControlFrames = 10000
)
    67  
// Sentinel errors reported through stream/request bodies.
var (
	errClientDisconnected = errors.New("client disconnected")
	errClosedBody         = errors.New("body closed by handler")
	errHandlerComplete    = errors.New("http2: request body closed due to handler exiting")
	errStreamClosed       = errors.New("http2: stream closed")
)
    74  
// responseWriterStatePool recycles responseWriterState values (each with an
// attached chunk-sized bufio.Writer) across handlers to reduce allocation.
var responseWriterStatePool = sync.Pool{
	New: func() interface{} {
		rws := &responseWriterState{}
		rws.bw = bufio.NewWriterSize(chunkWriter{rws}, handlerChunkWriteSize)
		return rws
	},
}
    82  
// Test hooks. All are nil except when set by tests.
var (
	testHookOnConn    func()
	testHookOnPanicMu *sync.Mutex // nil except in tests
	testHookOnPanic   func(sc *serverConn, panicVal interface{}) (rePanic bool)
)
    89  
// Server is an HTTP/2 server.
type Server struct {
	// MaxHandlers limits the number of http.Handler ServeHTTP goroutines
	// which may run at a time over all connections.
	// Negative or zero means no limit.
	// TODO: implement
	MaxHandlers int

	// MaxConcurrentStreams optionally specifies the number of
	// concurrent streams that each client may have open at a
	// time. This is unrelated to the number of http.Handler goroutines
	// which may be active globally, which is MaxHandlers.
	// If zero, MaxConcurrentStreams defaults to at least 100, per
	// the HTTP/2 spec's recommendations.
	MaxConcurrentStreams uint32

	// MaxDecoderHeaderTableSize optionally specifies the http2
	// SETTINGS_HEADER_TABLE_SIZE to send in the initial settings frame. It
	// informs the remote endpoint of the maximum size of the header compression
	// table used to decode header blocks, in octets. If zero, the default value
	// of 4096 is used.
	MaxDecoderHeaderTableSize uint32

	// MaxEncoderHeaderTableSize optionally specifies an upper limit for the
	// header compression table used for encoding request headers. Received
	// SETTINGS_HEADER_TABLE_SIZE settings are capped at this limit. If zero,
	// the default value of 4096 is used.
	MaxEncoderHeaderTableSize uint32

	// MaxReadFrameSize optionally specifies the largest frame
	// this server is willing to read. A valid value is between
	// 16k and 16M, inclusive. If zero or otherwise invalid, a
	// default value is used.
	MaxReadFrameSize uint32

	// PermitProhibitedCipherSuites, if true, permits the use of
	// cipher suites prohibited by the HTTP/2 spec.
	PermitProhibitedCipherSuites bool

	// IdleTimeout specifies how long until idle clients should be
	// closed with a GOAWAY frame. PING frames are not considered
	// activity for the purposes of IdleTimeout.
	// If zero or negative, there is no timeout.
	IdleTimeout time.Duration

	// ReadIdleTimeout is the timeout after which a health check using a ping
	// frame will be carried out if no frame is received on the connection.
	// If zero, no health check is performed.
	ReadIdleTimeout time.Duration

	// PingTimeout is the timeout after which the connection will be closed
	// if a response to a ping is not received.
	// If zero, a default of 15 seconds is used.
	PingTimeout time.Duration

	// WriteByteTimeout is the timeout after which a connection will be
	// closed if no data can be written to it. The timeout begins when data is
	// available to write, and is extended whenever any bytes are written.
	// If zero or negative, there is no timeout.
	WriteByteTimeout time.Duration

	// MaxUploadBufferPerConnection is the size of the initial flow
	// control window for each connections. The HTTP/2 spec does not
	// allow this to be smaller than 65535 or larger than 2^32-1.
	// If the value is outside this range, a default value will be
	// used instead.
	MaxUploadBufferPerConnection int32

	// MaxUploadBufferPerStream is the size of the initial flow control
	// window for each stream. The HTTP/2 spec does not allow this to
	// be larger than 2^32-1. If the value is zero or larger than the
	// maximum, a default value will be used instead.
	MaxUploadBufferPerStream int32

	// NewWriteScheduler constructs a write scheduler for a connection.
	// If nil, a default scheduler is chosen.
	NewWriteScheduler func() WriteScheduler

	// CountError, if non-nil, is called on HTTP/2 server errors.
	// It's intended to increment a metric for monitoring, such
	// as an expvar or Prometheus metric.
	// The errType consists of only ASCII word characters.
	CountError func(errType string)

	// Internal state. This is a pointer (rather than embedded directly)
	// so that we don't embed a Mutex in this struct, which will make the
	// struct non-copyable, which might break some callers.
	state *serverInternalState
}
   179  
// serverInternalState tracks the set of live connections belonging to a
// Server so that a graceful shutdown can reach all of them. A nil
// *serverInternalState is valid (Server used without ConfigureServer);
// all its methods tolerate a nil receiver.
type serverInternalState struct {
	mu          sync.Mutex
	activeConns map[*serverConn]struct{} // guarded by mu

	// Pool of error channels. This is per-Server rather than global
	// because channels can't be reused across synctest bubbles.
	errChanPool sync.Pool
}
   188  
   189  func (s *serverInternalState) registerConn(sc *serverConn) {
   190  	if s == nil {
   191  		return // if the Server was used without calling ConfigureServer
   192  	}
   193  	s.mu.Lock()
   194  	s.activeConns[sc] = struct{}{}
   195  	s.mu.Unlock()
   196  }
   197  
   198  func (s *serverInternalState) unregisterConn(sc *serverConn) {
   199  	if s == nil {
   200  		return // if the Server was used without calling ConfigureServer
   201  	}
   202  	s.mu.Lock()
   203  	delete(s.activeConns, sc)
   204  	s.mu.Unlock()
   205  }
   206  
   207  func (s *serverInternalState) startGracefulShutdown() {
   208  	if s == nil {
   209  		return // if the Server was used without calling ConfigureServer
   210  	}
   211  	s.mu.Lock()
   212  	for sc := range s.activeConns {
   213  		sc.startGracefulShutdown()
   214  	}
   215  	s.mu.Unlock()
   216  }
   217  
// Global error channel pool used for uninitialized Servers.
// We use a per-Server pool when possible to avoid using channels across
// synctest bubbles. Each pooled channel has capacity 1 so a send from a
// completing write never blocks.
var errChanPool = sync.Pool{
	New: func() any { return make(chan error, 1) },
}
   223  
   224  func (s *serverInternalState) getErrChan() chan error {
   225  	if s == nil {
   226  		return errChanPool.Get().(chan error) // Server used without calling ConfigureServer
   227  	}
   228  	return s.errChanPool.Get().(chan error)
   229  }
   230  
   231  func (s *serverInternalState) putErrChan(ch chan error) {
   232  	if s == nil {
   233  		errChanPool.Put(ch) // Server used without calling ConfigureServer
   234  		return
   235  	}
   236  	s.errChanPool.Put(ch)
   237  }
   238  
   239  func (s *Server) Configure(conf ServerConfig, tcfg *tls.Config) error {
   240  	s.state = &serverInternalState{
   241  		activeConns: make(map[*serverConn]struct{}),
   242  		errChanPool: sync.Pool{New: func() any { return make(chan error, 1) }},
   243  	}
   244  
   245  	if tcfg.CipherSuites != nil && tcfg.MinVersion < tls.VersionTLS13 {
   246  		// If they already provided a TLS 1.0–1.2 CipherSuite list, return an
   247  		// error if it is missing ECDHE_RSA_WITH_AES_128_GCM_SHA256 or
   248  		// ECDHE_ECDSA_WITH_AES_128_GCM_SHA256.
   249  		haveRequired := false
   250  		for _, cs := range tcfg.CipherSuites {
   251  			switch cs {
   252  			case tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
   253  				// Alternative MTI cipher to not discourage ECDSA-only servers.
   254  				// See http://golang.org/cl/30721 for further information.
   255  				tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256:
   256  				haveRequired = true
   257  			}
   258  		}
   259  		if !haveRequired {
   260  			return fmt.Errorf("http2: TLSConfig.CipherSuites is missing an HTTP/2-required AES_128_GCM_SHA256 cipher (need at least one of TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 or TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256)")
   261  		}
   262  	}
   263  
   264  	// Note: not setting MinVersion to tls.VersionTLS12,
   265  	// as we don't want to interfere with HTTP/1.1 traffic
   266  	// on the user's server. We enforce TLS 1.2 later once
   267  	// we accept a connection. Ideally this should be done
   268  	// during next-proto selection, but using TLS <1.2 with
   269  	// HTTP/2 is still the client's bug.
   270  
   271  	return nil
   272  }
   273  
// GracefulShutdown asks every active connection registered with this
// Server to begin a graceful shutdown. Safe to call even if Configure
// was never run: serverInternalState methods tolerate a nil receiver.
func (s *Server) GracefulShutdown() {
	s.state.startGracefulShutdown()
}
   277  
   278  // ServeConnOpts are options for the Server.ServeConn method.
// ServeConnOpts are options for the Server.ServeConn method.
type ServeConnOpts struct {
	// Context is the base context to use.
	// If nil, context.Background is used.
	Context context.Context

	// BaseConfig optionally sets the base configuration
	// for values. If nil, defaults are used.
	BaseConfig ServerConfig

	// Handler specifies which handler to use for processing
	// requests. If nil, BaseConfig.Handler is used. If BaseConfig
	// or BaseConfig.Handler is nil, http.DefaultServeMux is used.
	Handler Handler

	// Settings is the decoded contents of the HTTP2-Settings header
	// in an h2c upgrade request. It is consumed (and nilled out) by
	// serveConn.
	Settings []byte

	// SawClientPreface is set if the HTTP/2 connection preface
	// has already been read from the connection.
	SawClientPreface bool
}
   301  
   302  func (o *ServeConnOpts) context() context.Context {
   303  	if o != nil && o.Context != nil {
   304  		return o.Context
   305  	}
   306  	return context.Background()
   307  }
   308  
   309  // ServeConn serves HTTP/2 requests on the provided connection and
   310  // blocks until the connection is no longer readable.
   311  //
   312  // ServeConn starts speaking HTTP/2 assuming that c has not had any
   313  // reads or writes. It writes its initial settings frame and expects
   314  // to be able to read the preface and settings frame from the
   315  // client. If c has a ConnectionState method like a *tls.Conn, the
   316  // ConnectionState is used to verify the TLS ciphersuite and to set
   317  // the Request.TLS field in Handlers.
   318  //
   319  // ServeConn does not support h2c by itself. Any h2c support must be
   320  // implemented in terms of providing a suitably-behaving net.Conn.
   321  //
   322  // The opts parameter is optional. If nil, default values are used.
   323  func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) {
   324  	if opts == nil {
   325  		opts = &ServeConnOpts{}
   326  	}
   327  
   328  	var newf func(*serverConn)
   329  	if inTests {
   330  		// Fetch NewConnContextKey if set, leave newf as nil otherwise.
   331  		newf, _ = opts.Context.Value(NewConnContextKey).(func(*serverConn))
   332  	}
   333  
   334  	s.serveConn(c, opts, newf)
   335  }
   336  
   337  type contextKey string
   338  
   339  var (
   340  	NewConnContextKey         = new("NewConnContextKey")
   341  	ConnectionStateContextKey = new("ConnectionStateContextKey")
   342  )
   343  
   344  func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverConn)) {
   345  	baseCtx, cancel := serverConnBaseContext(c, opts)
   346  	defer cancel()
   347  
   348  	conf := configFromServer(opts.BaseConfig, s)
   349  	sc := &serverConn{
   350  		srv:                         s,
   351  		hs:                          opts.BaseConfig,
   352  		conn:                        c,
   353  		baseCtx:                     baseCtx,
   354  		remoteAddrStr:               c.RemoteAddr().String(),
   355  		bw:                          newBufferedWriter(c, conf.WriteByteTimeout),
   356  		handler:                     opts.Handler,
   357  		streams:                     make(map[uint32]*stream),
   358  		readFrameCh:                 make(chan readFrameResult),
   359  		wantWriteFrameCh:            make(chan FrameWriteRequest, 8),
   360  		serveMsgCh:                  make(chan interface{}, 8),
   361  		wroteFrameCh:                make(chan frameWriteResult, 1), // buffered; one send in writeFrameAsync
   362  		bodyReadCh:                  make(chan bodyReadMsg),         // buffering doesn't matter either way
   363  		doneServing:                 make(chan struct{}),
   364  		clientMaxStreams:            math.MaxUint32, // Section 6.5.2: "Initially, there is no limit to this value"
   365  		advMaxStreams:               uint32(conf.MaxConcurrentStreams),
   366  		initialStreamSendWindowSize: initialWindowSize,
   367  		initialStreamRecvWindowSize: int32(conf.MaxReceiveBufferPerStream),
   368  		maxFrameSize:                initialMaxFrameSize,
   369  		pingTimeout:                 conf.PingTimeout,
   370  		countErrorFunc:              conf.CountError,
   371  		serveG:                      newGoroutineLock(),
   372  		pushEnabled:                 true,
   373  		sawClientPreface:            opts.SawClientPreface,
   374  	}
   375  	if newf != nil {
   376  		newf(sc)
   377  	}
   378  
   379  	s.state.registerConn(sc)
   380  	defer s.state.unregisterConn(sc)
   381  
   382  	// The net/http package sets the write deadline from the
   383  	// http.Server.WriteTimeout during the TLS handshake, but then
   384  	// passes the connection off to us with the deadline already set.
   385  	// Write deadlines are set per stream in serverConn.newStream.
   386  	// Disarm the net.Conn write deadline here.
   387  	if sc.hs.WriteTimeout() > 0 {
   388  		sc.conn.SetWriteDeadline(time.Time{})
   389  	}
   390  
   391  	switch {
   392  	case s.NewWriteScheduler != nil:
   393  		sc.writeSched = s.NewWriteScheduler()
   394  	case sc.hs.DisableClientPriority():
   395  		sc.writeSched = newRoundRobinWriteScheduler()
   396  	default:
   397  		sc.writeSched = newPriorityWriteSchedulerRFC9218()
   398  	}
   399  
   400  	// These start at the RFC-specified defaults. If there is a higher
   401  	// configured value for inflow, that will be updated when we send a
   402  	// WINDOW_UPDATE shortly after sending SETTINGS.
   403  	sc.flow.add(initialWindowSize)
   404  	sc.inflow.init(initialWindowSize)
   405  	sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf)
   406  	sc.hpackEncoder.SetMaxDynamicTableSizeLimit(uint32(conf.MaxEncoderHeaderTableSize))
   407  
   408  	fr := NewFramer(sc.bw, c)
   409  	if conf.CountError != nil {
   410  		fr.countError = conf.CountError
   411  	}
   412  	fr.ReadMetaHeaders = hpack.NewDecoder(uint32(conf.MaxDecoderHeaderTableSize), nil)
   413  	fr.MaxHeaderListSize = sc.maxHeaderListSize()
   414  	fr.SetMaxReadFrameSize(uint32(conf.MaxReadFrameSize))
   415  	sc.framer = fr
   416  
   417  	if tc, ok := c.(connectionStater); ok {
   418  		sc.tlsState = new(tls.ConnectionState)
   419  		*sc.tlsState = tc.ConnectionState()
   420  
   421  		// Optionally override the ConnectionState in tests.
   422  		if inTests {
   423  			f, ok := opts.Context.Value(ConnectionStateContextKey).(func() tls.ConnectionState)
   424  			if ok {
   425  				*sc.tlsState = f()
   426  			}
   427  		}
   428  
   429  		// 9.2 Use of TLS Features
   430  		// An implementation of HTTP/2 over TLS MUST use TLS
   431  		// 1.2 or higher with the restrictions on feature set
   432  		// and cipher suite described in this section. Due to
   433  		// implementation limitations, it might not be
   434  		// possible to fail TLS negotiation. An endpoint MUST
   435  		// immediately terminate an HTTP/2 connection that
   436  		// does not meet the TLS requirements described in
   437  		// this section with a connection error (Section
   438  		// 5.4.1) of type INADEQUATE_SECURITY.
   439  		if sc.tlsState.Version < tls.VersionTLS12 {
   440  			sc.rejectConn(ErrCodeInadequateSecurity, "TLS version too low")
   441  			return
   442  		}
   443  
   444  		if sc.tlsState.ServerName == "" {
   445  			// Client must use SNI, but we don't enforce that anymore,
   446  			// since it was causing problems when connecting to bare IP
   447  			// addresses during development.
   448  			//
   449  			// TODO: optionally enforce? Or enforce at the time we receive
   450  			// a new request, and verify the ServerName matches the :authority?
   451  			// But that precludes proxy situations, perhaps.
   452  			//
   453  			// So for now, do nothing here again.
   454  		}
   455  
   456  		if !conf.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) {
   457  			// "Endpoints MAY choose to generate a connection error
   458  			// (Section 5.4.1) of type INADEQUATE_SECURITY if one of
   459  			// the prohibited cipher suites are negotiated."
   460  			//
   461  			// We choose that. In my opinion, the spec is weak
   462  			// here. It also says both parties must support at least
   463  			// TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 so there's no
   464  			// excuses here. If we really must, we could allow an
   465  			// "AllowInsecureWeakCiphers" option on the server later.
   466  			// Let's see how it plays out first.
   467  			sc.rejectConn(ErrCodeInadequateSecurity, fmt.Sprintf("Prohibited TLS 1.2 Cipher Suite: %x", sc.tlsState.CipherSuite))
   468  			return
   469  		}
   470  	}
   471  
   472  	if opts.Settings != nil {
   473  		fr := &SettingsFrame{
   474  			FrameHeader: FrameHeader{valid: true},
   475  			p:           opts.Settings,
   476  		}
   477  		if err := fr.ForeachSetting(sc.processSetting); err != nil {
   478  			sc.rejectConn(ErrCodeProtocol, "invalid settings")
   479  			return
   480  		}
   481  		opts.Settings = nil
   482  	}
   483  
   484  	sc.serve(conf)
   485  }
   486  
   487  func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx context.Context, cancel func()) {
   488  	return context.WithCancel(opts.context())
   489  }
   490  
// rejectConn terminates the connection before serving begins: it logs
// (verbose only), writes a GOAWAY frame carrying err and debug data,
// flushes, and closes the underlying net.Conn.
func (sc *serverConn) rejectConn(err ErrCode, debug string) {
	sc.vlogf("http2: server rejecting conn: %v, %s", err, debug)
	// ignoring errors. hanging up anyway.
	sc.framer.WriteGoAway(0, err, []byte(debug))
	sc.bw.Flush()
	sc.conn.Close()
}
   498  
// serverConn is the state of an HTTP/2 server connection. Fields in the
// first group are immutable after setup; everything after the serveG
// marker is owned by the serve goroutine.
type serverConn struct {
	// Immutable:
	srv              *Server
	hs               ServerConfig
	conn             net.Conn
	bw               *bufferedWriter // writing to conn
	handler          Handler
	baseCtx          context.Context
	framer           *Framer
	doneServing      chan struct{}          // closed when serverConn.serve ends
	readFrameCh      chan readFrameResult   // written by serverConn.readFrames
	wantWriteFrameCh chan FrameWriteRequest // from handlers -> serve
	wroteFrameCh     chan frameWriteResult  // from writeFrameAsync -> serve, tickles more frame writes
	bodyReadCh       chan bodyReadMsg       // from handlers -> serve
	serveMsgCh       chan interface{}       // misc messages & code to send to / run on the serve loop
	flow             outflow                // conn-wide (not stream-specific) outbound flow control
	inflow           inflow                 // conn-wide inbound flow control
	tlsState         *tls.ConnectionState   // shared by all handlers, like net/http
	remoteAddrStr    string
	writeSched       WriteScheduler
	countErrorFunc   func(errType string)

	// Everything following is owned by the serve loop; use serveG.check():
	serveG                      goroutineLock // used to verify funcs are on serve()
	pushEnabled                 bool
	sawClientPreface            bool // preface has already been read, used in h2c upgrade
	sawFirstSettings            bool // got the initial SETTINGS frame after the preface
	needToSendSettingsAck       bool
	unackedSettings             int    // how many SETTINGS have we sent without ACKs?
	queuedControlFrames         int    // control frames in the writeSched queue
	clientMaxStreams            uint32 // SETTINGS_MAX_CONCURRENT_STREAMS from client (our PUSH_PROMISE limit)
	advMaxStreams               uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised to the client
	curClientStreams            uint32 // number of open streams initiated by the client
	curPushedStreams            uint32 // number of open streams initiated by server push
	curHandlers                 uint32 // number of running handler goroutines
	maxClientStreamID           uint32 // max ever seen from client (odd), or 0 if there have been no client requests
	maxPushPromiseID            uint32 // ID of the last push promise (even), or 0 if there have been no pushes
	streams                     map[uint32]*stream
	unstartedHandlers           []unstartedHandler
	initialStreamSendWindowSize int32
	initialStreamRecvWindowSize int32
	maxFrameSize                int32
	peerMaxHeaderListSize       uint32            // zero means unknown (default)
	canonHeader                 map[string]string // http2-lower-case -> Go-Canonical-Case
	canonHeaderKeysSize         int               // canonHeader keys size in bytes
	writingFrame                bool              // started writing a frame (on serve goroutine or separate)
	writingFrameAsync           bool              // started a frame on its own goroutine but haven't heard back on wroteFrameCh
	needsFrameFlush             bool              // last frame write wasn't a flush
	inGoAway                    bool              // we've started to or sent GOAWAY
	inFrameScheduleLoop         bool              // whether we're in the scheduleFrameWrite loop
	needToSendGoAway            bool              // we need to schedule a GOAWAY frame write
	pingSent                    bool
	sentPingData                [8]byte
	goAwayCode                  ErrCode
	shutdownTimer               *time.Timer // nil until used
	idleTimer                   *time.Timer // nil if unused
	readIdleTimeout             time.Duration
	pingTimeout                 time.Duration
	readIdleTimer               *time.Timer // nil if unused

	// Owned by the writeFrameAsync goroutine:
	headerWriteBuf bytes.Buffer
	hpackEncoder   *hpack.Encoder

	// Used by startGracefulShutdown.
	shutdownOnce sync.Once

	// Used for RFC 9218 prioritization.
	hasIntermediary bool // connection is done via an intermediary / proxy
	priorityAware   bool // the client has sent priority signal, meaning that it is aware of it.
}
   570  
   571  func (sc *serverConn) writeSchedIgnoresRFC7540() bool {
   572  	switch sc.writeSched.(type) {
   573  	case *priorityWriteSchedulerRFC9218:
   574  		return true
   575  	case *randomWriteScheduler:
   576  		return true
   577  	case *roundRobinWriteScheduler:
   578  		return true
   579  	default:
   580  		return false
   581  	}
   582  }
   583  
// DefaultMaxHeaderBytes is the default limit on request header bytes (1 MB).
const DefaultMaxHeaderBytes = 1 << 20 // keep this in sync with net/http
   585  
   586  func (sc *serverConn) maxHeaderListSize() uint32 {
   587  	n := sc.hs.MaxHeaderBytes()
   588  	if n <= 0 {
   589  		n = DefaultMaxHeaderBytes
   590  	}
   591  	return uint32(adjustHTTP1MaxHeaderSize(int64(n)))
   592  }
   593  
// curOpenStreams returns the total number of open streams on the
// connection: client-initiated plus server-pushed. Serve-loop only.
func (sc *serverConn) curOpenStreams() uint32 {
	sc.serveG.check()
	return sc.curClientStreams + sc.curPushedStreams
}
   598  
// stream represents a stream. This is the minimal metadata needed by
// the serve goroutine. Most of the actual stream state is owned by
// the http.Handler's goroutine in the responseWriter. Because the
// responseWriter's responseWriterState is recycled at the end of a
// handler, this struct intentionally has no pointer to the
// *responseWriter{,State} itself, as the Handler ending nils out the
// responseWriter's state field.
type stream struct {
	// immutable:
	sc        *serverConn
	id        uint32
	body      *pipe       // non-nil if expecting DATA frames
	cw        closeWaiter // closed when the stream transitions to the closed state
	ctx       context.Context
	cancelCtx func()

	// owned by serverConn's serve loop:
	bodyBytes        int64   // body bytes seen so far
	declBodyBytes    int64   // or -1 if undeclared
	flow             outflow // limits writing from Handler to client
	inflow           inflow  // what the client is allowed to POST/etc to us
	state            streamState
	resetQueued      bool        // RST_STREAM queued for write; set by sc.resetStream
	gotTrailerHeader bool        // HEADER frame for trailers was seen
	wroteHeaders     bool        // whether we wrote headers (not status 100)
	readDeadline     *time.Timer // nil if unused
	writeDeadline    *time.Timer // nil if unused
	closeErr         error       // set before cw is closed

	trailer    Header // accumulated trailers
	reqTrailer Header // handler's Request.Trailer
}
   631  
// Framer returns the connection's frame reader/writer.
func (sc *serverConn) Framer() *Framer  { return sc.framer }

// CloseConn closes the underlying network connection.
func (sc *serverConn) CloseConn() error { return sc.conn.Close() }

// Flush flushes the buffered writer down to the network connection.
func (sc *serverConn) Flush() error     { return sc.bw.Flush() }

// HeaderEncoder returns the connection's HPACK encoder together with
// the buffer it encodes header blocks into.
func (sc *serverConn) HeaderEncoder() (*hpack.Encoder, *bytes.Buffer) {
	return sc.hpackEncoder, &sc.headerWriteBuf
}
   638  
   639  func (sc *serverConn) state(streamID uint32) (streamState, *stream) {
   640  	sc.serveG.check()
   641  	// http://tools.ietf.org/html/rfc7540#section-5.1
   642  	if st, ok := sc.streams[streamID]; ok {
   643  		return st.state, st
   644  	}
   645  	// "The first use of a new stream identifier implicitly closes all
   646  	// streams in the "idle" state that might have been initiated by
   647  	// that peer with a lower-valued stream identifier. For example, if
   648  	// a client sends a HEADERS frame on stream 7 without ever sending a
   649  	// frame on stream 5, then stream 5 transitions to the "closed"
   650  	// state when the first frame for stream 7 is sent or received."
   651  	if streamID%2 == 1 {
   652  		if streamID <= sc.maxClientStreamID {
   653  			return stateClosed, nil
   654  		}
   655  	} else {
   656  		if streamID <= sc.maxPushPromiseID {
   657  			return stateClosed, nil
   658  		}
   659  	}
   660  	return stateIdle, nil
   661  }
   662  
// setConnState calls the net/http ConnState hook for this connection, if configured.
// Note that the net/http package does StateNew and StateClosed for us.
// There is currently no plan for StateHijacked or hijacking HTTP/2 connections.
func (sc *serverConn) setConnState(state ConnState) {
	sc.hs.ConnState(sc.conn, state)
}
   669  
// vlogf logs like logf, but only when verbose HTTP/2 logging
// (VerboseLogs) is enabled.
func (sc *serverConn) vlogf(format string, args ...interface{}) {
	if VerboseLogs {
		sc.logf(format, args...)
	}
}
   675  
   676  func (sc *serverConn) logf(format string, args ...interface{}) {
   677  	if lg := sc.hs.ErrorLog(); lg != nil {
   678  		lg.Printf(format, args...)
   679  	} else {
   680  		log.Printf(format, args...)
   681  	}
   682  }
   683  
   684  // errno returns v's underlying uintptr, else 0.
   685  //
   686  // TODO: remove this helper function once http2 can use build
   687  // tags. See comment in isClosedConnError.
   688  func errno(v error) uintptr {
   689  	if rv := reflect.ValueOf(v); rv.Kind() == reflect.Uintptr {
   690  		return uintptr(rv.Uint())
   691  	}
   692  	return 0
   693  }
   694  
   695  // isClosedConnError reports whether err is an error from use of a closed
   696  // network connection.
   697  func isClosedConnError(err error) bool {
   698  	if err == nil {
   699  		return false
   700  	}
   701  
   702  	if errors.Is(err, net.ErrClosed) {
   703  		return true
   704  	}
   705  
   706  	// TODO(bradfitz): x/tools/cmd/bundle doesn't really support
   707  	// build tags, so I can't make an http2_windows.go file with
   708  	// Windows-specific stuff. Fix that and move this, once we
   709  	// have a way to bundle this into std's net/http somehow.
   710  	if runtime.GOOS == "windows" {
   711  		if oe, ok := err.(*net.OpError); ok && oe.Op == "read" {
   712  			if se, ok := oe.Err.(*os.SyscallError); ok && se.Syscall == "wsarecv" {
   713  				const WSAECONNABORTED = 10053
   714  				const WSAECONNRESET = 10054
   715  				if n := errno(se.Err); n == WSAECONNRESET || n == WSAECONNABORTED {
   716  					return true
   717  				}
   718  			}
   719  		}
   720  	}
   721  	return false
   722  }
   723  
   724  func (sc *serverConn) condlogf(err error, format string, args ...interface{}) {
   725  	if err == nil {
   726  		return
   727  	}
   728  	if err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err) || err == errPrefaceTimeout {
   729  		// Boring, expected errors.
   730  		sc.vlogf(format, args...)
   731  	} else {
   732  		sc.logf(format, args...)
   733  	}
   734  }
   735  
// maxCachedCanonicalHeadersKeysSize is an arbitrarily-chosen limit on the size
// of the entries in the canonHeader cache.
// This should be larger than the size of unique, uncommon header keys likely to
// be sent by the peer, while not so high as to permit unreasonable memory usage
// if the peer sends an unbounded number of unique header keys.
// See canonicalHeader for how per-entry sizes are estimated against this budget.
const maxCachedCanonicalHeadersKeysSize = 2048
   742  
   743  func (sc *serverConn) canonicalHeader(v string) string {
   744  	sc.serveG.check()
   745  	cv, ok := httpcommon.CachedCanonicalHeader(v)
   746  	if ok {
   747  		return cv
   748  	}
   749  	cv, ok = sc.canonHeader[v]
   750  	if ok {
   751  		return cv
   752  	}
   753  	if sc.canonHeader == nil {
   754  		sc.canonHeader = make(map[string]string)
   755  	}
   756  	cv = textproto.CanonicalMIMEHeaderKey(v)
   757  	size := 100 + len(v)*2 // 100 bytes of map overhead + key + value
   758  	if sc.canonHeaderKeysSize+size <= maxCachedCanonicalHeadersKeysSize {
   759  		sc.canonHeader[v] = cv
   760  		sc.canonHeaderKeysSize += size
   761  	}
   762  	return cv
   763  }
   764  
// readFrameResult is the message passed from the readFrames goroutine
// to the serve loop for each ReadFrame call.
type readFrameResult struct {
	f   Frame // valid until readMore is called
	err error // error returned by ReadFrame, if any

	// readMore should be called once the consumer no longer needs or
	// retains f. After readMore, f is invalid and more frames can be
	// read.
	readMore func()
}
   774  
   775  // readFrames is the loop that reads incoming frames.
   776  // It takes care to only read one frame at a time, blocking until the
   777  // consumer is done with the frame.
   778  // It's run on its own goroutine.
   779  func (sc *serverConn) readFrames() {
   780  	gate := make(chan struct{})
   781  	gateDone := func() { gate <- struct{}{} }
   782  	for {
   783  		f, err := sc.framer.ReadFrame()
   784  		select {
   785  		case sc.readFrameCh <- readFrameResult{f, err, gateDone}:
   786  		case <-sc.doneServing:
   787  			return
   788  		}
   789  		select {
   790  		case <-gate:
   791  		case <-sc.doneServing:
   792  			return
   793  		}
   794  		if terminalReadFrameError(err) {
   795  			return
   796  		}
   797  	}
   798  }
   799  
// frameWriteResult is the message passed from writeFrameAsync to the serve goroutine.
type frameWriteResult struct {
	_   incomparable      // disallow == on this struct
	wr  FrameWriteRequest // what was written (or attempted)
	err error             // result of the writeFrame call
}
   806  
   807  // writeFrameAsync runs in its own goroutine and writes a single frame
   808  // and then reports when it's done.
   809  // At most one goroutine can be running writeFrameAsync at a time per
   810  // serverConn.
   811  func (sc *serverConn) writeFrameAsync(wr FrameWriteRequest, wd *writeData) {
   812  	var err error
   813  	if wd == nil {
   814  		err = wr.write.writeFrame(sc)
   815  	} else {
   816  		err = sc.framer.endWrite()
   817  	}
   818  	sc.wroteFrameCh <- frameWriteResult{wr: wr, err: err}
   819  }
   820  
// closeAllStreamsOnConnClose closes every remaining stream when the
// connection itself is going away, reporting errClientDisconnected.
func (sc *serverConn) closeAllStreamsOnConnClose() {
	sc.serveG.check()
	for _, st := range sc.streams {
		sc.closeStream(st, errClientDisconnected)
	}
}
   827  
// stopShutdownTimer stops the GOAWAY shutdown timer, if one was armed
// via shutDownIn.
func (sc *serverConn) stopShutdownTimer() {
	sc.serveG.check()
	if t := sc.shutdownTimer; t != nil {
		t.Stop()
	}
}
   834  
// notePanic gives test hooks a chance to observe (and optionally
// re-raise) a panic from the serve goroutine. It must be invoked via
// defer so that recover() is effective.
func (sc *serverConn) notePanic() {
	// Note: this is for serverConn.serve panicking, not http.Handler code.
	if testHookOnPanicMu != nil {
		testHookOnPanicMu.Lock()
		defer testHookOnPanicMu.Unlock()
	}
	if testHookOnPanic != nil {
		if e := recover(); e != nil {
			// A true return from the hook means the panic should
			// still propagate after being observed.
			if testHookOnPanic(sc, e) {
				panic(e)
			}
		}
	}
}
   849  
// serve is the main event loop for a server connection. It owns all
// connection state (guarded by sc.serveG) and multiplexes messages from
// the frame-reading goroutine, handler goroutines, and timers until the
// connection terminates.
func (sc *serverConn) serve(conf Config) {
	sc.serveG.check()
	defer sc.notePanic()
	defer sc.conn.Close()
	defer sc.closeAllStreamsOnConnClose()
	defer sc.stopShutdownTimer()
	defer close(sc.doneServing) // unblocks handlers trying to send

	if VerboseLogs {
		sc.vlogf("http2: server connection from %v on %p", sc.conn.RemoteAddr(), sc.hs)
	}

	// Advertise our settings first; the client needs them to size
	// frames, HPACK tables, and per-stream flow-control windows.
	settings := writeSettings{
		{SettingMaxFrameSize, uint32(conf.MaxReadFrameSize)},
		{SettingMaxConcurrentStreams, sc.advMaxStreams},
		{SettingMaxHeaderListSize, sc.maxHeaderListSize()},
		{SettingHeaderTableSize, uint32(conf.MaxDecoderHeaderTableSize)},
		{SettingInitialWindowSize, uint32(sc.initialStreamRecvWindowSize)},
	}
	if !disableExtendedConnectProtocol {
		settings = append(settings, Setting{SettingEnableConnectProtocol, 1})
	}
	if sc.writeSchedIgnoresRFC7540() {
		settings = append(settings, Setting{SettingNoRFC7540Priorities, 1})
	}
	sc.writeFrame(FrameWriteRequest{
		write: settings,
	})
	sc.unackedSettings++

	// Each connection starts with initialWindowSize inflow tokens.
	// If a higher value is configured, we add more tokens.
	if diff := conf.MaxReceiveBufferPerConnection - initialWindowSize; diff > 0 {
		sc.sendWindowUpdate(nil, int(diff))
	}

	if err := sc.readPreface(); err != nil {
		sc.condlogf(err, "http2: server: error reading preface from client %v: %v", sc.conn.RemoteAddr(), err)
		return
	}
	// Now that we've got the preface, get us out of the
	// "StateNew" state. We can't go directly to idle, though.
	// Active means we read some data and anticipate a request. We'll
	// do another Active when we get a HEADERS frame.
	sc.setConnState(ConnStateActive)
	sc.setConnState(ConnStateIdle)

	if sc.srv.IdleTimeout > 0 {
		sc.idleTimer = time.AfterFunc(sc.srv.IdleTimeout, sc.onIdleTimer)
		defer sc.idleTimer.Stop()
	}

	if conf.SendPingTimeout > 0 {
		sc.readIdleTimeout = conf.SendPingTimeout
		sc.readIdleTimer = time.AfterFunc(conf.SendPingTimeout, sc.onReadIdleTimer)
		defer sc.readIdleTimer.Stop()
	}

	go sc.readFrames() // closed by defer sc.conn.Close above

	settingsTimer := time.AfterFunc(firstSettingsTimeout, sc.onSettingsTimer)
	defer settingsTimer.Stop()

	lastFrameTime := time.Now()
	loopNum := 0
	for {
		loopNum++
		select {
		case wr := <-sc.wantWriteFrameCh:
			if se, ok := wr.write.(StreamError); ok {
				sc.resetStream(se)
				break
			}
			sc.writeFrame(wr)
		case res := <-sc.wroteFrameCh:
			sc.wroteFrame(res)
		case res := <-sc.readFrameCh:
			lastFrameTime = time.Now()
			// Process any written frames before reading new frames from the client since a
			// written frame could have triggered a new stream to be started.
			if sc.writingFrameAsync {
				select {
				case wroteRes := <-sc.wroteFrameCh:
					sc.wroteFrame(wroteRes)
				default:
				}
			}
			if !sc.processFrameFromReader(res) {
				return
			}
			res.readMore()
			if settingsTimer != nil {
				// First frame arrived, so the client met its deadline
				// for the initial SETTINGS; disarm the timer.
				settingsTimer.Stop()
				settingsTimer = nil
			}
		case m := <-sc.bodyReadCh:
			sc.noteBodyRead(m.st, m.n)
		case msg := <-sc.serveMsgCh:
			switch v := msg.(type) {
			case func(int):
				v(loopNum) // for testing
			case *serverMessage:
				switch v {
				case settingsTimerMsg:
					sc.logf("timeout waiting for SETTINGS frames from %v", sc.conn.RemoteAddr())
					return
				case idleTimerMsg:
					sc.vlogf("connection is idle")
					sc.goAway(ErrCodeNo)
				case readIdleTimerMsg:
					sc.handlePingTimer(lastFrameTime)
				case shutdownTimerMsg:
					sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr())
					return
				case gracefulShutdownMsg:
					sc.startGracefulShutdownInternal()
				case handlerDoneMsg:
					sc.handlerDone()
				default:
					panic("unknown timer")
				}
			case *startPushRequest:
				sc.startPush(v)
			case func(*serverConn):
				v(sc)
			default:
				panic(fmt.Sprintf("unexpected type %T", v))
			}
		}

		// If the peer is causing us to generate a lot of control frames,
		// but not reading them from us, assume they are trying to make us
		// run out of memory.
		if sc.queuedControlFrames > maxQueuedControlFrames {
			sc.vlogf("http2: too many control frames in send queue, closing connection")
			return
		}

		// Start the shutdown timer after sending a GOAWAY. When sending GOAWAY
		// with no error code (graceful shutdown), don't start the timer until
		// all open streams have been completed.
		sentGoAway := sc.inGoAway && !sc.needToSendGoAway && !sc.writingFrame
		gracefulShutdownComplete := sc.goAwayCode == ErrCodeNo && sc.curOpenStreams() == 0
		if sentGoAway && sc.shutdownTimer == nil && (sc.goAwayCode != ErrCodeNo || gracefulShutdownComplete) {
			sc.shutDownIn(goAwayTimeout)
		}
	}
}
   998  
   999  func (sc *serverConn) handlePingTimer(lastFrameReadTime time.Time) {
  1000  	if sc.pingSent {
  1001  		sc.logf("timeout waiting for PING response")
  1002  		if f := sc.countErrorFunc; f != nil {
  1003  			f("conn_close_lost_ping")
  1004  		}
  1005  		sc.conn.Close()
  1006  		return
  1007  	}
  1008  
  1009  	pingAt := lastFrameReadTime.Add(sc.readIdleTimeout)
  1010  	now := time.Now()
  1011  	if pingAt.After(now) {
  1012  		// We received frames since arming the ping timer.
  1013  		// Reset it for the next possible timeout.
  1014  		sc.readIdleTimer.Reset(pingAt.Sub(now))
  1015  		return
  1016  	}
  1017  
  1018  	sc.pingSent = true
  1019  	// Ignore crypto/rand.Read errors: It generally can't fail, and worse case if it does
  1020  	// is we send a PING frame containing 0s.
  1021  	_, _ = rand.Read(sc.sentPingData[:])
  1022  	sc.writeFrame(FrameWriteRequest{
  1023  		write: &writePing{data: sc.sentPingData},
  1024  	})
  1025  	sc.readIdleTimer.Reset(sc.pingTimeout)
  1026  }
  1027  
// serverMessage is a pointer-identity token sent over serveMsgCh to wake
// the serve loop for timer and shutdown events (compared by pointer).
type serverMessage int

// Message values sent to serveMsgCh.
var (
	settingsTimerMsg    = new(serverMessage) // client's initial SETTINGS never arrived
	idleTimerMsg        = new(serverMessage) // connection idle timeout fired
	readIdleTimerMsg    = new(serverMessage) // time to consider sending a keep-alive PING
	shutdownTimerMsg    = new(serverMessage) // GOAWAY close timer fired
	gracefulShutdownMsg = new(serverMessage) // request to begin graceful shutdown
	handlerDoneMsg      = new(serverMessage) // a handler goroutine finished
)
  1039  
// The on*Timer methods run on arbitrary goroutines (via time.AfterFunc)
// and forward their event to the serve goroutine through sendServeMsg.
func (sc *serverConn) onSettingsTimer() { sc.sendServeMsg(settingsTimerMsg) }
func (sc *serverConn) onIdleTimer()     { sc.sendServeMsg(idleTimerMsg) }
func (sc *serverConn) onReadIdleTimer() { sc.sendServeMsg(readIdleTimerMsg) }
func (sc *serverConn) onShutdownTimer() { sc.sendServeMsg(shutdownTimerMsg) }
  1044  
// sendServeMsg delivers msg to the serve goroutine, giving up if the
// connection is shutting down. It must not be called from the serve
// goroutine itself, which would deadlock on serveMsgCh.
func (sc *serverConn) sendServeMsg(msg interface{}) {
	sc.serveG.checkNotOn() // NOT
	select {
	case sc.serveMsgCh <- msg:
	case <-sc.doneServing:
	}
}
  1052  
// errPrefaceTimeout is returned by readPreface when the client fails to
// send its connection preface within prefaceTimeout.
var errPrefaceTimeout = errors.New("timeout waiting for client preface")
  1054  
  1055  // readPreface reads the ClientPreface greeting from the peer or
  1056  // returns errPrefaceTimeout on timeout, or an error if the greeting
  1057  // is invalid.
  1058  func (sc *serverConn) readPreface() error {
  1059  	if sc.sawClientPreface {
  1060  		return nil
  1061  	}
  1062  	errc := make(chan error, 1)
  1063  	go func() {
  1064  		// Read the client preface
  1065  		buf := make([]byte, len(ClientPreface))
  1066  		if _, err := io.ReadFull(sc.conn, buf); err != nil {
  1067  			errc <- err
  1068  		} else if !bytes.Equal(buf, clientPreface) {
  1069  			errc <- fmt.Errorf("bogus greeting %q", buf)
  1070  		} else {
  1071  			errc <- nil
  1072  		}
  1073  	}()
  1074  	timer := time.NewTimer(prefaceTimeout) // TODO: configurable on *Server?
  1075  	defer timer.Stop()
  1076  	select {
  1077  	case <-timer.C:
  1078  		return errPrefaceTimeout
  1079  	case err := <-errc:
  1080  		if err == nil {
  1081  			if VerboseLogs {
  1082  				sc.vlogf("http2: server: client %v said hello", sc.conn.RemoteAddr())
  1083  			}
  1084  		}
  1085  		return err
  1086  	}
  1087  }
  1088  
// writeDataPool recycles *writeData frames used by writeDataFromHandler,
// avoiding an allocation per response-body write.
var writeDataPool = sync.Pool{
	New: func() interface{} { return new(writeData) },
}
  1092  
// writeDataFromHandler writes DATA response frames from a handler on
// the given stream.
// It blocks until the frame has been written or the stream/connection
// ends, and runs on a handler goroutine (never the serve goroutine —
// see writeFrameFromHandler).
func (sc *serverConn) writeDataFromHandler(stream *stream, data []byte, endStream bool) error {
	ch := sc.srv.state.getErrChan()
	writeArg := writeDataPool.Get().(*writeData)
	*writeArg = writeData{stream.id, data, endStream}
	err := sc.writeFrameFromHandler(FrameWriteRequest{
		write:  writeArg,
		stream: stream,
		done:   ch,
	})
	if err != nil {
		return err
	}
	var frameWriteDone bool // the frame write is done (successfully or not)
	select {
	case err = <-ch:
		frameWriteDone = true
	case <-sc.doneServing:
		return errClientDisconnected
	case <-stream.cw:
		// If both ch and stream.cw were ready (as might
		// happen on the final Write after an http.Handler
		// ends), prefer the write result. Otherwise this
		// might just be us successfully closing the stream.
		// The writeFrameAsync and serve goroutines guarantee
		// that the ch send will happen before the stream.cw
		// close.
		select {
		case err = <-ch:
			frameWriteDone = true
		default:
			return errStreamClosed
		}
	}
	sc.srv.state.putErrChan(ch)
	if frameWriteDone {
		// Only recycle writeArg once the write has completed, so no
		// other goroutine can still be referencing it.
		writeDataPool.Put(writeArg)
	}
	return err
}
  1134  
// writeFrameFromHandler sends wr to sc.wantWriteFrameCh, but aborts
// if the connection has gone away. It returns errClientDisconnected
// when the serve loop has already exited.
//
// This must not be run from the serve goroutine itself, else it might
// deadlock writing to sc.wantWriteFrameCh (which is only mildly
// buffered and is read by serve itself). If you're on the serve
// goroutine, call writeFrame instead.
func (sc *serverConn) writeFrameFromHandler(wr FrameWriteRequest) error {
	sc.serveG.checkNotOn() // NOT
	select {
	case sc.wantWriteFrameCh <- wr:
		return nil
	case <-sc.doneServing:
		// Serve loop is gone.
		// Client has closed their connection to the server.
		return errClientDisconnected
	}
}
  1153  
// writeFrame schedules a frame to write and sends it if there's nothing
// already being written.
//
// There is no pushback here (the serve goroutine never blocks). It's
// the http.Handlers that block, waiting for their previous frames to
// make it onto the wire
//
// If you're not on the serve goroutine, use writeFrameFromHandler instead.
func (sc *serverConn) writeFrame(wr FrameWriteRequest) {
	sc.serveG.check()

	// If true, wr will not be written and wr.done will not be signaled.
	var ignoreWrite bool

	// We are not allowed to write frames on closed streams. RFC 7540 Section
	// 5.1.1 says: "An endpoint MUST NOT send frames other than PRIORITY on
	// a closed stream." Our server never sends PRIORITY, so that exception
	// does not apply.
	//
	// The serverConn might close an open stream while the stream's handler
	// is still running. For example, the server might close a stream when it
	// receives bad data from the client. If this happens, the handler might
	// attempt to write a frame after the stream has been closed (since the
	// handler hasn't yet been notified of the close). In this case, we simply
	// ignore the frame. The handler will notice that the stream is closed when
	// it waits for the frame to be written.
	//
	// As an exception to this rule, we allow sending RST_STREAM after close.
	// This allows us to immediately reject new streams without tracking any
	// state for those streams (except for the queued RST_STREAM frame). This
	// may result in duplicate RST_STREAMs in some cases, but the client should
	// ignore those.
	if wr.StreamID() != 0 {
		_, isReset := wr.write.(StreamError)
		if state, _ := sc.state(wr.StreamID()); state == stateClosed && !isReset {
			ignoreWrite = true
		}
	}

	// Don't send a 100-continue response if we've already sent headers.
	// See golang.org/issue/14030.
	switch wr.write.(type) {
	case *writeResHeaders:
		wr.stream.wroteHeaders = true
	case write100ContinueHeadersFrame:
		if wr.stream.wroteHeaders {
			// We do not need to notify wr.done because this frame is
			// never written with wr.done != nil.
			if wr.done != nil {
				panic("wr.done != nil for write100ContinueHeadersFrame")
			}
			ignoreWrite = true
		}
	}

	if !ignoreWrite {
		if wr.isControl() {
			sc.queuedControlFrames++
			// For extra safety, detect wraparounds, which should not happen,
			// and pull the plug.
			if sc.queuedControlFrames < 0 {
				sc.conn.Close()
			}
		}
		sc.writeSched.Push(wr)
	}
	// Kick the scheduler; it is a no-op if a write is already in flight.
	sc.scheduleFrameWrite()
}
  1222  
// startFrameWrite starts a goroutine to write wr (in a separate
// goroutine since that might block on the network), and updates the
// serve goroutine's state about the world, updated from info in wr.
func (sc *serverConn) startFrameWrite(wr FrameWriteRequest) {
	sc.serveG.check()
	if sc.writingFrame {
		panic("internal error: can only be writing one frame at a time")
	}

	st := wr.stream
	if st != nil {
		switch st.state {
		case stateHalfClosedLocal:
			switch wr.write.(type) {
			case StreamError, handlerPanicRST, writeWindowUpdate:
				// RFC 7540 Section 5.1 allows sending RST_STREAM, PRIORITY, and WINDOW_UPDATE
				// in this state. (We never send PRIORITY from the server, so that is not checked.)
			default:
				panic(fmt.Sprintf("internal error: attempt to send frame on a half-closed-local stream: %v", wr))
			}
		case stateClosed:
			panic(fmt.Sprintf("internal error: attempt to send frame on a closed stream: %v", wr))
		}
	}
	if wpp, ok := wr.write.(*writePushPromise); ok {
		var err error
		wpp.promisedID, err = wpp.allocatePromisedID()
		if err != nil {
			// Couldn't allocate a promised stream ID; fail the write
			// without ever starting it.
			sc.writingFrameAsync = false
			wr.replyToWriter(err)
			return
		}
	}

	sc.writingFrame = true
	sc.needsFrameFlush = true
	if wr.write.staysWithinBuffer(sc.bw.Available()) {
		// Small frame: write synchronously into the buffered writer;
		// no extra goroutine needed.
		sc.writingFrameAsync = false
		err := wr.write.writeFrame(sc)
		sc.wroteFrame(frameWriteResult{wr: wr, err: err})
	} else if wd, ok := wr.write.(*writeData); ok {
		// Encode the frame in the serve goroutine, to ensure we don't have
		// any lingering asynchronous references to data passed to Write.
		// See https://go.dev/issue/58446.
		sc.framer.startWriteDataPadded(wd.streamID, wd.endStream, wd.p, nil)
		sc.writingFrameAsync = true
		go sc.writeFrameAsync(wr, wd)
	} else {
		sc.writingFrameAsync = true
		go sc.writeFrameAsync(wr, nil)
	}
}
  1275  
// errHandlerPanicked is the error given to any callers blocked in a read from
// Request.Body when the main goroutine panics. Since most handlers read in the
// main ServeHTTP goroutine, this will show up rarely.
// It is passed to closeStream when a handlerPanicRST write completes
// (see wroteFrame).
var errHandlerPanicked = errors.New("http2: handler panicked")
  1280  
// wroteFrame is called on the serve goroutine with the result of
// whatever happened on writeFrameAsync.
func (sc *serverConn) wroteFrame(res frameWriteResult) {
	sc.serveG.check()
	if !sc.writingFrame {
		panic("internal error: expected to be already writing a frame")
	}
	sc.writingFrame = false
	sc.writingFrameAsync = false

	// A failed write means the connection is unusable; tear it down.
	if res.err != nil {
		sc.conn.Close()
	}

	wr := res.wr

	if writeEndsStream(wr.write) {
		st := wr.stream
		if st == nil {
			panic("internal error: expecting non-nil stream")
		}
		switch st.state {
		case stateOpen:
			// Here we would go to stateHalfClosedLocal in
			// theory, but since our handler is done and
			// the net/http package provides no mechanism
			// for closing a ResponseWriter while still
			// reading data (see possible TODO at top of
			// this file), we go into closed state here
			// anyway, after telling the peer we're
			// hanging up on them. We'll transition to
			// stateClosed after the RST_STREAM frame is
			// written.
			st.state = stateHalfClosedLocal
			// Section 8.1: a server MAY request that the client abort
			// transmission of a request without error by sending a
			// RST_STREAM with an error code of NO_ERROR after sending
			// a complete response.
			sc.resetStream(streamError(st.id, ErrCodeNo))
		case stateHalfClosedRemote:
			sc.closeStream(st, errHandlerComplete)
		}
	} else {
		switch v := wr.write.(type) {
		case StreamError:
			// st may be unknown if the RST_STREAM was generated to reject bad input.
			if st, ok := sc.streams[v.StreamID]; ok {
				sc.closeStream(st, v)
			}
		case handlerPanicRST:
			sc.closeStream(wr.stream, errHandlerPanicked)
		}
	}

	// Reply (if requested) to unblock the ServeHTTP goroutine.
	wr.replyToWriter(res.err)

	sc.scheduleFrameWrite()
}
  1340  
// scheduleFrameWrite tickles the frame writing scheduler.
//
// If a frame is already being written, nothing happens. This will be called again
// when the frame is done being written.
//
// If a frame isn't being written and we need to send one, the best frame
// to send is selected by writeSched.
//
// If a frame isn't being written and there's nothing else to send, we
// flush the write buffer.
func (sc *serverConn) scheduleFrameWrite() {
	sc.serveG.check()
	// inFrameScheduleLoop guards against reentrancy: startFrameWrite may
	// complete synchronously and call wroteFrame, which calls back here.
	if sc.writingFrame || sc.inFrameScheduleLoop {
		return
	}
	sc.inFrameScheduleLoop = true
	for !sc.writingFrameAsync {
		// Priority order: GOAWAY, then SETTINGS ack, then scheduled
		// frames, then a final flush.
		if sc.needToSendGoAway {
			sc.needToSendGoAway = false
			sc.startFrameWrite(FrameWriteRequest{
				write: &writeGoAway{
					maxStreamID: sc.maxClientStreamID,
					code:        sc.goAwayCode,
				},
			})
			continue
		}
		if sc.needToSendSettingsAck {
			sc.needToSendSettingsAck = false
			sc.startFrameWrite(FrameWriteRequest{write: writeSettingsAck{}})
			continue
		}
		if !sc.inGoAway || sc.goAwayCode == ErrCodeNo {
			if wr, ok := sc.writeSched.Pop(); ok {
				if wr.isControl() {
					sc.queuedControlFrames--
				}
				sc.startFrameWrite(wr)
				continue
			}
		}
		if sc.needsFrameFlush {
			sc.startFrameWrite(FrameWriteRequest{write: flushFrameWriter{}})
			sc.needsFrameFlush = false // after startFrameWrite, since it sets this true
			continue
		}
		break
	}
	sc.inFrameScheduleLoop = false
}
  1391  
// startGracefulShutdown gracefully shuts down a connection. This
// sends GOAWAY with ErrCodeNo to tell the client we're gracefully
// shutting down. The connection isn't closed until all current
// streams are done.
//
// startGracefulShutdown returns immediately; it does not wait until
// the connection has shut down.
func (sc *serverConn) startGracefulShutdown() {
	sc.serveG.checkNotOn() // NOT
	// shutdownOnce ensures the message is delivered at most once even
	// if multiple callers race.
	sc.shutdownOnce.Do(func() { sc.sendServeMsg(gracefulShutdownMsg) })
}
  1403  
// After sending GOAWAY with an error code (non-graceful shutdown), the
// connection will close after goAwayTimeout.
//
// If we close the connection immediately after sending GOAWAY, there may
// be unsent data in our kernel receive buffer, which will cause the kernel
// to send a TCP RST on close() instead of a FIN. This RST will abort the
// connection immediately, whether or not the client had received the GOAWAY.
//
// Ideally we should delay for at least 1 RTT + epsilon so the client has
// a chance to read the GOAWAY and stop sending messages. Measuring RTT
// is hard, so we approximate with 1 second. See golang.org/issue/18701.
//
// This is a var so it can be shorter in tests, where all requests uses the
// loopback interface making the expected RTT very small.
//
// TODO: configurable?
// The serve loop arms this delay via shutDownIn once a GOAWAY is sent.
var goAwayTimeout = 1 * time.Second
  1421  
// startGracefulShutdownInternal is the serve-goroutine half of
// startGracefulShutdown: it queues a GOAWAY with ErrCodeNo, leaving
// existing streams to finish.
func (sc *serverConn) startGracefulShutdownInternal() {
	sc.goAway(ErrCodeNo)
}
  1425  
  1426  func (sc *serverConn) goAway(code ErrCode) {
  1427  	sc.serveG.check()
  1428  	if sc.inGoAway {
  1429  		if sc.goAwayCode == ErrCodeNo {
  1430  			sc.goAwayCode = code
  1431  		}
  1432  		return
  1433  	}
  1434  	sc.inGoAway = true
  1435  	sc.needToSendGoAway = true
  1436  	sc.goAwayCode = code
  1437  	sc.scheduleFrameWrite()
  1438  }
  1439  
// shutDownIn arms the shutdown timer: after d, the serve loop receives
// shutdownTimerMsg (via onShutdownTimer) and closes the connection.
func (sc *serverConn) shutDownIn(d time.Duration) {
	sc.serveG.check()
	sc.shutdownTimer = time.AfterFunc(d, sc.onShutdownTimer)
}
  1444  
// resetStream queues an RST_STREAM frame for se's stream and, if the
// stream is one we're tracking, marks it as having a reset queued.
func (sc *serverConn) resetStream(se StreamError) {
	sc.serveG.check()
	sc.writeFrame(FrameWriteRequest{write: se})
	if st, ok := sc.streams[se.StreamID]; ok {
		st.resetQueued = true
	}
}
  1452  
// processFrameFromReader processes the serve loop's read from readFrameCh from the
// frame-reading goroutine.
// processFrameFromReader returns whether the connection should be kept open.
func (sc *serverConn) processFrameFromReader(res readFrameResult) bool {
	sc.serveG.check()
	err := res.err
	if err != nil {
		if err == ErrFrameTooLarge {
			sc.goAway(ErrCodeFrameSize)
			return true // goAway will close the loop
		}
		clientGone := err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err)
		if clientGone {
			// TODO: could we also get into this state if
			// the peer does a half close
			// (e.g. CloseWrite) because they're done
			// sending frames but they're still wanting
			// our open replies?  Investigate.
			// TODO: add CloseWrite to crypto/tls.Conn first
			// so we have a way to test this? I suppose
			// just for testing we could have a non-TLS mode.
			return false
		}
	} else {
		f := res.f
		if VerboseLogs {
			sc.vlogf("http2: server read frame %v", summarizeFrame(f))
		}
		err = sc.processFrame(f)
		if err == nil {
			return true
		}
	}

	// At this point err is either a non-fatal read error or a
	// protocol error from processFrame; dispatch on its type.
	switch ev := err.(type) {
	case StreamError:
		sc.resetStream(ev)
		return true
	case goAwayFlowError:
		sc.goAway(ErrCodeFlowControl)
		return true
	case ConnectionError:
		if res.f != nil {
			if id := res.f.Header().StreamID; id > sc.maxClientStreamID {
				sc.maxClientStreamID = id
			}
		}
		sc.logf("http2: server connection error from %v: %v", sc.conn.RemoteAddr(), ev)
		sc.goAway(ErrCode(ev))
		return true // goAway will handle shutdown
	default:
		// Read errors are routine (verbose log only); unexpected
		// processFrame errors go to the real error log.
		if res.err != nil {
			sc.vlogf("http2: server closing client connection; error reading frame from client %s: %v", sc.conn.RemoteAddr(), err)
		} else {
			sc.logf("http2: server closing client connection: %v", err)
		}
		return false
	}
}
  1512  
// processFrame applies connection-level checks to a single frame read
// from the peer and dispatches it to the per-frame-type handler.
func (sc *serverConn) processFrame(f Frame) error {
	sc.serveG.check()

	// First frame received must be SETTINGS.
	if !sc.sawFirstSettings {
		if _, ok := f.(*SettingsFrame); !ok {
			return sc.countError("first_settings", ConnectionError(ErrCodeProtocol))
		}
		sc.sawFirstSettings = true
	}

	// Discard frames for streams initiated after the identified last
	// stream sent in a GOAWAY, or all frames after sending an error.
	// We still need to return connection-level flow control for DATA frames.
	// RFC 9113 Section 6.8.
	if sc.inGoAway && (sc.goAwayCode != ErrCodeNo || f.Header().StreamID > sc.maxClientStreamID) {

		if f, ok := f.(*DataFrame); ok {
			if !sc.inflow.take(f.Length) {
				return sc.countError("data_flow", streamError(f.Header().StreamID, ErrCodeFlowControl))
			}
			sc.sendWindowUpdate(nil, int(f.Length)) // conn-level
		}
		return nil
	}

	switch f := f.(type) {
	case *SettingsFrame:
		return sc.processSettings(f)
	case *MetaHeadersFrame:
		return sc.processHeaders(f)
	case *WindowUpdateFrame:
		return sc.processWindowUpdate(f)
	case *PingFrame:
		return sc.processPing(f)
	case *DataFrame:
		return sc.processData(f)
	case *RSTStreamFrame:
		return sc.processResetStream(f)
	case *PriorityFrame:
		return sc.processPriority(f)
	case *GoAwayFrame:
		return sc.processGoAway(f)
	case *PushPromiseFrame:
		// A client cannot push. Thus, servers MUST treat the receipt of a PUSH_PROMISE
		// frame as a connection error (Section 5.4.1) of type PROTOCOL_ERROR.
		return sc.countError("push_promise", ConnectionError(ErrCodeProtocol))
	case *PriorityUpdateFrame:
		return sc.processPriorityUpdate(f)
	default:
		sc.vlogf("http2: server ignoring frame: %v", f.Header())
		return nil
	}
}
  1567  
  1568  func (sc *serverConn) processPing(f *PingFrame) error {
  1569  	sc.serveG.check()
  1570  	if f.IsAck() {
  1571  		if sc.pingSent && sc.sentPingData == f.Data {
  1572  			// This is a response to a PING we sent.
  1573  			sc.pingSent = false
  1574  			sc.readIdleTimer.Reset(sc.readIdleTimeout)
  1575  		}
  1576  		// 6.7 PING: " An endpoint MUST NOT respond to PING frames
  1577  		// containing this flag."
  1578  		return nil
  1579  	}
  1580  	if f.StreamID != 0 {
  1581  		// "PING frames are not associated with any individual
  1582  		// stream. If a PING frame is received with a stream
  1583  		// identifier field value other than 0x0, the recipient MUST
  1584  		// respond with a connection error (Section 5.4.1) of type
  1585  		// PROTOCOL_ERROR."
  1586  		return sc.countError("ping_on_stream", ConnectionError(ErrCodeProtocol))
  1587  	}
  1588  	sc.writeFrame(FrameWriteRequest{write: writePingAck{f}})
  1589  	return nil
  1590  }
  1591  
// processWindowUpdate applies an incoming WINDOW_UPDATE frame to the
// per-stream send window (StreamID != 0) or the connection-level send
// window (StreamID == 0), then kicks the frame scheduler in case the
// new credit unblocks pending writes.
func (sc *serverConn) processWindowUpdate(f *WindowUpdateFrame) error {
	sc.serveG.check()
	switch {
	case f.StreamID != 0: // stream-level flow control
		state, st := sc.state(f.StreamID)
		if state == stateIdle {
			// Section 5.1: "Receiving any frame other than HEADERS
			// or PRIORITY on a stream in this state MUST be
			// treated as a connection error (Section 5.4.1) of
			// type PROTOCOL_ERROR."
			return sc.countError("stream_idle", ConnectionError(ErrCodeProtocol))
		}
		if st == nil {
			// "WINDOW_UPDATE can be sent by a peer that has sent a
			// frame bearing the END_STREAM flag. This means that a
			// receiver could receive a WINDOW_UPDATE frame on a "half
			// closed (remote)" or "closed" stream. A receiver MUST
			// NOT treat this as an error, see Section 5.1."
			return nil
		}
		// Overflowing the stream's window is a stream-level error.
		if !st.flow.add(int32(f.Increment)) {
			return sc.countError("bad_flow", streamError(f.StreamID, ErrCodeFlowControl))
		}
	default: // connection-level flow control
		// Overflowing the connection window takes down the whole
		// connection with a FLOW_CONTROL_ERROR GOAWAY.
		if !sc.flow.add(int32(f.Increment)) {
			return goAwayFlowError{}
		}
	}
	sc.scheduleFrameWrite()
	return nil
}
  1623  
  1624  func (sc *serverConn) processResetStream(f *RSTStreamFrame) error {
  1625  	sc.serveG.check()
  1626  
  1627  	state, st := sc.state(f.StreamID)
  1628  	if state == stateIdle {
  1629  		// 6.4 "RST_STREAM frames MUST NOT be sent for a
  1630  		// stream in the "idle" state. If a RST_STREAM frame
  1631  		// identifying an idle stream is received, the
  1632  		// recipient MUST treat this as a connection error
  1633  		// (Section 5.4.1) of type PROTOCOL_ERROR.
  1634  		return sc.countError("reset_idle_stream", ConnectionError(ErrCodeProtocol))
  1635  	}
  1636  	if st != nil {
  1637  		st.cancelCtx()
  1638  		sc.closeStream(st, streamError(f.StreamID, f.ErrCode))
  1639  	}
  1640  	return nil
  1641  }
  1642  
// closeStream transitions st to stateClosed and releases its
// per-stream resources: deadline timers, open-stream accounting,
// buffered body flow control, the stream's context, and its write
// scheduler state. It runs on the serve goroutine and must not be
// called on a stream that is idle or already closed.
func (sc *serverConn) closeStream(st *stream, err error) {
	sc.serveG.check()
	if st.state == stateIdle || st.state == stateClosed {
		panic(fmt.Sprintf("invariant; can't close stream in state %v", st.state))
	}
	st.state = stateClosed
	// Stop any pending per-stream deadline timers.
	if st.readDeadline != nil {
		st.readDeadline.Stop()
	}
	if st.writeDeadline != nil {
		st.writeDeadline.Stop()
	}
	// Maintain the counters used to enforce concurrent-stream limits.
	if st.isPushed() {
		sc.curPushedStreams--
	} else {
		sc.curClientStreams--
	}
	delete(sc.streams, st.id)
	if len(sc.streams) == 0 {
		// Last open stream: the connection is idle again.
		sc.setConnState(ConnStateIdle)
		if sc.srv.IdleTimeout > 0 && sc.idleTimer != nil {
			sc.idleTimer.Reset(sc.srv.IdleTimeout)
		}
		if h1ServerKeepAlivesDisabled(sc.hs) {
			sc.startGracefulShutdownInternal()
		}
	}
	if p := st.body; p != nil {
		// Return any buffered unread bytes worth of conn-level flow control.
		// See golang.org/issue/16481
		sc.sendWindowUpdate(nil, p.Len())

		p.CloseWithError(err)
	}
	// Unwrap StreamErrors so the recorded close error is the
	// underlying cause (or a generic errStreamClosed) rather than
	// the protocol-level wrapper.
	if e, ok := err.(StreamError); ok {
		if e.Cause != nil {
			err = e.Cause
		} else {
			err = errStreamClosed
		}
	}
	st.closeErr = err
	st.cancelCtx()
	st.cw.Close() // signals Handler's CloseNotifier, unblocks writes, etc
	sc.writeSched.CloseStream(st.id)
}
  1689  
// processSettings handles an incoming SETTINGS frame. For ACK frames
// it only decrements the count of our own unacknowledged SETTINGS;
// for non-ACK frames it applies each setting and then schedules a
// SETTINGS ACK to be written.
func (sc *serverConn) processSettings(f *SettingsFrame) error {
	sc.serveG.check()
	if f.IsAck() {
		sc.unackedSettings--
		if sc.unackedSettings < 0 {
			// Why is the peer ACKing settings we never sent?
			// The spec doesn't mention this case, but
			// hang up on them anyway.
			return sc.countError("ack_mystery", ConnectionError(ErrCodeProtocol))
		}
		return nil
	}
	if f.NumSettings() > 100 || f.HasDuplicates() {
		// This isn't actually in the spec, but hang up on
		// suspiciously large settings frames or those with
		// duplicate entries.
		return sc.countError("settings_big_or_dups", ConnectionError(ErrCodeProtocol))
	}
	if err := f.ForeachSetting(sc.processSetting); err != nil {
		return err
	}
	// TODO: judging by RFC 7540, Section 6.5.3 each SETTINGS frame should be
	// acknowledged individually, even if multiple are received before the ACK.
	sc.needToSendSettingsAck = true
	sc.scheduleFrameWrite()
	return nil
}
  1717  
// processSetting validates a single setting from a SETTINGS frame and
// applies it to the connection's record of the peer's parameters.
// Unknown settings are ignored as the spec requires.
func (sc *serverConn) processSetting(s Setting) error {
	sc.serveG.check()
	if err := s.Valid(); err != nil {
		return err
	}
	if VerboseLogs {
		sc.vlogf("http2: server processing setting %v", s)
	}
	switch s.ID {
	case SettingHeaderTableSize:
		sc.hpackEncoder.SetMaxDynamicTableSize(s.Val)
	case SettingEnablePush:
		sc.pushEnabled = s.Val != 0
	case SettingMaxConcurrentStreams:
		sc.clientMaxStreams = s.Val
	case SettingInitialWindowSize:
		// Resizes all existing stream send windows; may fail with a
		// connection error, hence the early return.
		return sc.processSettingInitialWindowSize(s.Val)
	case SettingMaxFrameSize:
		sc.maxFrameSize = int32(s.Val) // the maximum valid s.Val is < 2^31
	case SettingMaxHeaderListSize:
		sc.peerMaxHeaderListSize = s.Val
	case SettingEnableConnectProtocol:
		// Receipt of this parameter by a server does not
		// have any impact
	case SettingNoRFC7540Priorities:
		// Only 0 and 1 are defined values for this setting.
		if s.Val > 1 {
			return ConnectionError(ErrCodeProtocol)
		}
	default:
		// Unknown setting: "An endpoint that receives a SETTINGS
		// frame with any unknown or unsupported identifier MUST
		// ignore that setting."
		if VerboseLogs {
			sc.vlogf("http2: server ignoring unknown setting %v", s)
		}
	}
	return nil
}
  1756  
// processSettingInitialWindowSize applies a new
// SETTINGS_INITIAL_WINDOW_SIZE value from the peer, adjusting every
// open stream's send flow-control window by the delta between the new
// and old values (RFC 7540, Section 6.9.2).
func (sc *serverConn) processSettingInitialWindowSize(val uint32) error {
	sc.serveG.check()
	// Note: val already validated to be within range by
	// processSetting's Valid call.

	// "A SETTINGS frame can alter the initial flow control window
	// size for all current streams. When the value of
	// SETTINGS_INITIAL_WINDOW_SIZE changes, a receiver MUST
	// adjust the size of all stream flow control windows that it
	// maintains by the difference between the new value and the
	// old value."
	old := sc.initialStreamSendWindowSize
	sc.initialStreamSendWindowSize = int32(val)
	growth := int32(val) - old // may be negative
	for _, st := range sc.streams {
		if !st.flow.add(growth) {
			// 6.9.2 Initial Flow Control Window Size
			// "An endpoint MUST treat a change to
			// SETTINGS_INITIAL_WINDOW_SIZE that causes any flow
			// control window to exceed the maximum size as a
			// connection error (Section 5.4.1) of type
			// FLOW_CONTROL_ERROR."
			return sc.countError("setting_win_size", ConnectionError(ErrCodeFlowControl))
		}
	}
	return nil
}
  1784  
// processData handles an incoming DATA frame: it validates the stream
// state, enforces connection- and stream-level flow control, checks
// the payload against any declared Content-Length, writes the data
// into the request body pipe, and refunds flow-control credit for
// padding or discarded bytes.
func (sc *serverConn) processData(f *DataFrame) error {
	sc.serveG.check()
	id := f.Header().StreamID

	data := f.Data()
	state, st := sc.state(id)
	if id == 0 || state == stateIdle {
		// Section 6.1: "DATA frames MUST be associated with a
		// stream. If a DATA frame is received whose stream
		// identifier field is 0x0, the recipient MUST respond
		// with a connection error (Section 5.4.1) of type
		// PROTOCOL_ERROR."
		//
		// Section 5.1: "Receiving any frame other than HEADERS
		// or PRIORITY on a stream in this state MUST be
		// treated as a connection error (Section 5.4.1) of
		// type PROTOCOL_ERROR."
		return sc.countError("data_on_idle", ConnectionError(ErrCodeProtocol))
	}

	// "If a DATA frame is received whose stream is not in "open"
	// or "half closed (local)" state, the recipient MUST respond
	// with a stream error (Section 5.4.2) of type STREAM_CLOSED."
	if st == nil || state != stateOpen || st.gotTrailerHeader || st.resetQueued {
		// This includes sending a RST_STREAM if the stream is
		// in stateHalfClosedLocal (which currently means that
		// the http.Handler returned, so it's done reading &
		// done writing). Try to stop the client from sending
		// more DATA.

		// But still enforce their connection-level flow control,
		// and return any flow control bytes since we're not going
		// to consume them.
		if !sc.inflow.take(f.Length) {
			return sc.countError("data_flow", streamError(id, ErrCodeFlowControl))
		}
		sc.sendWindowUpdate(nil, int(f.Length)) // conn-level

		if st != nil && st.resetQueued {
			// Already have a stream error in flight. Don't send another.
			return nil
		}
		return sc.countError("closed", streamError(id, ErrCodeStreamClosed))
	}
	if st.body == nil {
		panic("internal error: should have a body in this state")
	}

	// Sender sending more than they'd declared?
	if st.declBodyBytes != -1 && st.bodyBytes+int64(len(data)) > st.declBodyBytes {
		// Take and immediately refund the conn-level flow control,
		// since the excess body will never reach the handler.
		if !sc.inflow.take(f.Length) {
			return sc.countError("data_flow", streamError(id, ErrCodeFlowControl))
		}
		sc.sendWindowUpdate(nil, int(f.Length)) // conn-level

		st.body.CloseWithError(fmt.Errorf("sender tried to send more than declared Content-Length of %d bytes", st.declBodyBytes))
		// RFC 7540, sec 8.1.2.6: A request or response is also malformed if the
		// value of a content-length header field does not equal the sum of the
		// DATA frame payload lengths that form the body.
		return sc.countError("send_too_much", streamError(id, ErrCodeProtocol))
	}
	if f.Length > 0 {
		// Check whether the client has flow control quota.
		if !takeInflows(&sc.inflow, &st.inflow, f.Length) {
			return sc.countError("flow_on_data_length", streamError(id, ErrCodeFlowControl))
		}

		if len(data) > 0 {
			st.bodyBytes += int64(len(data))
			wrote, err := st.body.Write(data)
			if err != nil {
				// The handler has closed the request body.
				// Return the connection-level flow control for the discarded data,
				// but not the stream-level flow control.
				sc.sendWindowUpdate(nil, int(f.Length)-wrote)
				return nil
			}
			if wrote != len(data) {
				panic("internal error: bad Writer")
			}
		}

		// Return any padded flow control now, since we won't
		// refund it later on body reads.
		// Call sendWindowUpdate even if there is no padding,
		// to return buffered flow control credit if the sent
		// window has shrunk.
		pad := int32(f.Length) - int32(len(data))
		sc.sendWindowUpdate32(nil, pad)
		sc.sendWindowUpdate32(st, pad)
	}
	if f.StreamEnded() {
		st.endStream()
	}
	return nil
}
  1881  
  1882  func (sc *serverConn) processGoAway(f *GoAwayFrame) error {
  1883  	sc.serveG.check()
  1884  	if f.ErrCode != ErrCodeNo {
  1885  		sc.logf("http2: received GOAWAY %+v, starting graceful shutdown", f)
  1886  	} else {
  1887  		sc.vlogf("http2: received GOAWAY %+v, starting graceful shutdown", f)
  1888  	}
  1889  	sc.startGracefulShutdownInternal()
  1890  	// http://tools.ietf.org/html/rfc7540#section-6.8
  1891  	// We should not create any new streams, which means we should disable push.
  1892  	sc.pushEnabled = false
  1893  	return nil
  1894  }
  1895  
  1896  // isPushed reports whether the stream is server-initiated.
  1897  func (st *stream) isPushed() bool {
  1898  	return st.id%2 == 0
  1899  }
  1900  
// endStream closes a Request.Body's pipe. It is called when a DATA
// frame says a request body is over (or after trailers).
// If the body was shorter than its declared Content-Length, the pipe
// is closed with an error describing the mismatch; otherwise it is
// closed with io.EOF, arranging for any received trailers to be
// copied into the handler's request first.
func (st *stream) endStream() {
	sc := st.sc
	sc.serveG.check()

	if st.declBodyBytes != -1 && st.declBodyBytes != st.bodyBytes {
		st.body.CloseWithError(fmt.Errorf("request declared a Content-Length of %d but only wrote %d bytes",
			st.declBodyBytes, st.bodyBytes))
	} else {
		st.body.closeWithErrorAndCode(io.EOF, st.copyTrailersToHandlerRequest)
		st.body.CloseWithError(io.EOF)
	}
	st.state = stateHalfClosedRemote
}
  1916  
  1917  // copyTrailersToHandlerRequest is run in the Handler's goroutine in
  1918  // its Request.Body.Read just before it gets io.EOF.
  1919  func (st *stream) copyTrailersToHandlerRequest() {
  1920  	for k, vv := range st.trailer {
  1921  		if _, ok := st.reqTrailer[k]; ok {
  1922  			// Only copy it over it was pre-declared.
  1923  			st.reqTrailer[k] = vv
  1924  		}
  1925  	}
  1926  }
  1927  
  1928  // onReadTimeout is run on its own goroutine (from time.AfterFunc)
  1929  // when the stream's ReadTimeout has fired.
  1930  func (st *stream) onReadTimeout() {
  1931  	if st.body != nil {
  1932  		// Wrap the ErrDeadlineExceeded to avoid callers depending on us
  1933  		// returning the bare error.
  1934  		st.body.CloseWithError(fmt.Errorf("%w", os.ErrDeadlineExceeded))
  1935  	}
  1936  }
  1937  
  1938  // onWriteTimeout is run on its own goroutine (from time.AfterFunc)
  1939  // when the stream's WriteTimeout has fired.
  1940  func (st *stream) onWriteTimeout() {
  1941  	st.sc.writeFrameFromHandler(FrameWriteRequest{write: StreamError{
  1942  		StreamID: st.id,
  1943  		Code:     ErrCodeInternal,
  1944  		Cause:    os.ErrDeadlineExceeded,
  1945  	}})
  1946  }
  1947  
// processHeaders handles a complete, HPACK-decoded HEADERS frame.
// For an already-open stream it is treated as trailers and routed to
// processTrailerHeaders; otherwise it validates the new stream ID and
// concurrency limits, creates the stream, builds the request and
// response writer, and schedules the handler to run.
func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
	sc.serveG.check()
	id := f.StreamID
	// http://tools.ietf.org/html/rfc7540#section-5.1.1
	// Streams initiated by a client MUST use odd-numbered stream
	// identifiers. [...] An endpoint that receives an unexpected
	// stream identifier MUST respond with a connection error
	// (Section 5.4.1) of type PROTOCOL_ERROR.
	if id%2 != 1 {
		return sc.countError("headers_even", ConnectionError(ErrCodeProtocol))
	}
	// A HEADERS frame can be used to create a new stream or
	// send a trailer for an open one. If we already have a stream
	// open, let it process its own HEADERS frame (trailers at this
	// point, if it's valid).
	if st := sc.streams[f.StreamID]; st != nil {
		if st.resetQueued {
			// We're sending RST_STREAM to close the stream, so don't bother
			// processing this frame.
			return nil
		}
		// RFC 7540, sec 5.1: If an endpoint receives additional frames, other than
		// WINDOW_UPDATE, PRIORITY, or RST_STREAM, for a stream that is in
		// this state, it MUST respond with a stream error (Section 5.4.2) of
		// type STREAM_CLOSED.
		if st.state == stateHalfClosedRemote {
			return sc.countError("headers_half_closed", streamError(id, ErrCodeStreamClosed))
		}
		return st.processTrailerHeaders(f)
	}

	// [...] The identifier of a newly established stream MUST be
	// numerically greater than all streams that the initiating
	// endpoint has opened or reserved. [...]  An endpoint that
	// receives an unexpected stream identifier MUST respond with
	// a connection error (Section 5.4.1) of type PROTOCOL_ERROR.
	if id <= sc.maxClientStreamID {
		return sc.countError("stream_went_down", ConnectionError(ErrCodeProtocol))
	}
	sc.maxClientStreamID = id

	// A new stream means the connection is no longer idle.
	if sc.idleTimer != nil {
		sc.idleTimer.Stop()
	}

	// http://tools.ietf.org/html/rfc7540#section-5.1.2
	// [...] Endpoints MUST NOT exceed the limit set by their peer. An
	// endpoint that receives a HEADERS frame that causes their
	// advertised concurrent stream limit to be exceeded MUST treat
	// this as a stream error (Section 5.4.2) of type PROTOCOL_ERROR
	// or REFUSED_STREAM.
	if sc.curClientStreams+1 > sc.advMaxStreams {
		if sc.unackedSettings == 0 {
			// They should know better.
			return sc.countError("over_max_streams", streamError(id, ErrCodeProtocol))
		}
		// Assume it's a network race, where they just haven't
		// received our last SETTINGS update. But actually
		// this can't happen yet, because we don't yet provide
		// a way for users to adjust server parameters at
		// runtime.
		return sc.countError("over_max_streams_race", streamError(id, ErrCodeRefusedStream))
	}

	initialState := stateOpen
	if f.StreamEnded() {
		initialState = stateHalfClosedRemote
	}

	// We are handling two special cases here:
	// 1. When a request is sent via an intermediary, we force priority to be
	// u=3,i. This is essentially a round-robin behavior, and is done to ensure
	// fairness between, for example, multiple clients using the same proxy.
	// 2. Until a client has shown that it is aware of RFC 9218, we make its
	// streams non-incremental by default. This is done to preserve the
	// historical behavior of handling streams in a round-robin manner, rather
	// than one-by-one to completion.
	initialPriority := defaultRFC9218Priority(sc.priorityAware && !sc.hasIntermediary)
	if _, ok := sc.writeSched.(*priorityWriteSchedulerRFC9218); ok && !sc.hasIntermediary {
		headerPriority, priorityAware, hasIntermediary := f.rfc9218Priority(sc.priorityAware)
		initialPriority = headerPriority
		sc.hasIntermediary = hasIntermediary
		if priorityAware {
			sc.priorityAware = true
		}
	}
	st := sc.newStream(id, 0, initialState, initialPriority)

	// Legacy RFC 7540 priority information in the HEADERS frame is
	// validated regardless, but only applied to schedulers that honor it.
	if f.HasPriority() {
		if err := sc.checkPriority(f.StreamID, f.Priority); err != nil {
			return err
		}
		if !sc.writeSchedIgnoresRFC7540() {
			sc.writeSched.AdjustStream(st.id, f.Priority)
		}
	}

	rw, req, err := sc.newWriterAndRequest(st, f)
	if err != nil {
		return err
	}
	st.reqTrailer = req.Trailer
	if st.reqTrailer != nil {
		st.trailer = make(Header)
	}
	st.body = req.Body.(*requestBody).pipe // may be nil
	st.declBodyBytes = req.ContentLength

	handler := sc.handler.ServeHTTP
	if f.Truncated {
		// Their header list was too long. Send a 431 error.
		handler = handleHeaderListTooLong
	} else if err := checkValidHTTP2RequestHeaders(req.Header); err != nil {
		handler = serve400Handler{err}.ServeHTTP
	}

	// The net/http package sets the read deadline from the
	// http.Server.ReadTimeout during the TLS handshake, but then
	// passes the connection off to us with the deadline already
	// set. Disarm it here after the request headers are read,
	// similar to how the http1 server works. Here it's
	// technically more like the http1 Server's ReadHeaderTimeout
	// (in Go 1.8), though. That's a more sane option anyway.
	if sc.hs.ReadTimeout() > 0 {
		sc.conn.SetReadDeadline(time.Time{})
		st.readDeadline = time.AfterFunc(sc.hs.ReadTimeout(), st.onReadTimeout)
	}

	return sc.scheduleHandler(id, rw, req, handler)
}
  2078  
// processTrailerHeaders validates a HEADERS frame received as
// trailers for st: it must be the only trailer block, must carry
// END_STREAM, and must contain no pseudo-header fields. Valid
// trailer fields are collected into st.trailer before the stream's
// body is ended.
func (st *stream) processTrailerHeaders(f *MetaHeadersFrame) error {
	sc := st.sc
	sc.serveG.check()
	if st.gotTrailerHeader {
		return sc.countError("dup_trailers", ConnectionError(ErrCodeProtocol))
	}
	st.gotTrailerHeader = true
	if !f.StreamEnded() {
		return sc.countError("trailers_not_ended", streamError(st.id, ErrCodeProtocol))
	}

	if len(f.PseudoFields()) > 0 {
		return sc.countError("trailers_pseudo", streamError(st.id, ErrCodeProtocol))
	}
	// st.trailer is non-nil only when the request pre-declared trailers.
	if st.trailer != nil {
		for _, hf := range f.RegularFields() {
			key := sc.canonicalHeader(hf.Name)
			if !httpguts.ValidTrailerHeader(key) {
				// TODO: send more details to the peer somehow. But http2 has
				// no way to send debug data at a stream level. Discuss with
				// HTTP folk.
				return sc.countError("trailers_bogus", streamError(st.id, ErrCodeProtocol))
			}
			st.trailer[key] = append(st.trailer[key], hf.Value)
		}
	}
	st.endStream()
	return nil
}
  2108  
  2109  func (sc *serverConn) checkPriority(streamID uint32, p PriorityParam) error {
  2110  	if streamID == p.StreamDep {
  2111  		// Section 5.3.1: "A stream cannot depend on itself. An endpoint MUST treat
  2112  		// this as a stream error (Section 5.4.2) of type PROTOCOL_ERROR."
  2113  		// Section 5.3.3 says that a stream can depend on one of its dependencies,
  2114  		// so it's only self-dependencies that are forbidden.
  2115  		return sc.countError("priority", streamError(streamID, ErrCodeProtocol))
  2116  	}
  2117  	return nil
  2118  }
  2119  
  2120  func (sc *serverConn) processPriority(f *PriorityFrame) error {
  2121  	if err := sc.checkPriority(f.StreamID, f.PriorityParam); err != nil {
  2122  		return err
  2123  	}
  2124  	// We need to avoid calling AdjustStream when using the RFC 9218 write
  2125  	// scheduler. Otherwise, incremental's zero value in PriorityParam will
  2126  	// unexpectedly make all streams non-incremental. This causes us to process
  2127  	// streams one-by-one to completion rather than doing it in a round-robin
  2128  	// manner (the historical behavior), which might be unexpected to users.
  2129  	if sc.writeSchedIgnoresRFC7540() {
  2130  		return nil
  2131  	}
  2132  	sc.writeSched.AdjustStream(f.StreamID, f.PriorityParam)
  2133  	return nil
  2134  }
  2135  
  2136  func (sc *serverConn) processPriorityUpdate(f *PriorityUpdateFrame) error {
  2137  	sc.priorityAware = true
  2138  	if _, ok := sc.writeSched.(*priorityWriteSchedulerRFC9218); !ok {
  2139  		return nil
  2140  	}
  2141  	p, ok := parseRFC9218Priority(f.Priority, sc.priorityAware)
  2142  	if !ok {
  2143  		return sc.countError("unparsable_priority_update", streamError(f.PrioritizedStreamID, ErrCodeProtocol))
  2144  	}
  2145  	sc.writeSched.AdjustStream(f.PrioritizedStreamID, p)
  2146  	return nil
  2147  }
  2148  
// newStream creates and registers a stream with the given id and
// initial state, wiring up its context, send/receive flow-control
// windows, optional write deadline timer, and write scheduler entry.
// pusherID is the id of the pushing stream for server-pushed streams,
// or 0 for client-initiated ones. It panics if id is 0 and runs on
// the serve goroutine.
func (sc *serverConn) newStream(id, pusherID uint32, state streamState, priority PriorityParam) *stream {
	sc.serveG.check()
	if id == 0 {
		panic("internal error: cannot create stream with id 0")
	}

	ctx, cancelCtx := context.WithCancel(sc.baseCtx)
	st := &stream{
		sc:        sc,
		id:        id,
		state:     state,
		ctx:       ctx,
		cancelCtx: cancelCtx,
	}
	st.cw.Init()
	st.flow.conn = &sc.flow // link to conn-level counter
	st.flow.add(sc.initialStreamSendWindowSize)
	st.inflow.init(sc.initialStreamRecvWindowSize)
	if writeTimeout := sc.hs.WriteTimeout(); writeTimeout > 0 {
		st.writeDeadline = time.AfterFunc(writeTimeout, st.onWriteTimeout)
	}

	sc.streams[id] = st
	sc.writeSched.OpenStream(st.id, OpenStreamOptions{PusherID: pusherID, priority: priority})
	// Maintain the counters used to enforce concurrent-stream limits.
	if st.isPushed() {
		sc.curPushedStreams++
	} else {
		sc.curClientStreams++
	}
	// First open stream transitions the connection out of idle.
	if sc.curOpenStreams() == 1 {
		sc.setConnState(ConnStateActive)
	}

	return st
}
  2184  
// newWriterAndRequest builds the responseWriter and ServerRequest for
// a newly created stream from its validated HEADERS frame. It checks
// the request pseudo-headers per RFC 7540 Sections 8.1.2.3 and 8.3
// (CONNECT), copies the regular header fields, and attaches a body
// pipe when the frame did not end the stream.
func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*responseWriter, *ServerRequest, error) {
	sc.serveG.check()

	rp := httpcommon.ServerRequestParam{
		Method:    f.PseudoValue("method"),
		Scheme:    f.PseudoValue("scheme"),
		Authority: f.PseudoValue("authority"),
		Path:      f.PseudoValue("path"),
		Protocol:  f.PseudoValue("protocol"),
	}

	// extended connect is disabled, so we should not see :protocol
	if disableExtendedConnectProtocol && rp.Protocol != "" {
		return nil, nil, sc.countError("bad_connect", streamError(f.StreamID, ErrCodeProtocol))
	}

	isConnect := rp.Method == "CONNECT"
	if isConnect {
		// Plain CONNECT (no :protocol) must have :authority only;
		// extended CONNECT carries :scheme and :path as usual.
		if rp.Protocol == "" && (rp.Path != "" || rp.Scheme != "" || rp.Authority == "") {
			return nil, nil, sc.countError("bad_connect", streamError(f.StreamID, ErrCodeProtocol))
		}
	} else if rp.Method == "" || rp.Path == "" || (rp.Scheme != "https" && rp.Scheme != "http") {
		// See 8.1.2.6 Malformed Requests and Responses:
		//
		// Malformed requests or responses that are detected
		// MUST be treated as a stream error (Section 5.4.2)
		// of type PROTOCOL_ERROR."
		//
		// 8.1.2.3 Request Pseudo-Header Fields
		// "All HTTP/2 requests MUST include exactly one valid
		// value for the :method, :scheme, and :path
		// pseudo-header fields"
		return nil, nil, sc.countError("bad_path_method", streamError(f.StreamID, ErrCodeProtocol))
	}

	header := make(Header)
	rp.Header = header
	for _, hf := range f.RegularFields() {
		header.Add(sc.canonicalHeader(hf.Name), hf.Value)
	}
	if rp.Authority == "" {
		rp.Authority = header.Get("Host")
	}
	if rp.Protocol != "" {
		header.Set(":protocol", rp.Protocol)
	}

	rw, req, err := sc.newWriterAndRequestNoBody(st, rp)
	if err != nil {
		return nil, nil, err
	}
	bodyOpen := !f.StreamEnded()
	if bodyOpen {
		// bitSize 63 keeps the parsed value within int64 range.
		if vv, ok := rp.Header["Content-Length"]; ok {
			if cl, err := strconv.ParseUint(vv[0], 10, 63); err == nil {
				req.ContentLength = int64(cl)
			} else {
				req.ContentLength = 0
			}
		} else {
			req.ContentLength = -1
		}
		req.Body.(*requestBody).pipe = &pipe{
			b: &dataBuffer{expected: req.ContentLength},
		}
	}
	return rw, req, nil
}
  2253  
// newWriterAndRequestNoBody builds the responseWriter and
// ServerRequest from already-extracted request parameters, leaving
// the body pipe (if any) for the caller to attach. It rejects
// requests that httpcommon.NewServerRequest deems invalid.
func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp httpcommon.ServerRequestParam) (*responseWriter, *ServerRequest, error) {
	sc.serveG.check()

	var tlsState *tls.ConnectionState // nil if not scheme https
	if rp.Scheme == "https" {
		tlsState = sc.tlsState
	}

	res := httpcommon.NewServerRequest(rp)
	if res.InvalidReason != "" {
		return nil, nil, sc.countError(res.InvalidReason, streamError(st.id, ErrCodeProtocol))
	}

	body := &requestBody{
		conn:          sc,
		stream:        st,
		needsContinue: res.NeedsContinue,
	}
	rw := sc.newResponseWriter(st)
	rw.rws.req = ServerRequest{
		Context:    st.ctx,
		Method:     rp.Method,
		URL:        res.URL,
		RemoteAddr: sc.remoteAddrStr,
		Header:     rp.Header,
		RequestURI: res.RequestURI,
		Proto:      "HTTP/2.0",
		ProtoMajor: 2,
		ProtoMinor: 0,
		TLS:        tlsState,
		Host:       rp.Authority,
		Body:       body,
		Trailer:    res.Trailer,
	}
	return rw, &rw.rws.req, nil
}
  2290  
  2291  func (sc *serverConn) newResponseWriter(st *stream) *responseWriter {
  2292  	rws := responseWriterStatePool.Get().(*responseWriterState)
  2293  	bwSave := rws.bw
  2294  	*rws = responseWriterState{} // zero all the fields
  2295  	rws.conn = sc
  2296  	rws.bw = bwSave
  2297  	rws.bw.Reset(chunkWriter{rws})
  2298  	rws.stream = st
  2299  	return &responseWriter{rws: rws}
  2300  }
  2301  
// unstartedHandler records a request whose handler goroutine has not
// been started yet because the concurrent-handler limit was reached.
// scheduleHandler queues these; handlerDone starts them as capacity
// frees up.
type unstartedHandler struct {
	streamID uint32 // stream the request arrived on
	rw       *responseWriter
	req      *ServerRequest
	handler  func(*ResponseWriter, *ServerRequest)
}
  2308  
// scheduleHandler starts a handler goroutine,
// or schedules one to start as soon as an existing handler finishes.
// If the queue of unstarted handlers grows past four times the
// advertised max concurrent streams, the connection is torn down
// with ENHANCE_YOUR_CALM. Runs on the serve goroutine.
func (sc *serverConn) scheduleHandler(streamID uint32, rw *responseWriter, req *ServerRequest, handler func(*ResponseWriter, *ServerRequest)) error {
	sc.serveG.check()
	maxHandlers := sc.advMaxStreams
	if sc.curHandlers < maxHandlers {
		sc.curHandlers++
		go sc.runHandler(rw, req, handler)
		return nil
	}
	if len(sc.unstartedHandlers) > int(4*sc.advMaxStreams) {
		return sc.countError("too_many_early_resets", ConnectionError(ErrCodeEnhanceYourCalm))
	}
	sc.unstartedHandlers = append(sc.unstartedHandlers, unstartedHandler{
		streamID: streamID,
		rw:       rw,
		req:      req,
		handler:  handler,
	})
	return nil
}
  2330  
// handlerDone runs on the serve goroutine when a handler goroutine finishes.
// It decrements the running-handler count, then starts as many queued
// (unstarted) handlers as the limit now allows. Queued entries whose stream
// was reset before starting are silently discarded.
func (sc *serverConn) handlerDone() {
	sc.serveG.check()
	sc.curHandlers--
	i := 0
	maxHandlers := sc.advMaxStreams
	for ; i < len(sc.unstartedHandlers); i++ {
		u := sc.unstartedHandlers[i]
		if sc.streams[u.streamID] == nil {
			// This stream was reset before its goroutine had a chance to start.
			continue
		}
		if sc.curHandlers >= maxHandlers {
			break
		}
		sc.curHandlers++
		go sc.runHandler(u.rw, u.req, u.handler)
		sc.unstartedHandlers[i] = unstartedHandler{} // don't retain references
	}
	// Drop the processed prefix: started entries and reset-stream entries
	// alike end up before index i and are sliced away here.
	sc.unstartedHandlers = sc.unstartedHandlers[i:]
	if len(sc.unstartedHandlers) == 0 {
		sc.unstartedHandlers = nil
	}
}
  2354  
// runHandler executes the user handler for one stream.
// Run on its own goroutine. Whether the handler returns normally or
// panics, the serve loop is notified via handlerDoneMsg so a queued
// handler can be started in its place.
func (sc *serverConn) runHandler(rw *responseWriter, req *ServerRequest, handler func(*ResponseWriter, *ServerRequest)) {
	defer sc.sendServeMsg(handlerDoneMsg)
	didPanic := true
	defer func() {
		rw.rws.stream.cancelCtx()
		if req.MultipartForm != nil {
			// Best-effort cleanup of multipart temp files.
			req.MultipartForm.RemoveAll()
		}
		if didPanic {
			e := recover()
			// Abort the stream with RST_STREAM so the client isn't left hanging.
			sc.writeFrameFromHandler(FrameWriteRequest{
				write:  handlerPanicRST{rw.rws.stream.id},
				stream: rw.rws.stream,
			})
			// Same as net/http: log the panic unless it is ErrAbortHandler.
			if e != nil && e != ErrAbortHandler {
				const size = 64 << 10
				buf := make([]byte, size)
				buf = buf[:runtime.Stack(buf, false)]
				sc.logf("http2: panic serving %v: %v\n%s", sc.conn.RemoteAddr(), e, buf)
			}
			return
		}
		rw.handlerDone()
	}()
	handler(rw, req)
	didPanic = false
}
  2384  
  2385  func handleHeaderListTooLong(w *ResponseWriter, r *ServerRequest) {
  2386  	// 10.5.1 Limits on Header Block Size:
  2387  	// .. "A server that receives a larger header block than it is
  2388  	// willing to handle can send an HTTP 431 (Request Header Fields Too
  2389  	// Large) status code"
  2390  	const statusRequestHeaderFieldsTooLarge = 431 // only in Go 1.6+
  2391  	w.WriteHeader(statusRequestHeaderFieldsTooLarge)
  2392  	io.WriteString(w, "<h1>HTTP Error 431</h1><p>Request Header Field(s) Too Large</p>")
  2393  }
  2394  
// writeHeaders queues a HEADERS frame for st. Called from handler
// goroutines; headerData.h may be nil. When a header map is present (the
// handler still owns it and may mutate it later), the call blocks until
// the frame is written so the serialized header reflects the map's
// contents at this moment.
func (sc *serverConn) writeHeaders(st *stream, headerData *writeResHeaders) error {
	sc.serveG.checkNotOn() // NOT on
	var errc chan error
	if headerData.h != nil {
		// There's a header map, which we don't own, so we have to block on
		// waiting for this frame to be written, so an http.Flush mid-handler
		// writes out the correct value of keys, before a handler later
		// potentially mutates it.
		errc = sc.srv.state.getErrChan()
	}
	if err := sc.writeFrameFromHandler(FrameWriteRequest{
		write:  headerData,
		stream: st,
		done:   errc,
	}); err != nil {
		return err
	}
	if errc != nil {
		select {
		case err := <-errc:
			// Recycle the channel only after receiving on it; on the other
			// branches the write loop may still send on errc later.
			sc.srv.state.putErrChan(errc)
			return err
		case <-sc.doneServing:
			return errClientDisconnected
		case <-st.cw:
			return errStreamClosed
		}
	}
	return nil
}
  2427  
// write100ContinueHeaders queues a "100 Continue" response header for st.
// Called from handler goroutines; fire-and-forget (the write error, if any,
// is discarded).
func (sc *serverConn) write100ContinueHeaders(st *stream) {
	sc.writeFrameFromHandler(FrameWriteRequest{
		write:  write100ContinueHeadersFrame{st.id},
		stream: st,
	})
}
  2435  
// A bodyReadMsg tells the server loop that the http.Handler read n
// bytes of the DATA from the client on the given stream, so that the
// serve goroutine can replenish flow-control windows.
type bodyReadMsg struct {
	st *stream
	n  int
}
  2442  
// called from handler goroutines.
// Notes that the handler for the given stream ID read n bytes of its body
// and schedules flow control tokens to be sent.
// NOTE(review): err is accepted but unused in this body — presumably kept
// so callers can forward Read's error without checking it; confirm.
func (sc *serverConn) noteBodyReadFromHandler(st *stream, n int, err error) {
	sc.serveG.checkNotOn() // NOT on
	if n > 0 {
		// Drop the message if the connection is shutting down rather than
		// blocking the handler forever.
		select {
		case sc.bodyReadCh <- bodyReadMsg{st, n}:
		case <-sc.doneServing:
		}
	}
}
  2455  
  2456  func (sc *serverConn) noteBodyRead(st *stream, n int) {
  2457  	sc.serveG.check()
  2458  	sc.sendWindowUpdate(nil, n) // conn-level
  2459  	if st.state != stateHalfClosedRemote && st.state != stateClosed {
  2460  		// Don't send this WINDOW_UPDATE if the stream is closed
  2461  		// remotely.
  2462  		sc.sendWindowUpdate(st, n)
  2463  	}
  2464  }
  2465  
// sendWindowUpdate32 is a convenience wrapper around sendWindowUpdate
// for int32 counts. st may be nil for conn-level.
func (sc *serverConn) sendWindowUpdate32(st *stream, n int32) {
	sc.sendWindowUpdate(st, int(n))
}
  2470  
  2471  // st may be nil for conn-level
  2472  func (sc *serverConn) sendWindowUpdate(st *stream, n int) {
  2473  	sc.serveG.check()
  2474  	var streamID uint32
  2475  	var send int32
  2476  	if st == nil {
  2477  		send = sc.inflow.add(n)
  2478  	} else {
  2479  		streamID = st.id
  2480  		send = st.inflow.add(n)
  2481  	}
  2482  	if send == 0 {
  2483  		return
  2484  	}
  2485  	sc.writeFrame(FrameWriteRequest{
  2486  		write:  writeWindowUpdate{streamID: streamID, n: uint32(send)},
  2487  		stream: st,
  2488  	})
  2489  }
  2490  
// requestBody is the Handler's Request.Body type.
// Read and Close may be called concurrently.
type requestBody struct {
	_             incomparable
	stream        *stream
	conn          *serverConn // may be nil; Read tolerates that (see Read)
	closeOnce     sync.Once   // for use by Close only
	sawEOF        bool        // for use by Read only
	pipe          *pipe       // non-nil if we have an HTTP entity message body
	needsContinue bool        // need to send a 100-continue
}
  2502  
  2503  func (b *requestBody) Close() error {
  2504  	b.closeOnce.Do(func() {
  2505  		if b.pipe != nil {
  2506  			b.pipe.BreakWithError(errClosedBody)
  2507  		}
  2508  	})
  2509  	return nil
  2510  }
  2511  
  2512  func (b *requestBody) Read(p []byte) (n int, err error) {
  2513  	if b.needsContinue {
  2514  		b.needsContinue = false
  2515  		b.conn.write100ContinueHeaders(b.stream)
  2516  	}
  2517  	if b.pipe == nil || b.sawEOF {
  2518  		return 0, io.EOF
  2519  	}
  2520  	n, err = b.pipe.Read(p)
  2521  	if err == io.EOF {
  2522  		b.sawEOF = true
  2523  	}
  2524  	if b.conn == nil {
  2525  		return
  2526  	}
  2527  	b.conn.noteBodyReadFromHandler(b.stream, n, err)
  2528  	return
  2529  }
  2530  
// responseWriter is the http.ResponseWriter implementation. It's
// intentionally small (1 pointer wide) to minimize garbage. The
// responseWriterState pointer inside is zeroed at the end of a
// request (in handlerDone) and calls on the responseWriter thereafter
// simply crash (caller's mistake), but the much larger responseWriterState
// and buffers are reused between multiple requests via
// responseWriterStatePool.
type responseWriter struct {
	rws *responseWriterState
}
  2540  
// responseWriterState holds the bulky per-response state behind a
// responseWriter. Instances are pooled and reset between requests
// (see newResponseWriter).
type responseWriterState struct {
	// immutable within a request:
	stream *stream
	req    ServerRequest
	conn   *serverConn

	// TODO: adjust buffer writing sizes based on server config, frame size updates from peer, etc
	bw *bufio.Writer // writing to a chunkWriter{this *responseWriterState}

	// mutated by http.Handler goroutine:
	handlerHeader Header   // nil until called
	snapHeader    Header   // snapshot of handlerHeader at WriteHeader time
	trailers      []string // set in writeChunk
	status        int      // status code passed to WriteHeader
	wroteHeader   bool     // WriteHeader called (explicitly or implicitly). Not necessarily sent to user yet.
	sentHeader    bool     // have we sent the header frame?
	handlerDone   bool     // handler has finished

	sentContentLen int64 // non-zero if handler set a Content-Length header
	wroteBytes     int64

	closeNotifierMu sync.Mutex // guards closeNotifierCh
	closeNotifierCh chan bool  // nil until first used
}
  2565  
// chunkWriter is the io.Writer that the responseWriterState's bufio.Writer
// flushes into; each chunk is forwarded to writeChunk.
type chunkWriter struct{ rws *responseWriterState }
  2567  
  2568  func (cw chunkWriter) Write(p []byte) (n int, err error) {
  2569  	n, err = cw.rws.writeChunk(p)
  2570  	if err == errStreamClosed {
  2571  		// If writing failed because the stream has been closed,
  2572  		// return the reason it was closed.
  2573  		err = cw.rws.stream.closeErr
  2574  	}
  2575  	return n, err
  2576  }
  2577  
// hasTrailers reports whether any trailer fields have been declared.
func (rws *responseWriterState) hasTrailers() bool { return len(rws.trailers) > 0 }
  2579  
  2580  func (rws *responseWriterState) hasNonemptyTrailers() bool {
  2581  	for _, trailer := range rws.trailers {
  2582  		if _, ok := rws.handlerHeader[trailer]; ok {
  2583  			return true
  2584  		}
  2585  	}
  2586  	return false
  2587  }
  2588  
  2589  // declareTrailer is called for each Trailer header when the
  2590  // response header is written. It notes that a header will need to be
  2591  // written in the trailers at the end of the response.
  2592  func (rws *responseWriterState) declareTrailer(k string) {
  2593  	k = textproto.CanonicalMIMEHeaderKey(k)
  2594  	if !httpguts.ValidTrailerHeader(k) {
  2595  		// Forbidden by RFC 7230, section 4.1.2.
  2596  		rws.conn.logf("ignoring invalid trailer %q", k)
  2597  		return
  2598  	}
  2599  	if !strSliceContains(rws.trailers, k) {
  2600  		rws.trailers = append(rws.trailers, k)
  2601  	}
  2602  }
  2603  
// TimeFormat is the time layout used when generating the Date response
// header (see writeChunk).
const TimeFormat = "Mon, 02 Jan 2006 15:04:05 GMT" // keep in sync with net/http
  2605  
// writeChunk writes chunks from the bufio.Writer. But because
// bufio.Writer may bypass its chunking, sometimes p may be
// arbitrarily large.
//
// writeChunk is also responsible (on the first chunk) for sending the
// HEADER response, including inferring Content-Length, sniffing the
// Content-Type, adding a Date header, and honoring "Connection: close".
// On the final chunk (handlerDone) it sets END_STREAM and, if needed,
// writes the trailers.
func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) {
	if !rws.wroteHeader {
		rws.writeHeader(200)
	}

	if rws.handlerDone {
		rws.promoteUndeclaredTrailers()
	}

	isHeadResp := rws.req.Method == "HEAD"
	if !rws.sentHeader {
		rws.sentHeader = true
		var ctype, clen string
		if clen = rws.snapHeader.Get("Content-Length"); clen != "" {
			rws.snapHeader.Del("Content-Length")
			// Only a valid non-negative integer is kept; anything else is
			// dropped rather than sent through.
			if cl, err := strconv.ParseUint(clen, 10, 63); err == nil {
				rws.sentContentLen = int64(cl)
			} else {
				clen = ""
			}
		}
		_, hasContentLength := rws.snapHeader["Content-Length"]
		// If the handler finished in one chunk and set no Content-Length,
		// we know the full body length and can set it ourselves.
		if !hasContentLength && clen == "" && rws.handlerDone && bodyAllowedForStatus(rws.status) && (len(p) > 0 || !isHeadResp) {
			clen = strconv.Itoa(len(p))
		}
		_, hasContentType := rws.snapHeader["Content-Type"]
		// If the Content-Encoding is non-blank, we shouldn't
		// sniff the body. See Issue golang.org/issue/31753.
		ce := rws.snapHeader.Get("Content-Encoding")
		hasCE := len(ce) > 0
		if !hasCE && !hasContentType && bodyAllowedForStatus(rws.status) && len(p) > 0 {
			ctype = internal.DetectContentType(p)
		}
		var date string
		if _, ok := rws.snapHeader["Date"]; !ok {
			// TODO(bradfitz): be faster here, like net/http? measure.
			date = time.Now().UTC().Format(TimeFormat)
		}

		for _, v := range rws.snapHeader["Trailer"] {
			foreachHeaderElement(v, rws.declareTrailer)
		}

		// "Connection" headers aren't allowed in HTTP/2 (RFC 7540, 8.1.2.2),
		// but respect "Connection" == "close" to mean sending a GOAWAY and tearing
		// down the TCP connection when idle, like we do for HTTP/1.
		// TODO: remove more Connection-specific header fields here, in addition
		// to "Connection".
		if _, ok := rws.snapHeader["Connection"]; ok {
			v := rws.snapHeader.Get("Connection")
			delete(rws.snapHeader, "Connection")
			if v == "close" {
				rws.conn.startGracefulShutdown()
			}
		}

		// END_STREAM goes on the HEADERS frame itself when there is no body
		// and no trailers to follow (or for any HEAD response).
		endStream := (rws.handlerDone && !rws.hasTrailers() && len(p) == 0) || isHeadResp
		err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{
			streamID:      rws.stream.id,
			httpResCode:   rws.status,
			h:             rws.snapHeader,
			endStream:     endStream,
			contentType:   ctype,
			contentLength: clen,
			date:          date,
		})
		if err != nil {
			return 0, err
		}
		if endStream {
			return 0, nil
		}
	}
	if isHeadResp {
		// Pretend the body was written so the bufio.Writer drains.
		return len(p), nil
	}
	if len(p) == 0 && !rws.handlerDone {
		return 0, nil
	}

	// only send trailers if they have actually been defined by the
	// server handler.
	hasNonemptyTrailers := rws.hasNonemptyTrailers()
	endStream := rws.handlerDone && !hasNonemptyTrailers
	if len(p) > 0 || endStream {
		// only send a 0 byte DATA frame if we're ending the stream.
		if err := rws.conn.writeDataFromHandler(rws.stream, p, endStream); err != nil {
			return 0, err
		}
	}

	if rws.handlerDone && hasNonemptyTrailers {
		// Trailers ride in a final HEADERS frame carrying END_STREAM.
		err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{
			streamID:  rws.stream.id,
			h:         rws.handlerHeader,
			trailers:  rws.trailers,
			endStream: true,
		})
		return len(p), err
	}
	return len(p), nil
}
  2714  
// TrailerPrefix is a magic prefix for ResponseWriter.Header map keys
// that, if present, signals that the map entry is actually for
// the response trailers, and not the response headers. The prefix
// is stripped after the ServeHTTP call finishes and the values are
// sent in the trailers (see promoteUndeclaredTrailers).
//
// This mechanism is intended only for trailers that are not known
// prior to the headers being written. If the set of trailers is fixed
// or known before the header is written, the normal Go trailers mechanism
// is preferred:
//
//	https://golang.org/pkg/net/http/#ResponseWriter
//	https://golang.org/pkg/net/http/#example_ResponseWriter_trailers
const TrailerPrefix = "Trailer:"
  2729  
  2730  // promoteUndeclaredTrailers permits http.Handlers to set trailers
  2731  // after the header has already been flushed. Because the Go
  2732  // ResponseWriter interface has no way to set Trailers (only the
  2733  // Header), and because we didn't want to expand the ResponseWriter
  2734  // interface, and because nobody used trailers, and because RFC 7230
  2735  // says you SHOULD (but not must) predeclare any trailers in the
  2736  // header, the official ResponseWriter rules said trailers in Go must
  2737  // be predeclared, and then we reuse the same ResponseWriter.Header()
  2738  // map to mean both Headers and Trailers. When it's time to write the
  2739  // Trailers, we pick out the fields of Headers that were declared as
  2740  // trailers. That worked for a while, until we found the first major
  2741  // user of Trailers in the wild: gRPC (using them only over http2),
  2742  // and gRPC libraries permit setting trailers mid-stream without
  2743  // predeclaring them. So: change of plans. We still permit the old
  2744  // way, but we also permit this hack: if a Header() key begins with
  2745  // "Trailer:", the suffix of that key is a Trailer. Because ':' is an
  2746  // invalid token byte anyway, there is no ambiguity. (And it's already
  2747  // filtered out) It's mildly hacky, but not terrible.
  2748  //
  2749  // This method runs after the Handler is done and promotes any Header
  2750  // fields to be trailers.
  2751  func (rws *responseWriterState) promoteUndeclaredTrailers() {
  2752  	for k, vv := range rws.handlerHeader {
  2753  		if !strings.HasPrefix(k, TrailerPrefix) {
  2754  			continue
  2755  		}
  2756  		trailerKey := strings.TrimPrefix(k, TrailerPrefix)
  2757  		rws.declareTrailer(trailerKey)
  2758  		rws.handlerHeader[textproto.CanonicalMIMEHeaderKey(trailerKey)] = vv
  2759  	}
  2760  
  2761  	if len(rws.trailers) > 1 {
  2762  		sorter := sorterPool.Get().(*sorter)
  2763  		sorter.SortStrings(rws.trailers)
  2764  		sorterPool.Put(sorter)
  2765  	}
  2766  }
  2767  
  2768  func (w *responseWriter) SetReadDeadline(deadline time.Time) error {
  2769  	st := w.rws.stream
  2770  	if !deadline.IsZero() && deadline.Before(time.Now()) {
  2771  		// If we're setting a deadline in the past, reset the stream immediately
  2772  		// so writes after SetWriteDeadline returns will fail.
  2773  		st.onReadTimeout()
  2774  		return nil
  2775  	}
  2776  	w.rws.conn.sendServeMsg(func(sc *serverConn) {
  2777  		if st.readDeadline != nil {
  2778  			if !st.readDeadline.Stop() {
  2779  				// Deadline already exceeded, or stream has been closed.
  2780  				return
  2781  			}
  2782  		}
  2783  		if deadline.IsZero() {
  2784  			st.readDeadline = nil
  2785  		} else if st.readDeadline == nil {
  2786  			st.readDeadline = time.AfterFunc(deadline.Sub(time.Now()), st.onReadTimeout)
  2787  		} else {
  2788  			st.readDeadline.Reset(deadline.Sub(time.Now()))
  2789  		}
  2790  	})
  2791  	return nil
  2792  }
  2793  
  2794  func (w *responseWriter) SetWriteDeadline(deadline time.Time) error {
  2795  	st := w.rws.stream
  2796  	if !deadline.IsZero() && deadline.Before(time.Now()) {
  2797  		// If we're setting a deadline in the past, reset the stream immediately
  2798  		// so writes after SetWriteDeadline returns will fail.
  2799  		st.onWriteTimeout()
  2800  		return nil
  2801  	}
  2802  	w.rws.conn.sendServeMsg(func(sc *serverConn) {
  2803  		if st.writeDeadline != nil {
  2804  			if !st.writeDeadline.Stop() {
  2805  				// Deadline already exceeded, or stream has been closed.
  2806  				return
  2807  			}
  2808  		}
  2809  		if deadline.IsZero() {
  2810  			st.writeDeadline = nil
  2811  		} else if st.writeDeadline == nil {
  2812  			st.writeDeadline = time.AfterFunc(deadline.Sub(time.Now()), st.onWriteTimeout)
  2813  		} else {
  2814  			st.writeDeadline.Reset(deadline.Sub(time.Now()))
  2815  		}
  2816  	})
  2817  	return nil
  2818  }
  2819  
// EnableFullDuplex is a no-op that always succeeds: HTTP/2 responses
// support full duplex (reading the request body while writing the
// response) unconditionally.
func (w *responseWriter) EnableFullDuplex() error {
	// We always support full duplex responses, so this is a no-op.
	return nil
}
  2824  
// Flush sends any buffered data to the client, discarding the error
// that FlushError would report.
func (w *responseWriter) Flush() {
	w.FlushError()
}
  2828  
  2829  func (w *responseWriter) FlushError() error {
  2830  	rws := w.rws
  2831  	if rws == nil {
  2832  		panic("Header called after Handler finished")
  2833  	}
  2834  	var err error
  2835  	if rws.bw.Buffered() > 0 {
  2836  		err = rws.bw.Flush()
  2837  	} else {
  2838  		// The bufio.Writer won't call chunkWriter.Write
  2839  		// (writeChunk with zero bytes), so we have to do it
  2840  		// ourselves to force the HTTP response header and/or
  2841  		// final DATA frame (with END_STREAM) to be sent.
  2842  		_, err = chunkWriter{rws}.Write(nil)
  2843  		if err == nil {
  2844  			select {
  2845  			case <-rws.stream.cw:
  2846  				err = rws.stream.closeErr
  2847  			default:
  2848  			}
  2849  		}
  2850  	}
  2851  	return err
  2852  }
  2853  
  2854  func (w *responseWriter) CloseNotify() <-chan bool {
  2855  	rws := w.rws
  2856  	if rws == nil {
  2857  		panic("CloseNotify called after Handler finished")
  2858  	}
  2859  	rws.closeNotifierMu.Lock()
  2860  	ch := rws.closeNotifierCh
  2861  	if ch == nil {
  2862  		ch = make(chan bool, 1)
  2863  		rws.closeNotifierCh = ch
  2864  		cw := rws.stream.cw
  2865  		go func() {
  2866  			cw.Wait() // wait for close
  2867  			ch <- true
  2868  		}()
  2869  	}
  2870  	rws.closeNotifierMu.Unlock()
  2871  	return ch
  2872  }
  2873  
  2874  func (w *responseWriter) Header() Header {
  2875  	rws := w.rws
  2876  	if rws == nil {
  2877  		panic("Header called after Handler finished")
  2878  	}
  2879  	if rws.handlerHeader == nil {
  2880  		rws.handlerHeader = make(Header)
  2881  	}
  2882  	return rws.handlerHeader
  2883  }
  2884  
  2885  // checkWriteHeaderCode is a copy of net/http's checkWriteHeaderCode.
  2886  func checkWriteHeaderCode(code int) {
  2887  	// Issue 22880: require valid WriteHeader status codes.
  2888  	// For now we only enforce that it's three digits.
  2889  	// In the future we might block things over 599 (600 and above aren't defined
  2890  	// at http://httpwg.org/specs/rfc7231.html#status.codes).
  2891  	// But for now any three digits.
  2892  	//
  2893  	// We used to send "HTTP/1.1 000 0" on the wire in responses but there's
  2894  	// no equivalent bogus thing we can realistically send in HTTP/2,
  2895  	// so we'll consistently panic instead and help people find their bugs
  2896  	// early. (We can't return an error from WriteHeader even if we wanted to.)
  2897  	if code < 100 || code > 999 {
  2898  		panic(fmt.Sprintf("invalid WriteHeader code %v", code))
  2899  	}
  2900  }
  2901  
  2902  func (w *responseWriter) WriteHeader(code int) {
  2903  	rws := w.rws
  2904  	if rws == nil {
  2905  		panic("WriteHeader called after Handler finished")
  2906  	}
  2907  	rws.writeHeader(code)
  2908  }
  2909  
// writeHeader records the status code for the response. 1xx informational
// codes are written to the wire immediately and do not mark the header as
// written, so the final response header can still follow; only the first
// non-informational call has any effect.
func (rws *responseWriterState) writeHeader(code int) {
	if rws.wroteHeader {
		return
	}

	checkWriteHeaderCode(code)

	// Handle informational headers
	if code >= 100 && code <= 199 {
		// Per RFC 8297 we must not clear the current header map
		h := rws.handlerHeader

		// Clone before deleting body-framing fields so the handler's own
		// map is left untouched for the final response.
		_, cl := h["Content-Length"]
		_, te := h["Transfer-Encoding"]
		if cl || te {
			h = cloneHeader(h)
			h.Del("Content-Length")
			h.Del("Transfer-Encoding")
		}

		// NOTE(review): the error from writeHeaders is discarded on this
		// path — presumably 1xx delivery is best-effort; confirm.
		rws.conn.writeHeaders(rws.stream, &writeResHeaders{
			streamID:    rws.stream.id,
			httpResCode: code,
			h:           h,
			endStream:   rws.handlerDone && !rws.hasTrailers(),
		})

		return
	}

	rws.wroteHeader = true
	rws.status = code
	// Snapshot the header map so later mutations by the handler don't
	// change what writeChunk sends.
	if len(rws.handlerHeader) > 0 {
		rws.snapHeader = cloneHeader(rws.handlerHeader)
	}
}
  2946  
  2947  func cloneHeader(h Header) Header {
  2948  	h2 := make(Header, len(h))
  2949  	for k, vv := range h {
  2950  		vv2 := make([]string, len(vv))
  2951  		copy(vv2, vv)
  2952  		h2[k] = vv2
  2953  	}
  2954  	return h2
  2955  }
  2956  
// The Life Of A Write is like this:
//
// * Handler calls w.Write or w.WriteString ->
// * -> rws.bw (*bufio.Writer) ->
// * (Handler might call Flush)
// * -> chunkWriter{rws}
// * -> responseWriterState.writeChunk(p []byte)
//      (most of the magic; see comment there)
func (w *responseWriter) Write(p []byte) (n int, err error) {
	return w.write(len(p), p, "")
}
  2968  
// WriteString writes s to the response body without converting it to a
// []byte first; it shares all other behavior with Write.
func (w *responseWriter) WriteString(s string) (n int, err error) {
	return w.write(len(s), nil, s)
}
  2972  
  2973  // either dataB or dataS is non-zero.
  2974  func (w *responseWriter) write(lenData int, dataB []byte, dataS string) (n int, err error) {
  2975  	rws := w.rws
  2976  	if rws == nil {
  2977  		panic("Write called after Handler finished")
  2978  	}
  2979  	if !rws.wroteHeader {
  2980  		w.WriteHeader(200)
  2981  	}
  2982  	if !bodyAllowedForStatus(rws.status) {
  2983  		return 0, ErrBodyNotAllowed
  2984  	}
  2985  	rws.wroteBytes += int64(len(dataB)) + int64(len(dataS)) // only one can be set
  2986  	if rws.sentContentLen != 0 && rws.wroteBytes > rws.sentContentLen {
  2987  		// TODO: send a RST_STREAM
  2988  		return 0, errors.New("http2: handler wrote more than declared Content-Length")
  2989  	}
  2990  
  2991  	if dataB != nil {
  2992  		return rws.bw.Write(dataB)
  2993  	} else {
  2994  		return rws.bw.WriteString(dataS)
  2995  	}
  2996  }
  2997  
// handlerDone finishes the response after the handler returns: it flushes
// any remaining buffered data, detaches the state from w (subsequent calls
// on w deliberately crash — see responseWriter's doc), and returns the
// responseWriterState to the pool for reuse.
func (w *responseWriter) handlerDone() {
	rws := w.rws
	rws.handlerDone = true
	w.Flush()
	w.rws = nil
	responseWriterStatePool.Put(rws)
}
  3005  
// Push errors, returned by responseWriter.Push.
var (
	ErrRecursivePush    = errors.New("http2: recursive push not allowed")
	ErrPushLimitReached = errors.New("http2: push would exceed peer's SETTINGS_MAX_CONCURRENT_STREAMS")
)
  3011  
// Push requests a server push of target on behalf of the current stream's
// handler. It validates the target URL, method, and promised headers, then
// hands a startPushRequest to the serve goroutine and blocks until the
// push has been queued or rejected. It must not be called on a stream that
// was itself created by a push.
func (w *responseWriter) Push(target, method string, header Header) error {
	st := w.rws.stream
	sc := st.sc
	sc.serveG.checkNotOn()

	// No recursive pushes: "PUSH_PROMISE frames MUST only be sent on a peer-initiated stream."
	// http://tools.ietf.org/html/rfc7540#section-6.6
	if st.isPushed() {
		return ErrRecursivePush
	}

	// Default options.
	if method == "" {
		method = "GET"
	}
	if header == nil {
		header = Header{}
	}
	wantScheme := "http"
	if w.rws.req.TLS != nil {
		wantScheme = "https"
	}

	// Validate the request.
	u, err := url.Parse(target)
	if err != nil {
		return err
	}
	if u.Scheme == "" {
		// Relative target: resolve against the current request's
		// scheme and host.
		if !strings.HasPrefix(target, "/") {
			return fmt.Errorf("target must be an absolute URL or an absolute path: %q", target)
		}
		u.Scheme = wantScheme
		u.Host = w.rws.req.Host
	} else {
		if u.Scheme != wantScheme {
			return fmt.Errorf("cannot push URL with scheme %q from request with scheme %q", u.Scheme, wantScheme)
		}
		if u.Host == "" {
			return errors.New("URL must have a host")
		}
	}
	for k := range header {
		if strings.HasPrefix(k, ":") {
			return fmt.Errorf("promised request headers cannot include pseudo header %q", k)
		}
		// These headers are meaningful only if the request has a body,
		// but PUSH_PROMISE requests cannot have a body.
		// http://tools.ietf.org/html/rfc7540#section-8.2
		// Also disallow Host, since the promised URL must be absolute.
		if asciiEqualFold(k, "content-length") ||
			asciiEqualFold(k, "content-encoding") ||
			asciiEqualFold(k, "trailer") ||
			asciiEqualFold(k, "te") ||
			asciiEqualFold(k, "expect") ||
			asciiEqualFold(k, "host") {
			return fmt.Errorf("promised request headers cannot include %q", k)
		}
	}
	if err := checkValidHTTP2RequestHeaders(header); err != nil {
		return err
	}

	// The RFC effectively limits promised requests to GET and HEAD:
	// "Promised requests MUST be cacheable [GET, HEAD, or POST], and MUST be safe [GET or HEAD]"
	// http://tools.ietf.org/html/rfc7540#section-8.2
	if method != "GET" && method != "HEAD" {
		return fmt.Errorf("method %q must be GET or HEAD", method)
	}

	msg := &startPushRequest{
		parent: st,
		method: method,
		url:    u,
		header: cloneHeader(header),
		done:   sc.srv.state.getErrChan(),
	}

	// Hand the request to the serve goroutine, bailing out if the
	// connection or stream goes away first.
	select {
	case <-sc.doneServing:
		return errClientDisconnected
	case <-st.cw:
		return errStreamClosed
	case sc.serveMsgCh <- msg:
	}

	// Wait for the serve goroutine's verdict.
	select {
	case <-sc.doneServing:
		return errClientDisconnected
	case <-st.cw:
		return errStreamClosed
	case err := <-msg.done:
		sc.srv.state.putErrChan(msg.done)
		return err
	}
}
  3108  
// startPushRequest is the message responseWriter.Push sends to the serve
// goroutine to begin a server push. The outcome (nil or an error) is
// delivered on done.
type startPushRequest struct {
	parent *stream    // the peer-initiated stream the push hangs off
	method string     // "GET" or "HEAD" (validated by Push)
	url    *url.URL   // absolute URL to push
	header Header     // promised request headers (no pseudo-headers)
	done   chan error // receives the result of startPush
}
  3116  
// startPush runs on the serve goroutine and carries out the push requested
// by responseWriter.Push: it checks connection state, queues a PUSH_PROMISE
// frame, and — once a promised stream ID is allocated at frame-write time —
// starts the handler for the promised request. The result is delivered on
// msg.done.
func (sc *serverConn) startPush(msg *startPushRequest) {
	sc.serveG.check()

	// http://tools.ietf.org/html/rfc7540#section-6.6.
	// PUSH_PROMISE frames MUST only be sent on a peer-initiated stream that
	// is in either the "open" or "half-closed (remote)" state.
	if msg.parent.state != stateOpen && msg.parent.state != stateHalfClosedRemote {
		// responseWriter.Push checks that the stream is peer-initiated.
		msg.done <- errStreamClosed
		return
	}

	// http://tools.ietf.org/html/rfc7540#section-6.6.
	if !sc.pushEnabled {
		msg.done <- ErrNotSupported
		return
	}

	// PUSH_PROMISE frames must be sent in increasing order by stream ID, so
	// we allocate an ID for the promised stream lazily, when the PUSH_PROMISE
	// is written. Once the ID is allocated, we start the request handler.
	allocatePromisedID := func() (uint32, error) {
		sc.serveG.check()

		// Check this again, just in case. Technically, we might have received
		// an updated SETTINGS by the time we got around to writing this frame.
		if !sc.pushEnabled {
			return 0, ErrNotSupported
		}
		// http://tools.ietf.org/html/rfc7540#section-6.5.2.
		if sc.curPushedStreams+1 > sc.clientMaxStreams {
			return 0, ErrPushLimitReached
		}

		// http://tools.ietf.org/html/rfc7540#section-5.1.1.
		// Streams initiated by the server MUST use even-numbered identifiers.
		// A server that is unable to establish a new stream identifier can send a GOAWAY
		// frame so that the client is forced to open a new connection for new streams.
		if sc.maxPushPromiseID+2 >= 1<<31 {
			sc.startGracefulShutdownInternal()
			return 0, ErrPushLimitReached
		}
		sc.maxPushPromiseID += 2
		promisedID := sc.maxPushPromiseID

		// http://tools.ietf.org/html/rfc7540#section-8.2.
		// Strictly speaking, the new stream should start in "reserved (local)", then
		// transition to "half closed (remote)" after sending the initial HEADERS, but
		// we start in "half closed (remote)" for simplicity.
		// See further comments at the definition of stateHalfClosedRemote.
		promised := sc.newStream(promisedID, msg.parent.id, stateHalfClosedRemote, defaultRFC9218Priority(sc.priorityAware && !sc.hasIntermediary))
		rw, req, err := sc.newWriterAndRequestNoBody(promised, httpcommon.ServerRequestParam{
			Method:    msg.method,
			Scheme:    msg.url.Scheme,
			Authority: msg.url.Host,
			Path:      msg.url.RequestURI(),
			Header:    cloneHeader(msg.header), // clone since handler runs concurrently with writing the PUSH_PROMISE
		})
		if err != nil {
			// Should not happen, since we've already validated msg.url.
			panic(fmt.Sprintf("newWriterAndRequestNoBody(%+v): %v", msg.url, err))
		}

		sc.curHandlers++
		go sc.runHandler(rw, req, sc.handler.ServeHTTP)
		return promisedID, nil
	}

	sc.writeFrame(FrameWriteRequest{
		write: &writePushPromise{
			streamID:           msg.parent.id,
			method:             msg.method,
			url:                msg.url,
			h:                  msg.header,
			allocatePromisedID: allocatePromisedID,
		},
		stream: msg.parent,
		done:   msg.done,
	})
}
  3197  
  3198  // foreachHeaderElement splits v according to the "#rule" construction
  3199  // in RFC 7230 section 7 and calls fn for each non-empty element.
  3200  func foreachHeaderElement(v string, fn func(string)) {
  3201  	v = textproto.TrimString(v)
  3202  	if v == "" {
  3203  		return
  3204  	}
  3205  	if !strings.Contains(v, ",") {
  3206  		fn(v)
  3207  		return
  3208  	}
  3209  	for _, f := range strings.Split(v, ",") {
  3210  		if f = textproto.TrimString(f); f != "" {
  3211  			fn(f)
  3212  		}
  3213  	}
  3214  }
  3215  
// connHeaders lists the connection-specific header fields that are
// prohibited in HTTP/2 messages.
// From http://httpwg.org/specs/rfc7540.html#rfc.section.8.1.2.2
var connHeaders = []string{
	"Connection",
	"Keep-Alive",
	"Proxy-Connection",
	"Transfer-Encoding",
	"Upgrade",
}
  3224  
  3225  // checkValidHTTP2RequestHeaders checks whether h is a valid HTTP/2 request,
  3226  // per RFC 7540 Section 8.1.2.2.
  3227  // The returned error is reported to users.
  3228  func checkValidHTTP2RequestHeaders(h Header) error {
  3229  	for _, k := range connHeaders {
  3230  		if _, ok := h[k]; ok {
  3231  			return fmt.Errorf("request header %q is not valid in HTTP/2", k)
  3232  		}
  3233  	}
  3234  	te := h["Te"]
  3235  	if len(te) > 0 && (len(te) > 1 || (te[0] != "trailers" && te[0] != "")) {
  3236  		return errors.New(`request header "TE" may only be "trailers" in HTTP/2`)
  3237  	}
  3238  	return nil
  3239  }
  3240  
// serve400Handler responds to all requests with a 400 Bad Request
// whose plain-text body is err's message.
type serve400Handler struct {
	err error // reported to the client in the response body
}
  3244  
  3245  func (handler serve400Handler) ServeHTTP(w *ResponseWriter, r *ServerRequest) {
  3246  	const statusBadRequest = 400
  3247  
  3248  	// TODO: Dedup with http.Error?
  3249  	h := w.Header()
  3250  	h.Del("Content-Length")
  3251  	h.Set("Content-Type", "text/plain; charset=utf-8")
  3252  	h.Set("X-Content-Type-Options", "nosniff")
  3253  	w.WriteHeader(statusBadRequest)
  3254  	fmt.Fprintln(w, handler.err.Error())
  3255  }
  3256  
  3257  // h1ServerKeepAlivesDisabled reports whether hs has its keep-alives
  3258  // disabled. See comments on h1ServerShutdownChan above for why
  3259  // the code is written this way.
  3260  func h1ServerKeepAlivesDisabled(hs ServerConfig) bool {
  3261  	return !hs.DoKeepAlives()
  3262  }
  3263  
  3264  func (sc *serverConn) countError(name string, err error) error {
  3265  	if sc == nil || sc.srv == nil {
  3266  		return err
  3267  	}
  3268  	f := sc.countErrorFunc
  3269  	if f == nil {
  3270  		return err
  3271  	}
  3272  	var typ string
  3273  	var code ErrCode
  3274  	switch e := err.(type) {
  3275  	case ConnectionError:
  3276  		typ = "conn"
  3277  		code = ErrCode(e)
  3278  	case StreamError:
  3279  		typ = "stream"
  3280  		code = ErrCode(e.Code)
  3281  	default:
  3282  		return err
  3283  	}
  3284  	codeStr := errCodeName[code]
  3285  	if codeStr == "" {
  3286  		codeStr = strconv.Itoa(int(code))
  3287  	}
  3288  	f(fmt.Sprintf("%s_%s_%s", typ, codeStr, name))
  3289  	return err
  3290  }
  3291  

View as plain text