\n")
- cacheBust := time.Now().UnixNano()
- for y := 0; y < yt; y++ {
- for x := 0; x < xt; x++ {
- fmt.Fprintf(w, "",
- tileSize, tileSize, x, y, cacheBust, ms)
- }
- io.WriteString(w, " \n")
- }
- io.WriteString(w, `
-
-<a href='/'>&lt;&lt; Back to Go HTTP/2 demo server</a>`)
- })
-}
-
-func httpsHost() string {
- if *hostHTTPS != "" {
- return *hostHTTPS
- }
- if v := *httpsAddr; strings.HasPrefix(v, ":") {
- return "localhost" + v
- } else {
- return v
- }
-}
-
-func httpHost() string {
- if *hostHTTP != "" {
- return *hostHTTP
- }
- if v := *httpAddr; strings.HasPrefix(v, ":") {
- return "localhost" + v
- } else {
- return v
- }
-}
-
-func serveProdTLS() error {
- c, err := googlestorage.NewServiceClient()
- if err != nil {
- return err
- }
- slurp := func(key string) ([]byte, error) {
- const bucket = "http2-demo-server-tls"
- rc, _, err := c.GetObject(&googlestorage.Object{
- Bucket: bucket,
- Key: key,
- })
- if err != nil {
- return nil, fmt.Errorf("Error fetching GCS object %q in bucket %q: %v", key, bucket, err)
- }
- defer rc.Close()
- return ioutil.ReadAll(rc)
- }
- certPem, err := slurp("http2.golang.org.chained.pem")
- if err != nil {
- return err
- }
- keyPem, err := slurp("http2.golang.org.key")
- if err != nil {
- return err
- }
- cert, err := tls.X509KeyPair(certPem, keyPem)
- if err != nil {
- return err
- }
- srv := &http.Server{
- TLSConfig: &tls.Config{
- Certificates: []tls.Certificate{cert},
- },
- }
- http2.ConfigureServer(srv, &http2.Server{})
- ln, err := net.Listen("tcp", ":443")
- if err != nil {
- return err
- }
- return srv.Serve(tls.NewListener(tcpKeepAliveListener{ln.(*net.TCPListener)}, srv.TLSConfig))
-}
-
-type tcpKeepAliveListener struct {
- *net.TCPListener
-}
-
-func (ln tcpKeepAliveListener) Accept() (c net.Conn, err error) {
- tc, err := ln.AcceptTCP()
- if err != nil {
- return
- }
- tc.SetKeepAlive(true)
- tc.SetKeepAlivePeriod(3 * time.Minute)
- return tc, nil
-}
-
-func serveProd() error {
- errc := make(chan error, 2)
- go func() { errc <- http.ListenAndServe(":80", nil) }()
- go func() { errc <- serveProdTLS() }()
- return <-errc
-}
-
-const idleTimeout = 5 * time.Minute
-const activeTimeout = 10 * time.Minute
-
-// TODO: put this into the standard library and actually send
-// PING frames and GOAWAY, etc: golang.org/issue/14204
-func idleTimeoutHook() func(net.Conn, http.ConnState) {
- var mu sync.Mutex
- m := map[net.Conn]*time.Timer{}
- return func(c net.Conn, cs http.ConnState) {
- mu.Lock()
- defer mu.Unlock()
- if t, ok := m[c]; ok {
- delete(m, c)
- t.Stop()
- }
- var d time.Duration
- switch cs {
- case http.StateNew, http.StateIdle:
- d = idleTimeout
- case http.StateActive:
- d = activeTimeout
- default:
- return
- }
- m[c] = time.AfterFunc(d, func() {
- log.Printf("closing idle conn %v after %v", c.RemoteAddr(), d)
- go c.Close()
- })
- }
-}
-
-func main() {
- var srv http.Server
- flag.BoolVar(&http2.VerboseLogs, "verbose", false, "Verbose HTTP/2 debugging.")
- flag.Parse()
- srv.Addr = *httpsAddr
- srv.ConnState = idleTimeoutHook()
-
- registerHandlers()
-
- if *prod {
- *hostHTTP = "http2.golang.org"
- *hostHTTPS = "http2.golang.org"
- log.Fatal(serveProd())
- }
-
- url := "https://" + httpsHost() + "/"
- log.Printf("Listening on " + url)
- http2.ConfigureServer(&srv, &http2.Server{})
-
- if *httpAddr != "" {
- go func() {
- log.Printf("Listening on http://" + httpHost() + "/ (for unencrypted HTTP/1)")
- log.Fatal(http.ListenAndServe(*httpAddr, nil))
- }()
- }
-
- go func() {
- log.Fatal(srv.ListenAndServeTLS("server.crt", "server.key"))
- }()
- select {}
-}
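The deleted h2demo server closes idle connections itself: `idleTimeoutHook` above arms a per-connection timer through `http.Server.ConnState` and fires `Close` when a connection lingers in the new or idle state. Below is a minimal, self-contained sketch of that same pattern; the address, timeout, and handler are illustrative and not taken from the deleted file.

```go
package main

import (
	"io"
	"log"
	"net"
	"net/http"
	"sync"
	"time"
)

// connTimeoutHook returns a ConnState callback that closes any connection
// sitting in StateNew or StateIdle for longer than idle.
func connTimeoutHook(idle time.Duration) func(net.Conn, http.ConnState) {
	var mu sync.Mutex
	timers := map[net.Conn]*time.Timer{}
	return func(c net.Conn, cs http.ConnState) {
		mu.Lock()
		defer mu.Unlock()
		if t, ok := timers[c]; ok {
			delete(timers, c)
			t.Stop()
		}
		switch cs {
		case http.StateNew, http.StateIdle:
			timers[c] = time.AfterFunc(idle, func() { c.Close() })
		}
	}
}

func main() {
	srv := &http.Server{
		Addr:      ":8080",                      // illustrative address
		ConnState: connTimeoutHook(30 * time.Second), // illustrative timeout
		Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			io.WriteString(w, "hello\n")
		}),
	}
	log.Fatal(srv.ListenAndServe())
}
```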
diff --git a/vendor/golang.org/x/net/http2/h2demo/launch.go b/vendor/golang.org/x/net/http2/h2demo/launch.go
deleted file mode 100644
index 13b1cfd7..00000000
--- a/vendor/golang.org/x/net/http2/h2demo/launch.go
+++ /dev/null
@@ -1,302 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-package main
-
-import (
- "bufio"
- "bytes"
- "encoding/json"
- "flag"
- "fmt"
- "io"
- "io/ioutil"
- "log"
- "net/http"
- "os"
- "strings"
- "time"
-
- "golang.org/x/oauth2"
- "golang.org/x/oauth2/google"
- compute "google.golang.org/api/compute/v1"
-)
-
-var (
- proj = flag.String("project", "symbolic-datum-552", "name of Project")
- zone = flag.String("zone", "us-central1-a", "GCE zone")
- mach = flag.String("machinetype", "n1-standard-1", "Machine type")
- instName = flag.String("instance_name", "http2-demo", "Name of VM instance.")
- sshPub = flag.String("ssh_public_key", "", "ssh public key file to authorize. Can modify later in Google's web UI anyway.")
- staticIP = flag.String("static_ip", "130.211.116.44", "Static IP to use. If empty, automatic.")
-
- writeObject = flag.String("write_object", "", "If non-empty, a VM isn't created and the flag value is Google Cloud Storage bucket/object to write. The contents from stdin.")
- publicObject = flag.Bool("write_object_is_public", false, "Whether the object created by --write_object should be public.")
-)
-
-func readFile(v string) string {
- slurp, err := ioutil.ReadFile(v)
- if err != nil {
- log.Fatalf("Error reading %s: %v", v, err)
- }
- return strings.TrimSpace(string(slurp))
-}
-
-var config = &oauth2.Config{
- // The client-id and secret should be for an "Installed Application" when using
- // the CLI. Later we'll use a web application with a callback.
- ClientID: readFile("client-id.dat"),
- ClientSecret: readFile("client-secret.dat"),
- Endpoint: google.Endpoint,
- Scopes: []string{
- compute.DevstorageFullControlScope,
- compute.ComputeScope,
- "https://www.googleapis.com/auth/sqlservice",
- "https://www.googleapis.com/auth/sqlservice.admin",
- },
- RedirectURL: "urn:ietf:wg:oauth:2.0:oob",
-}
-
-const baseConfig = `#cloud-config
-coreos:
- units:
- - name: h2demo.service
- command: start
- content: |
- [Unit]
- Description=HTTP2 Demo
-
- [Service]
- ExecStartPre=/bin/bash -c 'mkdir -p /opt/bin && curl -s -o /opt/bin/h2demo http://storage.googleapis.com/http2-demo-server-tls/h2demo && chmod +x /opt/bin/h2demo'
- ExecStart=/opt/bin/h2demo --prod
- RestartSec=5s
- Restart=always
- Type=simple
-
- [Install]
- WantedBy=multi-user.target
-`
-
-func main() {
- flag.Parse()
- if *proj == "" {
- log.Fatalf("Missing --project flag")
- }
- prefix := "https://www.googleapis.com/compute/v1/projects/" + *proj
- machType := prefix + "/zones/" + *zone + "/machineTypes/" + *mach
-
- const tokenFileName = "token.dat"
- tokenFile := tokenCacheFile(tokenFileName)
- tokenSource := oauth2.ReuseTokenSource(nil, tokenFile)
- token, err := tokenSource.Token()
- if err != nil {
- if *writeObject != "" {
- log.Fatalf("Can't use --write_object without a valid token.dat file already cached.")
- }
- log.Printf("Error getting token from %s: %v", tokenFileName, err)
- log.Printf("Get auth code from %v", config.AuthCodeURL("my-state"))
- fmt.Print("\nEnter auth code: ")
- sc := bufio.NewScanner(os.Stdin)
- sc.Scan()
- authCode := strings.TrimSpace(sc.Text())
- token, err = config.Exchange(oauth2.NoContext, authCode)
- if err != nil {
- log.Fatalf("Error exchanging auth code for a token: %v", err)
- }
- if err := tokenFile.WriteToken(token); err != nil {
- log.Fatalf("Error writing to %s: %v", tokenFileName, err)
- }
- tokenSource = oauth2.ReuseTokenSource(token, nil)
- }
-
- oauthClient := oauth2.NewClient(oauth2.NoContext, tokenSource)
-
- if *writeObject != "" {
- writeCloudStorageObject(oauthClient)
- return
- }
-
- computeService, _ := compute.New(oauthClient)
-
- natIP := *staticIP
- if natIP == "" {
- // Try to find it by name.
- aggAddrList, err := computeService.Addresses.AggregatedList(*proj).Do()
- if err != nil {
- log.Fatal(err)
- }
- // http://godoc.org/code.google.com/p/google-api-go-client/compute/v1#AddressAggregatedList
- IPLoop:
- for _, asl := range aggAddrList.Items {
- for _, addr := range asl.Addresses {
- if addr.Name == *instName+"-ip" && addr.Status == "RESERVED" {
- natIP = addr.Address
- break IPLoop
- }
- }
- }
- }
-
- cloudConfig := baseConfig
- if *sshPub != "" {
- key := strings.TrimSpace(readFile(*sshPub))
- cloudConfig += fmt.Sprintf("\nssh_authorized_keys:\n - %s\n", key)
- }
- if os.Getenv("USER") == "bradfitz" {
- cloudConfig += fmt.Sprintf("\nssh_authorized_keys:\n - %s\n", "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAIEAwks9dwWKlRC+73gRbvYtVg0vdCwDSuIlyt4z6xa/YU/jTDynM4R4W10hm2tPjy8iR1k8XhDv4/qdxe6m07NjG/By1tkmGpm1mGwho4Pr5kbAAy/Qg+NLCSdAYnnE00FQEcFOC15GFVMOW2AzDGKisReohwH9eIzHPzdYQNPRWXE= bradfitz@papag.bradfitz.com")
- }
- const maxCloudConfig = 32 << 10 // per compute API docs
- if len(cloudConfig) > maxCloudConfig {
- log.Fatalf("cloud config length of %d bytes is over %d byte limit", len(cloudConfig), maxCloudConfig)
- }
-
- instance := &compute.Instance{
- Name: *instName,
- Description: "Go Builder",
- MachineType: machType,
- Disks: []*compute.AttachedDisk{instanceDisk(computeService)},
- Tags: &compute.Tags{
- Items: []string{"http-server", "https-server"},
- },
- Metadata: &compute.Metadata{
- Items: []*compute.MetadataItems{
- {
- Key: "user-data",
- Value: &cloudConfig,
- },
- },
- },
- NetworkInterfaces: []*compute.NetworkInterface{
- &compute.NetworkInterface{
- AccessConfigs: []*compute.AccessConfig{
- &compute.AccessConfig{
- Type: "ONE_TO_ONE_NAT",
- Name: "External NAT",
- NatIP: natIP,
- },
- },
- Network: prefix + "/global/networks/default",
- },
- },
- ServiceAccounts: []*compute.ServiceAccount{
- {
- Email: "default",
- Scopes: []string{
- compute.DevstorageFullControlScope,
- compute.ComputeScope,
- },
- },
- },
- }
-
- log.Printf("Creating instance...")
- op, err := computeService.Instances.Insert(*proj, *zone, instance).Do()
- if err != nil {
- log.Fatalf("Failed to create instance: %v", err)
- }
- opName := op.Name
- log.Printf("Created. Waiting on operation %v", opName)
-OpLoop:
- for {
- time.Sleep(2 * time.Second)
- op, err := computeService.ZoneOperations.Get(*proj, *zone, opName).Do()
- if err != nil {
- log.Fatalf("Failed to get op %s: %v", opName, err)
- }
- switch op.Status {
- case "PENDING", "RUNNING":
- log.Printf("Waiting on operation %v", opName)
- continue
- case "DONE":
- if op.Error != nil {
- for _, operr := range op.Error.Errors {
- log.Printf("Error: %+v", operr)
- }
- log.Fatalf("Failed to start.")
- }
- log.Printf("Success. %+v", op)
- break OpLoop
- default:
- log.Fatalf("Unknown status %q: %+v", op.Status, op)
- }
- }
-
- inst, err := computeService.Instances.Get(*proj, *zone, *instName).Do()
- if err != nil {
- log.Fatalf("Error getting instance after creation: %v", err)
- }
- ij, _ := json.MarshalIndent(inst, "", " ")
- log.Printf("Instance: %s", ij)
-}
-
-func instanceDisk(svc *compute.Service) *compute.AttachedDisk {
- const imageURL = "https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-stable-444-5-0-v20141016"
- diskName := *instName + "-disk"
-
- return &compute.AttachedDisk{
- AutoDelete: true,
- Boot: true,
- Type: "PERSISTENT",
- InitializeParams: &compute.AttachedDiskInitializeParams{
- DiskName: diskName,
- SourceImage: imageURL,
- DiskSizeGb: 50,
- },
- }
-}
-
-func writeCloudStorageObject(httpClient *http.Client) {
- content := os.Stdin
- const maxSlurp = 1 << 20
- var buf bytes.Buffer
- n, err := io.CopyN(&buf, content, maxSlurp)
- if err != nil && err != io.EOF {
- log.Fatalf("Error reading from stdin: %v, %v", n, err)
- }
- contentType := http.DetectContentType(buf.Bytes())
-
- req, err := http.NewRequest("PUT", "https://storage.googleapis.com/"+*writeObject, io.MultiReader(&buf, content))
- if err != nil {
- log.Fatal(err)
- }
- req.Header.Set("x-goog-api-version", "2")
- if *publicObject {
- req.Header.Set("x-goog-acl", "public-read")
- }
- req.Header.Set("Content-Type", contentType)
- res, err := httpClient.Do(req)
- if err != nil {
- log.Fatal(err)
- }
- if res.StatusCode != 200 {
- res.Write(os.Stderr)
- log.Fatalf("Failed.")
- }
- log.Printf("Success.")
- os.Exit(0)
-}
-
-type tokenCacheFile string
-
-func (f tokenCacheFile) Token() (*oauth2.Token, error) {
- slurp, err := ioutil.ReadFile(string(f))
- if err != nil {
- return nil, err
- }
- t := new(oauth2.Token)
- if err := json.Unmarshal(slurp, t); err != nil {
- return nil, err
- }
- return t, nil
-}
-
-func (f tokenCacheFile) WriteToken(t *oauth2.Token) error {
- jt, err := json.Marshal(t)
- if err != nil {
- return err
- }
- return ioutil.WriteFile(string(f), jt, 0600)
-}
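One detail worth calling out from `writeCloudStorageObject` above: it determines the Content-Type of a stream without buffering all of it, by copying a bounded prefix into memory, sniffing that prefix, and then splicing it back in front of the rest with `io.MultiReader`. A small sketch of just that trick follows; the 512-byte bound reflects how much `http.DetectContentType` actually inspects, and everything else is illustrative.

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
	"os"
)

func main() {
	// Copy a bounded prefix of stdin into memory for sniffing.
	const maxSniff = 512 // DetectContentType only looks at the first 512 bytes
	var head bytes.Buffer
	if _, err := io.CopyN(&head, os.Stdin, maxSniff); err != nil && err != io.EOF {
		panic(err)
	}
	contentType := http.DetectContentType(head.Bytes())

	// Splice the sniffed prefix back in front of the unread remainder,
	// so the full stream can still be sent as a request body.
	body := io.MultiReader(&head, os.Stdin)

	fmt.Println("detected:", contentType)
	_ = body // would be handed to http.NewRequest("PUT", url, body), as in launch.go
}
```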
diff --git a/vendor/golang.org/x/net/http2/h2demo/rootCA.key b/vendor/golang.org/x/net/http2/h2demo/rootCA.key
deleted file mode 100644
index a15a6aba..00000000
--- a/vendor/golang.org/x/net/http2/h2demo/rootCA.key
+++ /dev/null
@@ -1,27 +0,0 @@
------BEGIN RSA PRIVATE KEY-----
-MIIEowIBAAKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSSR8Od0+9Q
-62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoTZjkUygby
-XDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYkJfODVGnV
-mr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3mOoLb4yJ
-JQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYWcaiW8LWZ
-SUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABAoIBAFFHV7JMAqPWnMYA
-nezY6J81v9+XN+7xABNWM2Q8uv4WdksbigGLTXR3/680Z2hXqJ7LMeC5XJACFT/e
-/Gr0vmpgOCygnCPfjGehGKpavtfksXV3edikUlnCXsOP1C//c1bFL+sMYmFCVgTx
-qYdDK8yKzXNGrKYT6q5YG7IglyRNV1rsQa8lM/5taFYiD1Ck/3tQi3YIq8Lcuser
-hrxsMABcQ6mi+EIvG6Xr4mfJug0dGJMHG4RG1UGFQn6RXrQq2+q53fC8ZbVUSi0j
-NQ918aKFzktwv+DouKU0ME4I9toks03gM860bAL7zCbKGmwR3hfgX/TqzVCWpG9E
-LDVfvekCgYEA8fk9N53jbBRmULUGEf4qWypcLGiZnNU0OeXWpbPV9aa3H0VDytA7
-8fCN2dPAVDPqlthMDdVe983NCNwp2Yo8ZimDgowyIAKhdC25s1kejuaiH9OAPj3c
-0f8KbriYX4n8zNHxFwK6Ae3pQ6EqOLJVCUsziUaZX9nyKY5aZlyX6xcCgYEAwjws
-K62PjC64U5wYddNLp+kNdJ4edx+a7qBb3mEgPvSFT2RO3/xafJyG8kQB30Mfstjd
-bRxyUV6N0vtX1zA7VQtRUAvfGCecpMo+VQZzcHXKzoRTnQ7eZg4Lmj5fQ9tOAKAo
-QCVBoSW/DI4PZL26CAMDcAba4Pa22ooLapoRIQsCgYA6pIfkkbxLNkpxpt2YwLtt
-Kr/590O7UaR9n6k8sW/aQBRDXNsILR1KDl2ifAIxpf9lnXgZJiwE7HiTfCAcW7c1
-nzwDCI0hWuHcMTS/NYsFYPnLsstyyjVZI3FY0h4DkYKV9Q9z3zJLQ2hz/nwoD3gy
-b2pHC7giFcTts1VPV4Nt8wKBgHeFn4ihHJweg76vZz3Z78w7VNRWGFklUalVdDK7
-gaQ7w2y/ROn/146mo0OhJaXFIFRlrpvdzVrU3GDf2YXJYDlM5ZRkObwbZADjksev
-WInzcgDy3KDg7WnPasRXbTfMU4t/AkW2p1QKbi3DnSVYuokDkbH2Beo45vxDxhKr
-C69RAoGBAIyo3+OJenoZmoNzNJl2WPW5MeBUzSh8T/bgyjFTdqFHF5WiYRD/lfHj
-x9Glyw2nutuT4hlOqHvKhgTYdDMsF2oQ72fe3v8Q5FU7FuKndNPEAyvKNXZaShVA
-hnlhv5DjXKb0wFWnt5PCCiQLtzG0yyHaITrrEme7FikkIcTxaX/Y
------END RSA PRIVATE KEY-----
diff --git a/vendor/golang.org/x/net/http2/h2demo/rootCA.pem b/vendor/golang.org/x/net/http2/h2demo/rootCA.pem
deleted file mode 100644
index 3a323e77..00000000
--- a/vendor/golang.org/x/net/http2/h2demo/rootCA.pem
+++ /dev/null
@@ -1,26 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV
-BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG
-A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3
-DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0
-NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG
-cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv
-c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B
-AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS
-R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT
-ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk
-JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3
-mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW
-caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G
-A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt
-hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB
-MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES
-MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv
-bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h
-U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao
-eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4
-UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD
-58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n
-sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF
-kPe6XoSbiLm/kxk32T0=
------END CERTIFICATE-----
diff --git a/vendor/golang.org/x/net/http2/h2demo/rootCA.srl b/vendor/golang.org/x/net/http2/h2demo/rootCA.srl
deleted file mode 100644
index 6db38918..00000000
--- a/vendor/golang.org/x/net/http2/h2demo/rootCA.srl
+++ /dev/null
@@ -1 +0,0 @@
-E2CE26BF3285059C
diff --git a/vendor/golang.org/x/net/http2/h2demo/server.crt b/vendor/golang.org/x/net/http2/h2demo/server.crt
deleted file mode 100644
index c59059bd..00000000
--- a/vendor/golang.org/x/net/http2/h2demo/server.crt
+++ /dev/null
@@ -1,20 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIDPjCCAiYCCQDizia/MoUFnDANBgkqhkiG9w0BAQUFADB7MQswCQYDVQQGEwJV
-UzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBGcmFuY2lzY28xFDASBgNVBAoT
-C0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhvc3QxHTAbBgkqhkiG9w0BCQEW
-DmJyYWRAZGFuZ2EuY29tMB4XDTE0MDcxNTIwNTAyN1oXDTE1MTEyNzIwNTAyN1ow
-RzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNBMQswCQYDVQQHEwJTRjEeMBwGA1UE
-ChMVYnJhZGZpdHogaHR0cDIgc2VydmVyMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A
-MIIBCgKCAQEAs1Y9CyLFrdL8VQWN1WaifDqaZFnoqjHhCMlc1TfG2zA+InDifx2l
-gZD3o8FeNnAcfM2sPlk3+ZleOYw9P/CklFVDlvqmpCv9ss/BEp/dDaWvy1LmJ4c2
-dbQJfmTxn7CV1H3TsVJvKdwFmdoABb41NoBp6+NNO7OtDyhbIMiCI0pL3Nefb3HL
-A7hIMo3DYbORTtJLTIH9W8YKrEWL0lwHLrYFx/UdutZnv+HjdmO6vCN4na55mjws
-/vjKQUmc7xeY7Xe20xDEG2oDKVkL2eD7FfyrYMS3rO1ExP2KSqlXYG/1S9I/fz88
-F0GK7HX55b5WjZCl2J3ERVdnv/0MQv+sYQIDAQABMA0GCSqGSIb3DQEBBQUAA4IB
-AQC0zL+n/YpRZOdulSu9tS8FxrstXqGWoxfe+vIUgqfMZ5+0MkjJ/vW0FqlLDl2R
-rn4XaR3e7FmWkwdDVbq/UB6lPmoAaFkCgh9/5oapMaclNVNnfF3fjCJfRr+qj/iD
-EmJStTIN0ZuUjAlpiACmfnpEU55PafT5Zx+i1yE4FGjw8bJpFoyD4Hnm54nGjX19
-KeCuvcYFUPnBm3lcL0FalF2AjqV02WTHYNQk7YF/oeO7NKBoEgvGvKG3x+xaOeBI
-dwvdq175ZsGul30h+QjrRlXhH/twcuaT3GSdoysDl9cCYE8f1Mk8PD6gan3uBCJU
-90p6/CbU71bGbfpM2PHot2fm
------END CERTIFICATE-----
diff --git a/vendor/golang.org/x/net/http2/h2demo/server.key b/vendor/golang.org/x/net/http2/h2demo/server.key
deleted file mode 100644
index f329c142..00000000
--- a/vendor/golang.org/x/net/http2/h2demo/server.key
+++ /dev/null
@@ -1,27 +0,0 @@
------BEGIN RSA PRIVATE KEY-----
-MIIEowIBAAKCAQEAs1Y9CyLFrdL8VQWN1WaifDqaZFnoqjHhCMlc1TfG2zA+InDi
-fx2lgZD3o8FeNnAcfM2sPlk3+ZleOYw9P/CklFVDlvqmpCv9ss/BEp/dDaWvy1Lm
-J4c2dbQJfmTxn7CV1H3TsVJvKdwFmdoABb41NoBp6+NNO7OtDyhbIMiCI0pL3Nef
-b3HLA7hIMo3DYbORTtJLTIH9W8YKrEWL0lwHLrYFx/UdutZnv+HjdmO6vCN4na55
-mjws/vjKQUmc7xeY7Xe20xDEG2oDKVkL2eD7FfyrYMS3rO1ExP2KSqlXYG/1S9I/
-fz88F0GK7HX55b5WjZCl2J3ERVdnv/0MQv+sYQIDAQABAoIBADQ2spUwbY+bcz4p
-3M66ECrNQTBggP40gYl2XyHxGGOu2xhZ94f9ELf1hjRWU2DUKWco1rJcdZClV6q3
-qwmXvcM2Q/SMS8JW0ImkNVl/0/NqPxGatEnj8zY30d/L8hGFb0orzFu/XYA5gCP4
-NbN2WrXgk3ZLeqwcNxHHtSiJWGJ/fPyeDWAu/apy75u9Xf2GlzBZmV6HYD9EfK80
-LTlI60f5FO487CrJnboL7ovPJrIHn+k05xRQqwma4orpz932rTXnTjs9Lg6KtbQN
-a7PrqfAntIISgr11a66Mng3IYH1lYqJsWJJwX/xHT4WLEy0EH4/0+PfYemJekz2+
-Co62drECgYEA6O9zVJZXrLSDsIi54cfxA7nEZWm5CAtkYWeAHa4EJ+IlZ7gIf9sL
-W8oFcEfFGpvwVqWZ+AsQ70dsjXAv3zXaG0tmg9FtqWp7pzRSMPidifZcQwWkKeTO
-gJnFmnVyed8h6GfjTEu4gxo1/S5U0V+mYSha01z5NTnN6ltKx1Or3b0CgYEAxRgm
-S30nZxnyg/V7ys61AZhst1DG2tkZXEMcA7dYhabMoXPJAP/EfhlWwpWYYUs/u0gS
-Wwmf5IivX5TlYScgmkvb/NYz0u4ZmOXkLTnLPtdKKFXhjXJcHjUP67jYmOxNlJLp
-V4vLRnFxTpffAV+OszzRxsXX6fvruwZBANYJeXUCgYBVouLFsFgfWGYp2rpr9XP4
-KK25kvrBqF6JKOIDB1zjxNJ3pUMKrl8oqccCFoCyXa4oTM2kUX0yWxHfleUjrMq4
-yimwQKiOZmV7fVLSSjSw6e/VfBd0h3gb82ygcplZkN0IclkwTY5SNKqwn/3y07V5
-drqdhkrgdJXtmQ6O5YYECQKBgATERcDToQ1USlI4sKrB/wyv1AlG8dg/IebiVJ4e
-ZAyvcQmClFzq0qS+FiQUnB/WQw9TeeYrwGs1hxBHuJh16srwhLyDrbMvQP06qh8R
-48F8UXXSRec22dV9MQphaROhu2qZdv1AC0WD3tqov6L33aqmEOi+xi8JgbT/PLk5
-c/c1AoGBAI1A/02ryksW6/wc7/6SP2M2rTy4m1sD/GnrTc67EHnRcVBdKO6qH2RY
-nqC8YcveC2ZghgPTDsA3VGuzuBXpwY6wTyV99q6jxQJ6/xcrD9/NUG6Uwv/xfCxl
-IJLeBYEqQundSSny3VtaAUK8Ul1nxpTvVRNwtcyWTo8RHAAyNPWd
------END RSA PRIVATE KEY-----
diff --git a/vendor/golang.org/x/net/http2/h2i/README.md b/vendor/golang.org/x/net/http2/h2i/README.md
deleted file mode 100644
index fb5c5efb..00000000
--- a/vendor/golang.org/x/net/http2/h2i/README.md
+++ /dev/null
@@ -1,97 +0,0 @@
-# h2i
-
-**h2i** is an interactive HTTP/2 ("h2") console debugger. Miss the good ol'
-days of telnetting to your HTTP/1.n servers? We're bringing you
-back.
-
-Features:
-- send raw HTTP/2 frames
- - PING
- - SETTINGS
- - HEADERS
- - etc
-- type in HTTP/1.n and have it auto-HPACK/frame-ify it for HTTP/2
-- pretty print all received HTTP/2 frames from the peer (including HPACK decoding)
-- tab completion of commands, options
-
-Not yet features, but soon:
-- unnecessary CONTINUATION frames on short boundaries, to test peer implementations
-- request bodies (DATA frames)
-- send invalid frames for testing server implementations (supported by underlying Framer)
-
-Later:
-- act like a server
-
-## Installation
-
-```
-$ go get golang.org/x/net/http2/h2i
-$ h2i <hostname>
-```
-
-## Demo
-
-```
-$ h2i
-Usage: h2i <hostname>
-
- -insecure
- Whether to skip TLS cert validation
- -nextproto string
- Comma-separated list of NPN/ALPN protocol names to negotiate. (default "h2,h2-14")
-
-$ h2i google.com
-Connecting to google.com:443 ...
-Connected to 74.125.224.41:443
-Negotiated protocol "h2-14"
-[FrameHeader SETTINGS len=18]
- [MAX_CONCURRENT_STREAMS = 100]
- [INITIAL_WINDOW_SIZE = 1048576]
- [MAX_FRAME_SIZE = 16384]
-[FrameHeader WINDOW_UPDATE len=4]
- Window-Increment = 983041
-
-h2i> PING h2iSayHI
-[FrameHeader PING flags=ACK len=8]
- Data = "h2iSayHI"
-h2i> headers
-(as HTTP/1.1)> GET / HTTP/1.1
-(as HTTP/1.1)> Host: ip.appspot.com
-(as HTTP/1.1)> User-Agent: h2i/brad-n-blake
-(as HTTP/1.1)>
-Opening Stream-ID 1:
- :authority = ip.appspot.com
- :method = GET
- :path = /
- :scheme = https
- user-agent = h2i/brad-n-blake
-[FrameHeader HEADERS flags=END_HEADERS stream=1 len=77]
- :status = "200"
- alternate-protocol = "443:quic,p=1"
- content-length = "15"
- content-type = "text/html"
- date = "Fri, 01 May 2015 23:06:56 GMT"
- server = "Google Frontend"
-[FrameHeader DATA flags=END_STREAM stream=1 len=15]
- "173.164.155.78\n"
-[FrameHeader PING len=8]
- Data = "\x00\x00\x00\x00\x00\x00\x00\x00"
-h2i> ping
-[FrameHeader PING flags=ACK len=8]
- Data = "h2i_ping"
-h2i> ping
-[FrameHeader PING flags=ACK len=8]
- Data = "h2i_ping"
-h2i> ping
-[FrameHeader GOAWAY len=22]
- Last-Stream-ID = 1; Error-Code = PROTOCOL_ERROR (1)
-
-ReadFrame: EOF
-```
-
-## Status
-
-Quick few hour hack. So much yet to do. Feel free to file issues for
-bugs or wishlist items, but [@bmizerany](https://github.com/bmizerany/)
-and I aren't yet accepting pull requests until things settle down.
-
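The demo transcript above shows h2i negotiating "h2" via ALPN and then exchanging raw frames. A minimal sketch of that first step, using the same packages h2i uses; the host name is illustrative and error handling is kept short.

```go
package main

import (
	"crypto/tls"
	"fmt"
	"io"
	"log"

	"golang.org/x/net/http2"
)

func main() {
	// Dial with ALPN so the server can pick "h2", as h2i does.
	conn, err := tls.Dial("tcp", "example.com:443", &tls.Config{
		ServerName: "example.com",
		NextProtos: []string{"h2"},
	})
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	if err := conn.Handshake(); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("negotiated protocol: %q\n", conn.ConnectionState().NegotiatedProtocol)

	// Every HTTP/2 connection starts with the client preface, then a SETTINGS frame.
	if _, err := io.WriteString(conn, http2.ClientPreface); err != nil {
		log.Fatal(err)
	}
	fr := http2.NewFramer(conn, conn)
	if err := fr.WriteSettings(); err != nil {
		log.Fatal(err)
	}
	f, err := fr.ReadFrame()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("first frame from server: %v\n", f)
}
```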
diff --git a/vendor/golang.org/x/net/http2/h2i/h2i.go b/vendor/golang.org/x/net/http2/h2i/h2i.go
deleted file mode 100644
index cf60d56c..00000000
--- a/vendor/golang.org/x/net/http2/h2i/h2i.go
+++ /dev/null
@@ -1,501 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build !plan9,!solaris
-
-/*
-The h2i command is an interactive HTTP/2 console.
-
-Usage:
- $ h2i [flags] <hostname>
-
-Interactive commands in the console: (all parts case-insensitive)
-
- ping [data]
- settings ack
- settings FOO=n BAR=z
- headers (open a new stream by typing HTTP/1.1)
-*/
-package main
-
-import (
- "bufio"
- "bytes"
- "crypto/tls"
- "errors"
- "flag"
- "fmt"
- "io"
- "log"
- "net"
- "net/http"
- "os"
- "regexp"
- "strconv"
- "strings"
-
- "golang.org/x/crypto/ssh/terminal"
- "golang.org/x/net/http2"
- "golang.org/x/net/http2/hpack"
-)
-
-// Flags
-var (
- flagNextProto = flag.String("nextproto", "h2,h2-14", "Comma-separated list of NPN/ALPN protocol names to negotiate.")
- flagInsecure = flag.Bool("insecure", false, "Whether to skip TLS cert validation")
- flagSettings = flag.String("settings", "empty", "comma-separated list of KEY=value settings for the initial SETTINGS frame. The magic value 'empty' sends an empty initial settings frame, and the magic value 'omit' causes no initial settings frame to be sent.")
-)
-
-type command struct {
- run func(*h2i, []string) error // required
-
- // complete optionally specifies tokens (case-insensitive) which are
- // valid for this subcommand.
- complete func() []string
-}
-
-var commands = map[string]command{
- "ping": command{run: (*h2i).cmdPing},
- "settings": command{
- run: (*h2i).cmdSettings,
- complete: func() []string {
- return []string{
- "ACK",
- http2.SettingHeaderTableSize.String(),
- http2.SettingEnablePush.String(),
- http2.SettingMaxConcurrentStreams.String(),
- http2.SettingInitialWindowSize.String(),
- http2.SettingMaxFrameSize.String(),
- http2.SettingMaxHeaderListSize.String(),
- }
- },
- },
- "quit": command{run: (*h2i).cmdQuit},
- "headers": command{run: (*h2i).cmdHeaders},
-}
-
-func usage() {
- fmt.Fprintf(os.Stderr, "Usage: h2i \n\n")
- flag.PrintDefaults()
-}
-
-// withPort adds ":443" if another port isn't already present.
-func withPort(host string) string {
- if _, _, err := net.SplitHostPort(host); err != nil {
- return net.JoinHostPort(host, "443")
- }
- return host
-}
-
-// h2i is the app's state.
-type h2i struct {
- host string
- tc *tls.Conn
- framer *http2.Framer
- term *terminal.Terminal
-
- // owned by the command loop:
- streamID uint32
- hbuf bytes.Buffer
- henc *hpack.Encoder
-
- // owned by the readFrames loop:
- peerSetting map[http2.SettingID]uint32
- hdec *hpack.Decoder
-}
-
-func main() {
- flag.Usage = usage
- flag.Parse()
- if flag.NArg() != 1 {
- usage()
- os.Exit(2)
- }
- log.SetFlags(0)
-
- host := flag.Arg(0)
- app := &h2i{
- host: host,
- peerSetting: make(map[http2.SettingID]uint32),
- }
- app.henc = hpack.NewEncoder(&app.hbuf)
-
- if err := app.Main(); err != nil {
- if app.term != nil {
- app.logf("%v\n", err)
- } else {
- fmt.Fprintf(os.Stderr, "%v\n", err)
- }
- os.Exit(1)
- }
- fmt.Fprintf(os.Stdout, "\n")
-}
-
-func (app *h2i) Main() error {
- cfg := &tls.Config{
- ServerName: app.host,
- NextProtos: strings.Split(*flagNextProto, ","),
- InsecureSkipVerify: *flagInsecure,
- }
-
- hostAndPort := withPort(app.host)
- log.Printf("Connecting to %s ...", hostAndPort)
- tc, err := tls.Dial("tcp", hostAndPort, cfg)
- if err != nil {
- return fmt.Errorf("Error dialing %s: %v", withPort(app.host), err)
- }
- log.Printf("Connected to %v", tc.RemoteAddr())
- defer tc.Close()
-
- if err := tc.Handshake(); err != nil {
- return fmt.Errorf("TLS handshake: %v", err)
- }
- if !*flagInsecure {
- if err := tc.VerifyHostname(app.host); err != nil {
- return fmt.Errorf("VerifyHostname: %v", err)
- }
- }
- state := tc.ConnectionState()
- log.Printf("Negotiated protocol %q", state.NegotiatedProtocol)
- if !state.NegotiatedProtocolIsMutual || state.NegotiatedProtocol == "" {
- return fmt.Errorf("Could not negotiate protocol mutually")
- }
-
- if _, err := io.WriteString(tc, http2.ClientPreface); err != nil {
- return err
- }
-
- app.framer = http2.NewFramer(tc, tc)
-
- oldState, err := terminal.MakeRaw(0)
- if err != nil {
- return err
- }
- defer terminal.Restore(0, oldState)
-
- var screen = struct {
- io.Reader
- io.Writer
- }{os.Stdin, os.Stdout}
-
- app.term = terminal.NewTerminal(screen, "h2i> ")
- lastWord := regexp.MustCompile(`.+\W(\w+)$`)
- app.term.AutoCompleteCallback = func(line string, pos int, key rune) (newLine string, newPos int, ok bool) {
- if key != '\t' {
- return
- }
- if pos != len(line) {
- // TODO: we're being lazy for now, only supporting tab completion at the end.
- return
- }
- // Auto-complete for the command itself.
- if !strings.Contains(line, " ") {
- var name string
- name, _, ok = lookupCommand(line)
- if !ok {
- return
- }
- return name, len(name), true
- }
- _, c, ok := lookupCommand(line[:strings.IndexByte(line, ' ')])
- if !ok || c.complete == nil {
- return
- }
- if strings.HasSuffix(line, " ") {
- app.logf("%s", strings.Join(c.complete(), " "))
- return line, pos, true
- }
- m := lastWord.FindStringSubmatch(line)
- if m == nil {
- return line, len(line), true
- }
- soFar := m[1]
- var match []string
- for _, cand := range c.complete() {
- if len(soFar) > len(cand) || !strings.EqualFold(cand[:len(soFar)], soFar) {
- continue
- }
- match = append(match, cand)
- }
- if len(match) == 0 {
- return
- }
- if len(match) > 1 {
- // TODO: auto-complete any common prefix
- app.logf("%s", strings.Join(match, " "))
- return line, pos, true
- }
- newLine = line[:len(line)-len(soFar)] + match[0]
- return newLine, len(newLine), true
-
- }
-
- errc := make(chan error, 2)
- go func() { errc <- app.readFrames() }()
- go func() { errc <- app.readConsole() }()
- return <-errc
-}
-
-func (app *h2i) logf(format string, args ...interface{}) {
- fmt.Fprintf(app.term, format+"\n", args...)
-}
-
-func (app *h2i) readConsole() error {
- if s := *flagSettings; s != "omit" {
- var args []string
- if s != "empty" {
- args = strings.Split(s, ",")
- }
- _, c, ok := lookupCommand("settings")
- if !ok {
- panic("settings command not found")
- }
- c.run(app, args)
- }
-
- for {
- line, err := app.term.ReadLine()
- if err == io.EOF {
- return nil
- }
- if err != nil {
- return fmt.Errorf("terminal.ReadLine: %v", err)
- }
- f := strings.Fields(line)
- if len(f) == 0 {
- continue
- }
- cmd, args := f[0], f[1:]
- if _, c, ok := lookupCommand(cmd); ok {
- err = c.run(app, args)
- } else {
- app.logf("Unknown command %q", line)
- }
- if err == errExitApp {
- return nil
- }
- if err != nil {
- return err
- }
- }
-}
-
-func lookupCommand(prefix string) (name string, c command, ok bool) {
- prefix = strings.ToLower(prefix)
- if c, ok = commands[prefix]; ok {
- return prefix, c, ok
- }
-
- for full, candidate := range commands {
- if strings.HasPrefix(full, prefix) {
- if c.run != nil {
- return "", command{}, false // ambiguous
- }
- c = candidate
- name = full
- }
- }
- return name, c, c.run != nil
-}
-
-var errExitApp = errors.New("internal sentinel error value to quit the console reading loop")
-
-func (a *h2i) cmdQuit(args []string) error {
- if len(args) > 0 {
- a.logf("the QUIT command takes no argument")
- return nil
- }
- return errExitApp
-}
-
-func (a *h2i) cmdSettings(args []string) error {
- if len(args) == 1 && strings.EqualFold(args[0], "ACK") {
- return a.framer.WriteSettingsAck()
- }
- var settings []http2.Setting
- for _, arg := range args {
- if strings.EqualFold(arg, "ACK") {
- a.logf("Error: ACK must be only argument with the SETTINGS command")
- return nil
- }
- eq := strings.Index(arg, "=")
- if eq == -1 {
- a.logf("Error: invalid argument %q (expected SETTING_NAME=nnnn)", arg)
- return nil
- }
- sid, ok := settingByName(arg[:eq])
- if !ok {
- a.logf("Error: unknown setting name %q", arg[:eq])
- return nil
- }
- val, err := strconv.ParseUint(arg[eq+1:], 10, 32)
- if err != nil {
- a.logf("Error: invalid argument %q (expected SETTING_NAME=nnnn)", arg)
- return nil
- }
- settings = append(settings, http2.Setting{
- ID: sid,
- Val: uint32(val),
- })
- }
- a.logf("Sending: %v", settings)
- return a.framer.WriteSettings(settings...)
-}
-
-func settingByName(name string) (http2.SettingID, bool) {
- for _, sid := range [...]http2.SettingID{
- http2.SettingHeaderTableSize,
- http2.SettingEnablePush,
- http2.SettingMaxConcurrentStreams,
- http2.SettingInitialWindowSize,
- http2.SettingMaxFrameSize,
- http2.SettingMaxHeaderListSize,
- } {
- if strings.EqualFold(sid.String(), name) {
- return sid, true
- }
- }
- return 0, false
-}
-
-func (app *h2i) cmdPing(args []string) error {
- if len(args) > 1 {
- app.logf("invalid PING usage: only accepts 0 or 1 args")
- return nil // nil means don't end the program
- }
- var data [8]byte
- if len(args) == 1 {
- copy(data[:], args[0])
- } else {
- copy(data[:], "h2i_ping")
- }
- return app.framer.WritePing(false, data)
-}
-
-func (app *h2i) cmdHeaders(args []string) error {
- if len(args) > 0 {
- app.logf("Error: HEADERS doesn't yet take arguments.")
- // TODO: flags for restricting window size, to force CONTINUATION
- // frames.
- return nil
- }
- var h1req bytes.Buffer
- app.term.SetPrompt("(as HTTP/1.1)> ")
- defer app.term.SetPrompt("h2i> ")
- for {
- line, err := app.term.ReadLine()
- if err != nil {
- return err
- }
- h1req.WriteString(line)
- h1req.WriteString("\r\n")
- if line == "" {
- break
- }
- }
- req, err := http.ReadRequest(bufio.NewReader(&h1req))
- if err != nil {
- app.logf("Invalid HTTP/1.1 request: %v", err)
- return nil
- }
- if app.streamID == 0 {
- app.streamID = 1
- } else {
- app.streamID += 2
- }
- app.logf("Opening Stream-ID %d:", app.streamID)
- hbf := app.encodeHeaders(req)
- if len(hbf) > 16<<10 {
- app.logf("TODO: h2i doesn't yet write CONTINUATION frames. Copy it from transport.go")
- return nil
- }
- return app.framer.WriteHeaders(http2.HeadersFrameParam{
- StreamID: app.streamID,
- BlockFragment: hbf,
- EndStream: req.Method == "GET" || req.Method == "HEAD", // good enough for now
- EndHeaders: true, // for now
- })
-}
-
-func (app *h2i) readFrames() error {
- for {
- f, err := app.framer.ReadFrame()
- if err != nil {
- return fmt.Errorf("ReadFrame: %v", err)
- }
- app.logf("%v", f)
- switch f := f.(type) {
- case *http2.PingFrame:
- app.logf(" Data = %q", f.Data)
- case *http2.SettingsFrame:
- f.ForeachSetting(func(s http2.Setting) error {
- app.logf(" %v", s)
- app.peerSetting[s.ID] = s.Val
- return nil
- })
- case *http2.WindowUpdateFrame:
- app.logf(" Window-Increment = %v\n", f.Increment)
- case *http2.GoAwayFrame:
- app.logf(" Last-Stream-ID = %d; Error-Code = %v (%d)\n", f.LastStreamID, f.ErrCode, f.ErrCode)
- case *http2.DataFrame:
- app.logf(" %q", f.Data())
- case *http2.HeadersFrame:
- if f.HasPriority() {
- app.logf(" PRIORITY = %v", f.Priority)
- }
- if app.hdec == nil {
- // TODO: if the user uses h2i to send a SETTINGS frame advertising
- // something larger, we'll need to respect SETTINGS_HEADER_TABLE_SIZE
- // and stuff here instead of using the 4k default. But for now:
- tableSize := uint32(4 << 10)
- app.hdec = hpack.NewDecoder(tableSize, app.onNewHeaderField)
- }
- app.hdec.Write(f.HeaderBlockFragment())
- }
- }
-}
-
-// called from readLoop
-func (app *h2i) onNewHeaderField(f hpack.HeaderField) {
- if f.Sensitive {
- app.logf(" %s = %q (SENSITIVE)", f.Name, f.Value)
- }
- app.logf(" %s = %q", f.Name, f.Value)
-}
-
-func (app *h2i) encodeHeaders(req *http.Request) []byte {
- app.hbuf.Reset()
-
- // TODO(bradfitz): figure out :authority-vs-Host stuff between http2 and Go
- host := req.Host
- if host == "" {
- host = req.URL.Host
- }
-
- path := req.URL.Path
- if path == "" {
- path = "/"
- }
-
- app.writeHeader(":authority", host) // probably not right for all sites
- app.writeHeader(":method", req.Method)
- app.writeHeader(":path", path)
- app.writeHeader(":scheme", "https")
-
- for k, vv := range req.Header {
- lowKey := strings.ToLower(k)
- if lowKey == "host" {
- continue
- }
- for _, v := range vv {
- app.writeHeader(lowKey, v)
- }
- }
- return app.hbuf.Bytes()
-}
-
-func (app *h2i) writeHeader(name, value string) {
- app.henc.WriteField(hpack.HeaderField{Name: name, Value: value})
- app.logf(" %s = %s", name, value)
-}
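`encodeHeaders` above turns the typed-in HTTP/1.1 request into an HPACK header block: pseudo-headers first, then lower-cased regular headers, which `cmdHeaders` then hands to `Framer.WriteHeaders`. A self-contained sketch of that encoding step; the header values are illustrative.

```go
package main

import (
	"bytes"
	"fmt"

	"golang.org/x/net/http2/hpack"
)

func main() {
	// Build an HPACK header block fragment: pseudo-headers first,
	// then lower-cased regular headers, as h2i's encodeHeaders does.
	var buf bytes.Buffer
	enc := hpack.NewEncoder(&buf)
	for _, hf := range []hpack.HeaderField{
		{Name: ":authority", Value: "example.com"},
		{Name: ":method", Value: "GET"},
		{Name: ":path", Value: "/"},
		{Name: ":scheme", Value: "https"},
		{Name: "user-agent", Value: "h2i-sketch"},
	} {
		if err := enc.WriteField(hf); err != nil {
			panic(err)
		}
	}
	// buf.Bytes() is what gets passed to Framer.WriteHeaders as
	// http2.HeadersFrameParam.BlockFragment.
	fmt.Printf("%d-byte header block: %x\n", buf.Len(), buf.Bytes())
}
```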
diff --git a/vendor/golang.org/x/net/http2/headermap.go b/vendor/golang.org/x/net/http2/headermap.go
deleted file mode 100644
index c2805f6a..00000000
--- a/vendor/golang.org/x/net/http2/headermap.go
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package http2
-
-import (
- "net/http"
- "strings"
-)
-
-var (
- commonLowerHeader = map[string]string{} // Go-Canonical-Case -> lower-case
- commonCanonHeader = map[string]string{} // lower-case -> Go-Canonical-Case
-)
-
-func init() {
- for _, v := range []string{
- "accept",
- "accept-charset",
- "accept-encoding",
- "accept-language",
- "accept-ranges",
- "age",
- "access-control-allow-origin",
- "allow",
- "authorization",
- "cache-control",
- "content-disposition",
- "content-encoding",
- "content-language",
- "content-length",
- "content-location",
- "content-range",
- "content-type",
- "cookie",
- "date",
- "etag",
- "expect",
- "expires",
- "from",
- "host",
- "if-match",
- "if-modified-since",
- "if-none-match",
- "if-unmodified-since",
- "last-modified",
- "link",
- "location",
- "max-forwards",
- "proxy-authenticate",
- "proxy-authorization",
- "range",
- "referer",
- "refresh",
- "retry-after",
- "server",
- "set-cookie",
- "strict-transport-security",
- "trailer",
- "transfer-encoding",
- "user-agent",
- "vary",
- "via",
- "www-authenticate",
- } {
- chk := http.CanonicalHeaderKey(v)
- commonLowerHeader[chk] = v
- commonCanonHeader[v] = chk
- }
-}
-
-func lowerHeader(v string) string {
- if s, ok := commonLowerHeader[v]; ok {
- return s
- }
- return strings.ToLower(v)
-}
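headermap.go pre-computes the canonical-case/lower-case mapping for common header names because HTTP/2 requires lowercase field names on the wire; `lowerHeader` falls back to `strings.ToLower` for anything not in the table. A tiny sketch of that round trip using only the standard library:

```go
package main

import (
	"fmt"
	"net/http"
	"strings"
)

func main() {
	// The pre-built maps are a cache for exactly this pair of conversions.
	name := http.CanonicalHeaderKey("content-type") // "Content-Type" (Go canonical form)
	wire := strings.ToLower(name)                   // "content-type" (HTTP/2 wire form)
	fmt.Println(name, "->", wire)
}
```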
diff --git a/vendor/golang.org/x/net/http2/hpack/encode.go b/vendor/golang.org/x/net/http2/hpack/encode.go
deleted file mode 100644
index f9bb0339..00000000
--- a/vendor/golang.org/x/net/http2/hpack/encode.go
+++ /dev/null
@@ -1,251 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package hpack
-
-import (
- "io"
-)
-
-const (
- uint32Max = ^uint32(0)
- initialHeaderTableSize = 4096
-)
-
-type Encoder struct {
- dynTab dynamicTable
- // minSize is the minimum table size set by
- // SetMaxDynamicTableSize after the previous Header Table Size
- // Update.
- minSize uint32
- // maxSizeLimit is the maximum table size this encoder
- // supports. It protects the encoder from an excessively
- // large table size.
- maxSizeLimit uint32
- // tableSizeUpdate indicates whether "Header Table Size
- // Update" is required.
- tableSizeUpdate bool
- w io.Writer
- buf []byte
-}
-
-// NewEncoder returns a new Encoder which performs HPACK encoding.
-// Encoded data is written to w.
-func NewEncoder(w io.Writer) *Encoder {
- e := &Encoder{
- minSize: uint32Max,
- maxSizeLimit: initialHeaderTableSize,
- tableSizeUpdate: false,
- w: w,
- }
- e.dynTab.setMaxSize(initialHeaderTableSize)
- return e
-}
-
-// WriteField encodes f into a single Write to e's underlying Writer.
-// If necessary, this function also produces bytes for a "Header Table
-// Size Update", which are emitted before f is encoded.
-func (e *Encoder) WriteField(f HeaderField) error {
- e.buf = e.buf[:0]
-
- if e.tableSizeUpdate {
- e.tableSizeUpdate = false
- if e.minSize < e.dynTab.maxSize {
- e.buf = appendTableSize(e.buf, e.minSize)
- }
- e.minSize = uint32Max
- e.buf = appendTableSize(e.buf, e.dynTab.maxSize)
- }
-
- idx, nameValueMatch := e.searchTable(f)
- if nameValueMatch {
- e.buf = appendIndexed(e.buf, idx)
- } else {
- indexing := e.shouldIndex(f)
- if indexing {
- e.dynTab.add(f)
- }
-
- if idx == 0 {
- e.buf = appendNewName(e.buf, f, indexing)
- } else {
- e.buf = appendIndexedName(e.buf, f, idx, indexing)
- }
- }
- n, err := e.w.Write(e.buf)
- if err == nil && n != len(e.buf) {
- err = io.ErrShortWrite
- }
- return err
-}
-
-// searchTable searches f in both the static and dynamic header tables.
-// The static header table is searched first. Only when there is no
-// exact match for both name and value is the dynamic header table
-// searched. If there is no match, i is 0. If both name and value
-// match, i is the matched index and nameValueMatch becomes true. If
-// only the name matches, i points to that index and nameValueMatch
-// becomes false.
-func (e *Encoder) searchTable(f HeaderField) (i uint64, nameValueMatch bool) {
- for idx, hf := range staticTable {
- if !constantTimeStringCompare(hf.Name, f.Name) {
- continue
- }
- if i == 0 {
- i = uint64(idx + 1)
- }
- if f.Sensitive {
- continue
- }
- if !constantTimeStringCompare(hf.Value, f.Value) {
- continue
- }
- i = uint64(idx + 1)
- nameValueMatch = true
- return
- }
-
- j, nameValueMatch := e.dynTab.search(f)
- if nameValueMatch || (i == 0 && j != 0) {
- i = j + uint64(len(staticTable))
- }
- return
-}
-
-// SetMaxDynamicTableSize changes the dynamic header table size to v.
-// The actual size is bounded by the value passed to
-// SetMaxDynamicTableSizeLimit.
-func (e *Encoder) SetMaxDynamicTableSize(v uint32) {
- if v > e.maxSizeLimit {
- v = e.maxSizeLimit
- }
- if v < e.minSize {
- e.minSize = v
- }
- e.tableSizeUpdate = true
- e.dynTab.setMaxSize(v)
-}
-
-// SetMaxDynamicTableSizeLimit changes the maximum value that can be
-// specified in SetMaxDynamicTableSize to v. By default, it is set to
-// 4096, which is the default dynamic header table size described in
-// the HPACK specification. If the current maximum
-// dynamic header table size is strictly greater than v, "Header Table
-// Size Update" will be done in the next WriteField call and the
-// maximum dynamic header table size is truncated to v.
-func (e *Encoder) SetMaxDynamicTableSizeLimit(v uint32) {
- e.maxSizeLimit = v
- if e.dynTab.maxSize > v {
- e.tableSizeUpdate = true
- e.dynTab.setMaxSize(v)
- }
-}
-
-// shouldIndex reports whether f should be indexed.
-func (e *Encoder) shouldIndex(f HeaderField) bool {
- return !f.Sensitive && f.Size() <= e.dynTab.maxSize
-}
-
-// appendIndexed appends index i, as encoded in "Indexed Header Field"
-// representation, to dst and returns the extended buffer.
-func appendIndexed(dst []byte, i uint64) []byte {
- first := len(dst)
- dst = appendVarInt(dst, 7, i)
- dst[first] |= 0x80
- return dst
-}
-
-// appendNewName appends f, as encoded in one of "Literal Header field
-// - New Name" representation variants, to dst and returns the
-// extended buffer.
-//
-// If f.Sensitive is true, "Never Indexed" representation is used. If
-// f.Sensitive is false and indexing is true, "Inremental Indexing"
-// representation is used.
-func appendNewName(dst []byte, f HeaderField, indexing bool) []byte {
- dst = append(dst, encodeTypeByte(indexing, f.Sensitive))
- dst = appendHpackString(dst, f.Name)
- return appendHpackString(dst, f.Value)
-}
-
-// appendIndexedName appends f and index i referring indexed name
-// entry, as encoded in one of "Literal Header field - Indexed Name"
-// representation variants, to dst and returns the extended buffer.
-//
-// If f.Sensitive is true, "Never Indexed" representation is used. If
-// f.Sensitive is false and indexing is true, "Incremental Indexing"
-// representation is used.
-func appendIndexedName(dst []byte, f HeaderField, i uint64, indexing bool) []byte {
- first := len(dst)
- var n byte
- if indexing {
- n = 6
- } else {
- n = 4
- }
- dst = appendVarInt(dst, n, i)
- dst[first] |= encodeTypeByte(indexing, f.Sensitive)
- return appendHpackString(dst, f.Value)
-}
-
-// appendTableSize appends v, as encoded in "Header Table Size Update"
-// representation, to dst and returns the extended buffer.
-func appendTableSize(dst []byte, v uint32) []byte {
- first := len(dst)
- dst = appendVarInt(dst, 5, uint64(v))
- dst[first] |= 0x20
- return dst
-}
-
-// appendVarInt appends i, as encoded in variable integer form using n
-// bit prefix, to dst and returns the extended buffer.
-//
-// See
-// http://http2.github.io/http2-spec/compression.html#integer.representation
-func appendVarInt(dst []byte, n byte, i uint64) []byte {
- k := uint64((1 << n) - 1)
- if i < k {
- return append(dst, byte(i))
- }
- dst = append(dst, byte(k))
- i -= k
- for ; i >= 128; i >>= 7 {
- dst = append(dst, byte(0x80|(i&0x7f)))
- }
- return append(dst, byte(i))
-}
-
-// appendHpackString appends s, as encoded in "String Literal"
-// representation, to dst and returns the extended buffer.
-//
-// s will be encoded in Huffman codes only when that produces a
-// strictly shorter byte string.
-func appendHpackString(dst []byte, s string) []byte {
- huffmanLength := HuffmanEncodeLength(s)
- if huffmanLength < uint64(len(s)) {
- first := len(dst)
- dst = appendVarInt(dst, 7, huffmanLength)
- dst = AppendHuffmanString(dst, s)
- dst[first] |= 0x80
- } else {
- dst = appendVarInt(dst, 7, uint64(len(s)))
- dst = append(dst, s...)
- }
- return dst
-}
-
-// encodeTypeByte returns type byte. If sensitive is true, type byte
-// for "Never Indexed" representation is returned. If sensitive is
-// false and indexing is true, type byte for "Incremental Indexing"
-// representation is returned. Otherwise, type byte for "Without
-// Indexing" is returned.
-func encodeTypeByte(indexing, sensitive bool) byte {
- if sensitive {
- return 0x10
- }
- if indexing {
- return 0x40
- }
- return 0
-}
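The encoder above picks between indexed, literal, and never-indexed representations on each `WriteField` call. A short sketch of the exported API round-tripping a few fields through an `Encoder` and a `Decoder`; the field names and values are illustrative.

```go
package main

import (
	"bytes"
	"fmt"

	"golang.org/x/net/http2/hpack"
)

func main() {
	var buf bytes.Buffer
	enc := hpack.NewEncoder(&buf)

	// WriteField emits one encoded representation per call; repeated
	// fields hit the static or dynamic table and shrink to an index.
	fields := []hpack.HeaderField{
		{Name: ":method", Value: "GET"},
		{Name: "cookie", Value: "secret", Sensitive: true}, // never indexed
		{Name: "x-custom", Value: "v1"},
	}
	for _, f := range fields {
		if err := enc.WriteField(f); err != nil {
			panic(err)
		}
	}

	// Decode it back; the emit callback fires once per field, in order.
	var got []hpack.HeaderField
	dec := hpack.NewDecoder(4096, func(f hpack.HeaderField) { got = append(got, f) })
	if _, err := dec.Write(buf.Bytes()); err != nil {
		panic(err)
	}
	fmt.Printf("encoded %d fields into %d bytes: %+v\n", len(got), buf.Len(), got)
}
```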
diff --git a/vendor/golang.org/x/net/http2/hpack/encode_test.go b/vendor/golang.org/x/net/http2/hpack/encode_test.go
deleted file mode 100644
index 92286f3b..00000000
--- a/vendor/golang.org/x/net/http2/hpack/encode_test.go
+++ /dev/null
@@ -1,330 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package hpack
-
-import (
- "bytes"
- "encoding/hex"
- "reflect"
- "strings"
- "testing"
-)
-
-func TestEncoderTableSizeUpdate(t *testing.T) {
- tests := []struct {
- size1, size2 uint32
- wantHex string
- }{
- // Should emit 2 table size updates (2048 and 4096)
- {2048, 4096, "3fe10f 3fe11f 82"},
-
- // Should emit 1 table size update (2048)
- {16384, 2048, "3fe10f 82"},
- }
- for _, tt := range tests {
- var buf bytes.Buffer
- e := NewEncoder(&buf)
- e.SetMaxDynamicTableSize(tt.size1)
- e.SetMaxDynamicTableSize(tt.size2)
- if err := e.WriteField(pair(":method", "GET")); err != nil {
- t.Fatal(err)
- }
- want := removeSpace(tt.wantHex)
- if got := hex.EncodeToString(buf.Bytes()); got != want {
- t.Errorf("e.SetDynamicTableSize %v, %v = %q; want %q", tt.size1, tt.size2, got, want)
- }
- }
-}
-
-func TestEncoderWriteField(t *testing.T) {
- var buf bytes.Buffer
- e := NewEncoder(&buf)
- var got []HeaderField
- d := NewDecoder(4<<10, func(f HeaderField) {
- got = append(got, f)
- })
-
- tests := []struct {
- hdrs []HeaderField
- }{
- {[]HeaderField{
- pair(":method", "GET"),
- pair(":scheme", "http"),
- pair(":path", "/"),
- pair(":authority", "www.example.com"),
- }},
- {[]HeaderField{
- pair(":method", "GET"),
- pair(":scheme", "http"),
- pair(":path", "/"),
- pair(":authority", "www.example.com"),
- pair("cache-control", "no-cache"),
- }},
- {[]HeaderField{
- pair(":method", "GET"),
- pair(":scheme", "https"),
- pair(":path", "/index.html"),
- pair(":authority", "www.example.com"),
- pair("custom-key", "custom-value"),
- }},
- }
- for i, tt := range tests {
- buf.Reset()
- got = got[:0]
- for _, hf := range tt.hdrs {
- if err := e.WriteField(hf); err != nil {
- t.Fatal(err)
- }
- }
- _, err := d.Write(buf.Bytes())
- if err != nil {
- t.Errorf("%d. Decoder Write = %v", i, err)
- }
- if !reflect.DeepEqual(got, tt.hdrs) {
- t.Errorf("%d. Decoded %+v; want %+v", i, got, tt.hdrs)
- }
- }
-}
-
-func TestEncoderSearchTable(t *testing.T) {
- e := NewEncoder(nil)
-
- e.dynTab.add(pair("foo", "bar"))
- e.dynTab.add(pair("blake", "miz"))
- e.dynTab.add(pair(":method", "GET"))
-
- tests := []struct {
- hf HeaderField
- wantI uint64
- wantMatch bool
- }{
- // Name and Value match
- {pair("foo", "bar"), uint64(len(staticTable) + 3), true},
- {pair("blake", "miz"), uint64(len(staticTable) + 2), true},
- {pair(":method", "GET"), 2, true},
-
- // Only name match because Sensitive == true
- {HeaderField{":method", "GET", true}, 2, false},
-
- // Only Name matches
- {pair("foo", "..."), uint64(len(staticTable) + 3), false},
- {pair("blake", "..."), uint64(len(staticTable) + 2), false},
- {pair(":method", "..."), 2, false},
-
- // None match
- {pair("foo-", "bar"), 0, false},
- }
- for _, tt := range tests {
- if gotI, gotMatch := e.searchTable(tt.hf); gotI != tt.wantI || gotMatch != tt.wantMatch {
- t.Errorf("d.search(%+v) = %v, %v; want %v, %v", tt.hf, gotI, gotMatch, tt.wantI, tt.wantMatch)
- }
- }
-}
-
-func TestAppendVarInt(t *testing.T) {
- tests := []struct {
- n byte
- i uint64
- want []byte
- }{
- // Fits in a byte:
- {1, 0, []byte{0}},
- {2, 2, []byte{2}},
- {3, 6, []byte{6}},
- {4, 14, []byte{14}},
- {5, 30, []byte{30}},
- {6, 62, []byte{62}},
- {7, 126, []byte{126}},
- {8, 254, []byte{254}},
-
- // Multiple bytes:
- {5, 1337, []byte{31, 154, 10}},
- }
- for _, tt := range tests {
- got := appendVarInt(nil, tt.n, tt.i)
- if !bytes.Equal(got, tt.want) {
- t.Errorf("appendVarInt(nil, %v, %v) = %v; want %v", tt.n, tt.i, got, tt.want)
- }
- }
-}
-
-func TestAppendHpackString(t *testing.T) {
- tests := []struct {
- s, wantHex string
- }{
- // Huffman encoded
- {"www.example.com", "8c f1e3 c2e5 f23a 6ba0 ab90 f4ff"},
-
- // Not Huffman encoded
- {"a", "01 61"},
-
- // zero length
- {"", "00"},
- }
- for _, tt := range tests {
- want := removeSpace(tt.wantHex)
- buf := appendHpackString(nil, tt.s)
- if got := hex.EncodeToString(buf); want != got {
- t.Errorf("appendHpackString(nil, %q) = %q; want %q", tt.s, got, want)
- }
- }
-}
-
-func TestAppendIndexed(t *testing.T) {
- tests := []struct {
- i uint64
- wantHex string
- }{
- // 1 byte
- {1, "81"},
- {126, "fe"},
-
- // 2 bytes
- {127, "ff00"},
- {128, "ff01"},
- }
- for _, tt := range tests {
- want := removeSpace(tt.wantHex)
- buf := appendIndexed(nil, tt.i)
- if got := hex.EncodeToString(buf); want != got {
- t.Errorf("appendIndex(nil, %v) = %q; want %q", tt.i, got, want)
- }
- }
-}
-
-func TestAppendNewName(t *testing.T) {
- tests := []struct {
- f HeaderField
- indexing bool
- wantHex string
- }{
- // Incremental indexing
- {HeaderField{"custom-key", "custom-value", false}, true, "40 88 25a8 49e9 5ba9 7d7f 89 25a8 49e9 5bb8 e8b4 bf"},
-
- // Without indexing
- {HeaderField{"custom-key", "custom-value", false}, false, "00 88 25a8 49e9 5ba9 7d7f 89 25a8 49e9 5bb8 e8b4 bf"},
-
- // Never indexed
- {HeaderField{"custom-key", "custom-value", true}, true, "10 88 25a8 49e9 5ba9 7d7f 89 25a8 49e9 5bb8 e8b4 bf"},
- {HeaderField{"custom-key", "custom-value", true}, false, "10 88 25a8 49e9 5ba9 7d7f 89 25a8 49e9 5bb8 e8b4 bf"},
- }
- for _, tt := range tests {
- want := removeSpace(tt.wantHex)
- buf := appendNewName(nil, tt.f, tt.indexing)
- if got := hex.EncodeToString(buf); want != got {
- t.Errorf("appendNewName(nil, %+v, %v) = %q; want %q", tt.f, tt.indexing, got, want)
- }
- }
-}
-
-func TestAppendIndexedName(t *testing.T) {
- tests := []struct {
- f HeaderField
- i uint64
- indexing bool
- wantHex string
- }{
- // Incremental indexing
- {HeaderField{":status", "302", false}, 8, true, "48 82 6402"},
-
- // Without indexing
- {HeaderField{":status", "302", false}, 8, false, "08 82 6402"},
-
- // Never indexed
- {HeaderField{":status", "302", true}, 8, true, "18 82 6402"},
- {HeaderField{":status", "302", true}, 8, false, "18 82 6402"},
- }
- for _, tt := range tests {
- want := removeSpace(tt.wantHex)
- buf := appendIndexedName(nil, tt.f, tt.i, tt.indexing)
- if got := hex.EncodeToString(buf); want != got {
- t.Errorf("appendIndexedName(nil, %+v, %v) = %q; want %q", tt.f, tt.indexing, got, want)
- }
- }
-}
-
-func TestAppendTableSize(t *testing.T) {
- tests := []struct {
- i uint32
- wantHex string
- }{
- // Fits into 1 byte
- {30, "3e"},
-
- // Extra byte
- {31, "3f00"},
- {32, "3f01"},
- }
- for _, tt := range tests {
- want := removeSpace(tt.wantHex)
- buf := appendTableSize(nil, tt.i)
- if got := hex.EncodeToString(buf); want != got {
- t.Errorf("appendTableSize(nil, %v) = %q; want %q", tt.i, got, want)
- }
- }
-}
-
-func TestEncoderSetMaxDynamicTableSize(t *testing.T) {
- var buf bytes.Buffer
- e := NewEncoder(&buf)
- tests := []struct {
- v uint32
- wantUpdate bool
- wantMinSize uint32
- wantMaxSize uint32
- }{
- // Set new table size to 2048
- {2048, true, 2048, 2048},
-
- // Set new table size to 16384, but still limited to
- // 4096
- {16384, true, 2048, 4096},
- }
- for _, tt := range tests {
- e.SetMaxDynamicTableSize(tt.v)
- if got := e.tableSizeUpdate; tt.wantUpdate != got {
- t.Errorf("e.tableSizeUpdate = %v; want %v", got, tt.wantUpdate)
- }
- if got := e.minSize; tt.wantMinSize != got {
- t.Errorf("e.minSize = %v; want %v", got, tt.wantMinSize)
- }
- if got := e.dynTab.maxSize; tt.wantMaxSize != got {
- t.Errorf("e.maxSize = %v; want %v", got, tt.wantMaxSize)
- }
- }
-}
-
-func TestEncoderSetMaxDynamicTableSizeLimit(t *testing.T) {
- e := NewEncoder(nil)
- // 4095 < initialHeaderTableSize means maxSize is truncated to
- // 4095.
- e.SetMaxDynamicTableSizeLimit(4095)
- if got, want := e.dynTab.maxSize, uint32(4095); got != want {
- t.Errorf("e.dynTab.maxSize = %v; want %v", got, want)
- }
- if got, want := e.maxSizeLimit, uint32(4095); got != want {
- t.Errorf("e.maxSizeLimit = %v; want %v", got, want)
- }
- if got, want := e.tableSizeUpdate, true; got != want {
- t.Errorf("e.tableSizeUpdate = %v; want %v", got, want)
- }
- // maxSize will be truncated to maxSizeLimit
- e.SetMaxDynamicTableSize(16384)
- if got, want := e.dynTab.maxSize, uint32(4095); got != want {
- t.Errorf("e.dynTab.maxSize = %v; want %v", got, want)
- }
- // 8192 > current maxSizeLimit, so maxSize does not change.
- e.SetMaxDynamicTableSizeLimit(8192)
- if got, want := e.dynTab.maxSize, uint32(4095); got != want {
- t.Errorf("e.dynTab.maxSize = %v; want %v", got, want)
- }
- if got, want := e.maxSizeLimit, uint32(8192); got != want {
- t.Errorf("e.maxSizeLimit = %v; want %v", got, want)
- }
-}
-
-func removeSpace(s string) string {
- return strings.Replace(s, " ", "", -1)
-}
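`TestEncoderTableSizeUpdate` above checks that shrinking and then growing the dynamic table emits two "Header Table Size Update" prefixes (minimum size, then final size) ahead of the next encoded field. The same behavior can be observed through the public API; the expected hex string below comes from the first test case.

```go
package main

import (
	"bytes"
	"encoding/hex"
	"fmt"

	"golang.org/x/net/http2/hpack"
)

func main() {
	var buf bytes.Buffer
	e := hpack.NewEncoder(&buf)

	// Shrink to 2048, then grow back to 4096, before the next WriteField.
	e.SetMaxDynamicTableSize(2048)
	e.SetMaxDynamicTableSize(4096)
	if err := e.WriteField(hpack.HeaderField{Name: ":method", Value: "GET"}); err != nil {
		panic(err)
	}
	// Expected "3fe10f3fe11f82" per the first test case above:
	// two table-size updates followed by the indexed ":method: GET".
	fmt.Println(hex.EncodeToString(buf.Bytes()))
}
```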
diff --git a/vendor/golang.org/x/net/http2/hpack/hpack.go b/vendor/golang.org/x/net/http2/hpack/hpack.go
deleted file mode 100644
index dcf257af..00000000
--- a/vendor/golang.org/x/net/http2/hpack/hpack.go
+++ /dev/null
@@ -1,542 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package hpack implements HPACK, a compression format for
-// efficiently representing HTTP header fields in the context of HTTP/2.
-//
-// See http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-09
-package hpack
-
-import (
- "bytes"
- "errors"
- "fmt"
-)
-
-// A DecodingError is something the spec defines as a decoding error.
-type DecodingError struct {
- Err error
-}
-
-func (de DecodingError) Error() string {
- return fmt.Sprintf("decoding error: %v", de.Err)
-}
-
-// An InvalidIndexError is returned when an encoder references a table
-// entry before the static table or after the end of the dynamic table.
-type InvalidIndexError int
-
-func (e InvalidIndexError) Error() string {
- return fmt.Sprintf("invalid indexed representation index %d", int(e))
-}
-
-// A HeaderField is a name-value pair. Both the name and value are
-// treated as opaque sequences of octets.
-type HeaderField struct {
- Name, Value string
-
- // Sensitive means that this header field should never be
- // indexed.
- Sensitive bool
-}
-
-// IsPseudo reports whether the header field is an http2 pseudo header.
-// That is, it reports whether it starts with a colon.
-// It is not otherwise guaranteed to be a valid pseudo header field,
-// though.
-func (hf HeaderField) IsPseudo() bool {
- return len(hf.Name) != 0 && hf.Name[0] == ':'
-}
-
-func (hf HeaderField) String() string {
- var suffix string
- if hf.Sensitive {
- suffix = " (sensitive)"
- }
- return fmt.Sprintf("header field %q = %q%s", hf.Name, hf.Value, suffix)
-}
-
-// Size returns the size of an entry per RFC 7541 section 4.1.
-func (hf HeaderField) Size() uint32 {
- // http://http2.github.io/http2-spec/compression.html#rfc.section.4.1
- // "The size of the dynamic table is the sum of the size of
- // its entries. The size of an entry is the sum of its name's
- // length in octets (as defined in Section 5.2), its value's
- // length in octets (see Section 5.2), plus 32. The size of
- // an entry is calculated using the length of the name and
- // value without any Huffman encoding applied."
-
- // This can overflow if somebody makes a large HeaderField
- // Name and/or Value by hand, but we don't care, because that
- // won't happen on the wire because the encoding doesn't allow
- // it.
- return uint32(len(hf.Name) + len(hf.Value) + 32)
-}
-
-// A Decoder is the decoding context for incremental processing of
-// header blocks.
-type Decoder struct {
- dynTab dynamicTable
- emit func(f HeaderField)
-
- emitEnabled bool // whether calls to emit are enabled
- maxStrLen int // 0 means unlimited
-
- // buf is the unparsed buffer. It's only written to
- // saveBuf if it was truncated in the middle of a header
- // block. Because it's usually not owned, we can only
- // process it under Write.
- buf []byte // not owned; only valid during Write
-
- // saveBuf is previous data passed to Write which we weren't able
- // to fully parse before. Unlike buf, we own this data.
- saveBuf bytes.Buffer
-}
-
-// NewDecoder returns a new decoder with the provided maximum dynamic
-// table size. The emitFunc will be called for each valid field
-// parsed, in the same goroutine as calls to Write, before Write returns.
-func NewDecoder(maxDynamicTableSize uint32, emitFunc func(f HeaderField)) *Decoder {
- d := &Decoder{
- emit: emitFunc,
- emitEnabled: true,
- }
- d.dynTab.allowedMaxSize = maxDynamicTableSize
- d.dynTab.setMaxSize(maxDynamicTableSize)
- return d
-}
-
-// ErrStringLength is returned by Decoder.Write when the max string length
-// (as configured by Decoder.SetMaxStringLength) would be violated.
-var ErrStringLength = errors.New("hpack: string too long")
-
-// SetMaxStringLength sets the maximum size of a HeaderField name or
-// value string. If a string exceeds this length (even after any
-// decompression), Write will return ErrStringLength.
-// A value of 0 means unlimited and is the default from NewDecoder.
-func (d *Decoder) SetMaxStringLength(n int) {
- d.maxStrLen = n
-}
-
-// SetEmitFunc changes the callback used when new header fields
-// are decoded.
-// It must be non-nil. It does not affect EmitEnabled.
-func (d *Decoder) SetEmitFunc(emitFunc func(f HeaderField)) {
- d.emit = emitFunc
-}
-
-// SetEmitEnabled controls whether the emitFunc provided to NewDecoder
-// should be called. The default is true.
-//
-// This facility exists to let servers enforce MAX_HEADER_LIST_SIZE
-// while still decoding and keeping in-sync with decoder state, but
-// without doing unnecessary decompression or generating unnecessary
-// garbage for header fields past the limit.
-func (d *Decoder) SetEmitEnabled(v bool) { d.emitEnabled = v }
-
-// EmitEnabled reports whether calls to the emitFunc provided to NewDecoder
-// are currently enabled. The default is true.
-func (d *Decoder) EmitEnabled() bool { return d.emitEnabled }
-
-// TODO: add method *Decoder.Reset(maxSize, emitFunc) to let callers re-use Decoders and their
-// underlying buffers for garbage reasons.
-
-func (d *Decoder) SetMaxDynamicTableSize(v uint32) {
- d.dynTab.setMaxSize(v)
-}
-
-// SetAllowedMaxDynamicTableSize sets the upper bound that the encoded
-// stream (via dynamic table size updates) may set the maximum size
-// to.
-func (d *Decoder) SetAllowedMaxDynamicTableSize(v uint32) {
- d.dynTab.allowedMaxSize = v
-}
-
-type dynamicTable struct {
- // ents is the FIFO described at
- // http://http2.github.io/http2-spec/compression.html#rfc.section.2.3.2
-// The newest (low index) is appended at the end, and items are
- // evicted from the front.
- ents []HeaderField
- size uint32
- maxSize uint32 // current maxSize
- allowedMaxSize uint32 // maxSize may go up to this, inclusive
-}
-
-func (dt *dynamicTable) setMaxSize(v uint32) {
- dt.maxSize = v
- dt.evict()
-}
-
-// TODO: change dynamicTable to be a struct with a slice and a size int field,
-// per http://http2.github.io/http2-spec/compression.html#rfc.section.4.1:
-//
-//
-// Then make add increment the size. maybe the max size should move from Decoder to
-// dynamicTable and add should return an ok bool if there was enough space.
-//
-// Later we'll need a remove operation on dynamicTable.
-
-func (dt *dynamicTable) add(f HeaderField) {
- dt.ents = append(dt.ents, f)
- dt.size += f.Size()
- dt.evict()
-}
-
-// If we're too big, evict old stuff (front of the slice)
-func (dt *dynamicTable) evict() {
- base := dt.ents // keep base pointer of slice
- for dt.size > dt.maxSize {
- dt.size -= dt.ents[0].Size()
- dt.ents = dt.ents[1:]
- }
-
- // Shift slice contents down if we evicted things.
- if len(dt.ents) != len(base) {
- copy(base, dt.ents)
- dt.ents = base[:len(dt.ents)]
- }
-}
-
-// constantTimeStringCompare compares strings a and b in
-// constant time.
-func constantTimeStringCompare(a, b string) bool {
- if len(a) != len(b) {
- return false
- }
-
- c := byte(0)
-
- for i := 0; i < len(a); i++ {
- c |= a[i] ^ b[i]
- }
-
- return c == 0
-}
-
-// search searches for f in the table. The return value i is 0 if there is
-// no name match. If there is a name match or a name/value match, i is the
-// index of that entry (1-based). If both name and value match,
-// nameValueMatch becomes true.
-func (dt *dynamicTable) search(f HeaderField) (i uint64, nameValueMatch bool) {
- l := len(dt.ents)
- for j := l - 1; j >= 0; j-- {
- ent := dt.ents[j]
- if !constantTimeStringCompare(ent.Name, f.Name) {
- continue
- }
- if i == 0 {
- i = uint64(l - j)
- }
- if f.Sensitive {
- continue
- }
- if !constantTimeStringCompare(ent.Value, f.Value) {
- continue
- }
- i = uint64(l - j)
- nameValueMatch = true
- return
- }
- return
-}
-
-func (d *Decoder) maxTableIndex() int {
- return len(d.dynTab.ents) + len(staticTable)
-}
-
-func (d *Decoder) at(i uint64) (hf HeaderField, ok bool) {
- if i < 1 {
- return
- }
- if i > uint64(d.maxTableIndex()) {
- return
- }
- if i <= uint64(len(staticTable)) {
- return staticTable[i-1], true
- }
- dents := d.dynTab.ents
- return dents[len(dents)-(int(i)-len(staticTable))], true
-}
-
-// Decode decodes an entire block.
-//
-// TODO: remove this method and make it incremental later? This is
-// easier for debugging now.
-func (d *Decoder) DecodeFull(p []byte) ([]HeaderField, error) {
- var hf []HeaderField
- saveFunc := d.emit
- defer func() { d.emit = saveFunc }()
- d.emit = func(f HeaderField) { hf = append(hf, f) }
- if _, err := d.Write(p); err != nil {
- return nil, err
- }
- if err := d.Close(); err != nil {
- return nil, err
- }
- return hf, nil
-}
-
-func (d *Decoder) Close() error {
- if d.saveBuf.Len() > 0 {
- d.saveBuf.Reset()
- return DecodingError{errors.New("truncated headers")}
- }
- return nil
-}
-
-func (d *Decoder) Write(p []byte) (n int, err error) {
- if len(p) == 0 {
- // Prevent state machine CPU attacks (making us redo
- // work up to the point of finding out we don't have
- // enough data)
- return
- }
- // Only copy the data if we have to. Optimistically assume
- // that p will contain a complete header block.
- if d.saveBuf.Len() == 0 {
- d.buf = p
- } else {
- d.saveBuf.Write(p)
- d.buf = d.saveBuf.Bytes()
- d.saveBuf.Reset()
- }
-
- for len(d.buf) > 0 {
- err = d.parseHeaderFieldRepr()
- if err == errNeedMore {
- // Extra paranoia, making sure saveBuf won't
- // get too large. All the varint and string
- // reading code earlier should already catch
- // overlong things and return ErrStringLength,
- // but keep this as a last resort.
- const varIntOverhead = 8 // conservative
- if d.maxStrLen != 0 && int64(len(d.buf)) > 2*(int64(d.maxStrLen)+varIntOverhead) {
- return 0, ErrStringLength
- }
- d.saveBuf.Write(d.buf)
- return len(p), nil
- }
- if err != nil {
- break
- }
- }
- return len(p), err
-}
-
-// errNeedMore is an internal sentinel error value that means the
-// buffer is truncated and we need to read more data before we can
-// continue parsing.
-var errNeedMore = errors.New("need more data")
-
-type indexType int
-
-const (
- indexedTrue indexType = iota
- indexedFalse
- indexedNever
-)
-
-func (v indexType) indexed() bool { return v == indexedTrue }
-func (v indexType) sensitive() bool { return v == indexedNever }
-
-// returns errNeedMore if there isn't enough data available.
-// any other error is fatal.
-// consumes d.buf iff it returns nil.
-// precondition: must be called with len(d.buf) > 0
-func (d *Decoder) parseHeaderFieldRepr() error {
- b := d.buf[0]
- switch {
- case b&128 != 0:
- // Indexed representation.
- // High bit set?
- // http://http2.github.io/http2-spec/compression.html#rfc.section.6.1
- return d.parseFieldIndexed()
- case b&192 == 64:
- // 6.2.1 Literal Header Field with Incremental Indexing
- // 0b01xxxxxx: top two bits are 01
- // http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.1
- return d.parseFieldLiteral(6, indexedTrue)
- case b&240 == 0:
- // 6.2.2 Literal Header Field without Indexing
- // 0b0000xxxx: top four bits are 0000
- // http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.2
- return d.parseFieldLiteral(4, indexedFalse)
- case b&240 == 16:
- // 6.2.3 Literal Header Field never Indexed
- // 0b0001xxxx: top four bits are 0001
- // http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.3
- return d.parseFieldLiteral(4, indexedNever)
- case b&224 == 32:
- // 6.3 Dynamic Table Size Update
- // Top three bits are '001'.
- // http://http2.github.io/http2-spec/compression.html#rfc.section.6.3
- return d.parseDynamicTableSizeUpdate()
- }
-
- return DecodingError{errors.New("invalid encoding")}
-}
-
-// (same invariants and behavior as parseHeaderFieldRepr)
-func (d *Decoder) parseFieldIndexed() error {
- buf := d.buf
- idx, buf, err := readVarInt(7, buf)
- if err != nil {
- return err
- }
- hf, ok := d.at(idx)
- if !ok {
- return DecodingError{InvalidIndexError(idx)}
- }
- d.buf = buf
- return d.callEmit(HeaderField{Name: hf.Name, Value: hf.Value})
-}
-
-// (same invariants and behavior as parseHeaderFieldRepr)
-func (d *Decoder) parseFieldLiteral(n uint8, it indexType) error {
- buf := d.buf
- nameIdx, buf, err := readVarInt(n, buf)
- if err != nil {
- return err
- }
-
- var hf HeaderField
- wantStr := d.emitEnabled || it.indexed()
- if nameIdx > 0 {
- ihf, ok := d.at(nameIdx)
- if !ok {
- return DecodingError{InvalidIndexError(nameIdx)}
- }
- hf.Name = ihf.Name
- } else {
- hf.Name, buf, err = d.readString(buf, wantStr)
- if err != nil {
- return err
- }
- }
- hf.Value, buf, err = d.readString(buf, wantStr)
- if err != nil {
- return err
- }
- d.buf = buf
- if it.indexed() {
- d.dynTab.add(hf)
- }
- hf.Sensitive = it.sensitive()
- return d.callEmit(hf)
-}
-
-func (d *Decoder) callEmit(hf HeaderField) error {
- if d.maxStrLen != 0 {
- if len(hf.Name) > d.maxStrLen || len(hf.Value) > d.maxStrLen {
- return ErrStringLength
- }
- }
- if d.emitEnabled {
- d.emit(hf)
- }
- return nil
-}
-
-// (same invariants and behavior as parseHeaderFieldRepr)
-func (d *Decoder) parseDynamicTableSizeUpdate() error {
- buf := d.buf
- size, buf, err := readVarInt(5, buf)
- if err != nil {
- return err
- }
- if size > uint64(d.dynTab.allowedMaxSize) {
- return DecodingError{errors.New("dynamic table size update too large")}
- }
- d.dynTab.setMaxSize(uint32(size))
- d.buf = buf
- return nil
-}
-
-var errVarintOverflow = DecodingError{errors.New("varint integer overflow")}
-
-// readVarInt reads an unsigned variable length integer off the
-// beginning of p. n is the parameter as described in
-// http://http2.github.io/http2-spec/compression.html#rfc.section.5.1.
-//
-// n must always be between 1 and 8.
-//
-// The returned remain buffer is either a smaller suffix of p, or err != nil.
-// The error is errNeedMore if p doesn't contain a complete integer.
-func readVarInt(n byte, p []byte) (i uint64, remain []byte, err error) {
- if n < 1 || n > 8 {
- panic("bad n")
- }
- if len(p) == 0 {
- return 0, p, errNeedMore
- }
- i = uint64(p[0])
- if n < 8 {
- i &= (1 << uint64(n)) - 1
- }
- if i < (1<<uint64(n))-1 {
- return i, p[1:], nil
- }
-
- origP := p
- p = p[1:]
- var m uint64
- for len(p) > 0 {
- b := p[0]
- p = p[1:]
- i += uint64(b&127) << m
- if b&128 == 0 {
- return i, p, nil
- }
- m += 7
- if m >= 63 { // TODO: proper overflow check. making this up.
- return 0, origP, errVarintOverflow
- }
- }
- return 0, origP, errNeedMore
-}
-
-// readString decodes an hpack string from p.
-//
-// wantStr is whether s will be used. If false, decompression and
-// []byte->string garbage are skipped if s will be ignored
-// anyway. This does mean that huffman decoding errors for non-indexed
-// strings past the MAX_HEADER_LIST_SIZE are ignored, but the server
-// is returning an error anyway, and because they're not indexed, the error
-// won't affect the decoding state.
-func (d *Decoder) readString(p []byte, wantStr bool) (s string, remain []byte, err error) {
- if len(p) == 0 {
- return "", p, errNeedMore
- }
- isHuff := p[0]&128 != 0
- strLen, p, err := readVarInt(7, p)
- if err != nil {
- return "", p, err
- }
- if d.maxStrLen != 0 && strLen > uint64(d.maxStrLen) {
- return "", nil, ErrStringLength
- }
- if uint64(len(p)) < strLen {
- return "", p, errNeedMore
- }
- if !isHuff {
- if wantStr {
- s = string(p[:strLen])
- }
- return s, p[strLen:], nil
- }
-
- if wantStr {
- buf := bufPool.Get().(*bytes.Buffer)
- buf.Reset() // don't trust others
- defer bufPool.Put(buf)
- if err := huffmanDecode(buf, d.maxStrLen, p[:strLen]); err != nil {
- buf.Reset()
- return "", nil, err
- }
- s = buf.String()
- buf.Reset() // be nice to GC
- }
- return s, p[strLen:], nil
-}
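A minimal, hypothetical sketch of driving the Decoder deleted above, assuming the package remains importable from its upstream path golang.org/x/net/http2/hpack. The Encoder it uses to produce input bytes lives in the package's encode.go, which is outside this hunk.

package main

import (
	"bytes"
	"fmt"
	"log"

	"golang.org/x/net/http2/hpack"
)

func main() {
	// Encode two header fields into an HPACK header block.
	var block bytes.Buffer
	enc := hpack.NewEncoder(&block)
	if err := enc.WriteField(hpack.HeaderField{Name: ":method", Value: "GET"}); err != nil {
		log.Fatal(err)
	}
	if err := enc.WriteField(hpack.HeaderField{Name: "user-agent", Value: "demo"}); err != nil {
		log.Fatal(err)
	}

	// Decode it incrementally. The callback runs once per decoded field,
	// in the same goroutine as Write, before Write returns.
	dec := hpack.NewDecoder(4096, func(f hpack.HeaderField) {
		fmt.Printf("%s: %s\n", f.Name, f.Value)
	})
	if _, err := dec.Write(block.Bytes()); err != nil {
		log.Fatal(err)
	}
	if err := dec.Close(); err != nil { // reports truncated header blocks
		log.Fatal(err)
	}
}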
diff --git a/vendor/golang.org/x/net/http2/hpack/hpack_test.go b/vendor/golang.org/x/net/http2/hpack/hpack_test.go
deleted file mode 100644
index 6dc69f95..00000000
--- a/vendor/golang.org/x/net/http2/hpack/hpack_test.go
+++ /dev/null
@@ -1,813 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package hpack
-
-import (
- "bufio"
- "bytes"
- "encoding/hex"
- "fmt"
- "math/rand"
- "reflect"
- "regexp"
- "strconv"
- "strings"
- "testing"
- "time"
-)
-
-func TestStaticTable(t *testing.T) {
- fromSpec := `
- +-------+-----------------------------+---------------+
- | 1 | :authority | |
- | 2 | :method | GET |
- | 3 | :method | POST |
- | 4 | :path | / |
- | 5 | :path | /index.html |
- | 6 | :scheme | http |
- | 7 | :scheme | https |
- | 8 | :status | 200 |
- | 9 | :status | 204 |
- | 10 | :status | 206 |
- | 11 | :status | 304 |
- | 12 | :status | 400 |
- | 13 | :status | 404 |
- | 14 | :status | 500 |
- | 15 | accept-charset | |
- | 16 | accept-encoding | gzip, deflate |
- | 17 | accept-language | |
- | 18 | accept-ranges | |
- | 19 | accept | |
- | 20 | access-control-allow-origin | |
- | 21 | age | |
- | 22 | allow | |
- | 23 | authorization | |
- | 24 | cache-control | |
- | 25 | content-disposition | |
- | 26 | content-encoding | |
- | 27 | content-language | |
- | 28 | content-length | |
- | 29 | content-location | |
- | 30 | content-range | |
- | 31 | content-type | |
- | 32 | cookie | |
- | 33 | date | |
- | 34 | etag | |
- | 35 | expect | |
- | 36 | expires | |
- | 37 | from | |
- | 38 | host | |
- | 39 | if-match | |
- | 40 | if-modified-since | |
- | 41 | if-none-match | |
- | 42 | if-range | |
- | 43 | if-unmodified-since | |
- | 44 | last-modified | |
- | 45 | link | |
- | 46 | location | |
- | 47 | max-forwards | |
- | 48 | proxy-authenticate | |
- | 49 | proxy-authorization | |
- | 50 | range | |
- | 51 | referer | |
- | 52 | refresh | |
- | 53 | retry-after | |
- | 54 | server | |
- | 55 | set-cookie | |
- | 56 | strict-transport-security | |
- | 57 | transfer-encoding | |
- | 58 | user-agent | |
- | 59 | vary | |
- | 60 | via | |
- | 61 | www-authenticate | |
- +-------+-----------------------------+---------------+
-`
- bs := bufio.NewScanner(strings.NewReader(fromSpec))
- re := regexp.MustCompile(`\| (\d+)\s+\| (\S+)\s*\| (\S(.*\S)?)?\s+\|`)
- for bs.Scan() {
- l := bs.Text()
- if !strings.Contains(l, "|") {
- continue
- }
- m := re.FindStringSubmatch(l)
- if m == nil {
- continue
- }
- i, err := strconv.Atoi(m[1])
- if err != nil {
- t.Errorf("Bogus integer on line %q", l)
- continue
- }
- if i < 1 || i > len(staticTable) {
- t.Errorf("Bogus index %d on line %q", i, l)
- continue
- }
- if got, want := staticTable[i-1].Name, m[2]; got != want {
- t.Errorf("header index %d name = %q; want %q", i, got, want)
- }
- if got, want := staticTable[i-1].Value, m[3]; got != want {
- t.Errorf("header index %d value = %q; want %q", i, got, want)
- }
- }
- if err := bs.Err(); err != nil {
- t.Error(err)
- }
-}
-
-func (d *Decoder) mustAt(idx int) HeaderField {
- if hf, ok := d.at(uint64(idx)); !ok {
- panic(fmt.Sprintf("bogus index %d", idx))
- } else {
- return hf
- }
-}
-
-func TestDynamicTableAt(t *testing.T) {
- d := NewDecoder(4096, nil)
- at := d.mustAt
- if got, want := at(2), (pair(":method", "GET")); got != want {
- t.Errorf("at(2) = %v; want %v", got, want)
- }
- d.dynTab.add(pair("foo", "bar"))
- d.dynTab.add(pair("blake", "miz"))
- if got, want := at(len(staticTable)+1), (pair("blake", "miz")); got != want {
- t.Errorf("at(dyn 1) = %v; want %v", got, want)
- }
- if got, want := at(len(staticTable)+2), (pair("foo", "bar")); got != want {
- t.Errorf("at(dyn 2) = %v; want %v", got, want)
- }
- if got, want := at(3), (pair(":method", "POST")); got != want {
- t.Errorf("at(3) = %v; want %v", got, want)
- }
-}
-
-func TestDynamicTableSearch(t *testing.T) {
- dt := dynamicTable{}
- dt.setMaxSize(4096)
-
- dt.add(pair("foo", "bar"))
- dt.add(pair("blake", "miz"))
- dt.add(pair(":method", "GET"))
-
- tests := []struct {
- hf HeaderField
- wantI uint64
- wantMatch bool
- }{
- // Name and Value match
- {pair("foo", "bar"), 3, true},
- {pair(":method", "GET"), 1, true},
-
- // Only name match because of Sensitive == true
- {HeaderField{"blake", "miz", true}, 2, false},
-
- // Only Name matches
- {pair("foo", "..."), 3, false},
- {pair("blake", "..."), 2, false},
- {pair(":method", "..."), 1, false},
-
- // None match
- {pair("foo-", "bar"), 0, false},
- }
- for _, tt := range tests {
- if gotI, gotMatch := dt.search(tt.hf); gotI != tt.wantI || gotMatch != tt.wantMatch {
- t.Errorf("d.search(%+v) = %v, %v; want %v, %v", tt.hf, gotI, gotMatch, tt.wantI, tt.wantMatch)
- }
- }
-}
-
-func TestDynamicTableSizeEvict(t *testing.T) {
- d := NewDecoder(4096, nil)
- if want := uint32(0); d.dynTab.size != want {
- t.Fatalf("size = %d; want %d", d.dynTab.size, want)
- }
- add := d.dynTab.add
- add(pair("blake", "eats pizza"))
- if want := uint32(15 + 32); d.dynTab.size != want {
- t.Fatalf("after pizza, size = %d; want %d", d.dynTab.size, want)
- }
- add(pair("foo", "bar"))
- if want := uint32(15 + 32 + 6 + 32); d.dynTab.size != want {
- t.Fatalf("after foo bar, size = %d; want %d", d.dynTab.size, want)
- }
- d.dynTab.setMaxSize(15 + 32 + 1 /* slop */)
- if want := uint32(6 + 32); d.dynTab.size != want {
- t.Fatalf("after setMaxSize, size = %d; want %d", d.dynTab.size, want)
- }
- if got, want := d.mustAt(len(staticTable)+1), (pair("foo", "bar")); got != want {
- t.Errorf("at(dyn 1) = %v; want %v", got, want)
- }
- add(pair("long", strings.Repeat("x", 500)))
- if want := uint32(0); d.dynTab.size != want {
- t.Fatalf("after big one, size = %d; want %d", d.dynTab.size, want)
- }
-}
-
-func TestDecoderDecode(t *testing.T) {
- tests := []struct {
- name string
- in []byte
- want []HeaderField
- wantDynTab []HeaderField // newest entry first
- }{
- // C.2.1 Literal Header Field with Indexing
- // http://http2.github.io/http2-spec/compression.html#rfc.section.C.2.1
- {"C.2.1", dehex("400a 6375 7374 6f6d 2d6b 6579 0d63 7573 746f 6d2d 6865 6164 6572"),
- []HeaderField{pair("custom-key", "custom-header")},
- []HeaderField{pair("custom-key", "custom-header")},
- },
-
- // C.2.2 Literal Header Field without Indexing
- // http://http2.github.io/http2-spec/compression.html#rfc.section.C.2.2
- {"C.2.2", dehex("040c 2f73 616d 706c 652f 7061 7468"),
- []HeaderField{pair(":path", "/sample/path")},
- []HeaderField{}},
-
- // C.2.3 Literal Header Field never Indexed
- // http://http2.github.io/http2-spec/compression.html#rfc.section.C.2.3
- {"C.2.3", dehex("1008 7061 7373 776f 7264 0673 6563 7265 74"),
- []HeaderField{{"password", "secret", true}},
- []HeaderField{}},
-
- // C.2.4 Indexed Header Field
- // http://http2.github.io/http2-spec/compression.html#rfc.section.C.2.4
- {"C.2.4", []byte("\x82"),
- []HeaderField{pair(":method", "GET")},
- []HeaderField{}},
- }
- for _, tt := range tests {
- d := NewDecoder(4096, nil)
- hf, err := d.DecodeFull(tt.in)
- if err != nil {
- t.Errorf("%s: %v", tt.name, err)
- continue
- }
- if !reflect.DeepEqual(hf, tt.want) {
- t.Errorf("%s: Got %v; want %v", tt.name, hf, tt.want)
- }
- gotDynTab := d.dynTab.reverseCopy()
- if !reflect.DeepEqual(gotDynTab, tt.wantDynTab) {
- t.Errorf("%s: dynamic table after = %v; want %v", tt.name, gotDynTab, tt.wantDynTab)
- }
- }
-}
-
-func (dt *dynamicTable) reverseCopy() (hf []HeaderField) {
- hf = make([]HeaderField, len(dt.ents))
- for i := range hf {
- hf[i] = dt.ents[len(dt.ents)-1-i]
- }
- return
-}
-
-type encAndWant struct {
- enc []byte
- want []HeaderField
- wantDynTab []HeaderField
- wantDynSize uint32
-}
-
-// C.3 Request Examples without Huffman Coding
-// http://http2.github.io/http2-spec/compression.html#rfc.section.C.3
-func TestDecodeC3_NoHuffman(t *testing.T) {
- testDecodeSeries(t, 4096, []encAndWant{
- {dehex("8286 8441 0f77 7777 2e65 7861 6d70 6c65 2e63 6f6d"),
- []HeaderField{
- pair(":method", "GET"),
- pair(":scheme", "http"),
- pair(":path", "/"),
- pair(":authority", "www.example.com"),
- },
- []HeaderField{
- pair(":authority", "www.example.com"),
- },
- 57,
- },
- {dehex("8286 84be 5808 6e6f 2d63 6163 6865"),
- []HeaderField{
- pair(":method", "GET"),
- pair(":scheme", "http"),
- pair(":path", "/"),
- pair(":authority", "www.example.com"),
- pair("cache-control", "no-cache"),
- },
- []HeaderField{
- pair("cache-control", "no-cache"),
- pair(":authority", "www.example.com"),
- },
- 110,
- },
- {dehex("8287 85bf 400a 6375 7374 6f6d 2d6b 6579 0c63 7573 746f 6d2d 7661 6c75 65"),
- []HeaderField{
- pair(":method", "GET"),
- pair(":scheme", "https"),
- pair(":path", "/index.html"),
- pair(":authority", "www.example.com"),
- pair("custom-key", "custom-value"),
- },
- []HeaderField{
- pair("custom-key", "custom-value"),
- pair("cache-control", "no-cache"),
- pair(":authority", "www.example.com"),
- },
- 164,
- },
- })
-}
-
-// C.4 Request Examples with Huffman Coding
-// http://http2.github.io/http2-spec/compression.html#rfc.section.C.4
-func TestDecodeC4_Huffman(t *testing.T) {
- testDecodeSeries(t, 4096, []encAndWant{
- {dehex("8286 8441 8cf1 e3c2 e5f2 3a6b a0ab 90f4 ff"),
- []HeaderField{
- pair(":method", "GET"),
- pair(":scheme", "http"),
- pair(":path", "/"),
- pair(":authority", "www.example.com"),
- },
- []HeaderField{
- pair(":authority", "www.example.com"),
- },
- 57,
- },
- {dehex("8286 84be 5886 a8eb 1064 9cbf"),
- []HeaderField{
- pair(":method", "GET"),
- pair(":scheme", "http"),
- pair(":path", "/"),
- pair(":authority", "www.example.com"),
- pair("cache-control", "no-cache"),
- },
- []HeaderField{
- pair("cache-control", "no-cache"),
- pair(":authority", "www.example.com"),
- },
- 110,
- },
- {dehex("8287 85bf 4088 25a8 49e9 5ba9 7d7f 8925 a849 e95b b8e8 b4bf"),
- []HeaderField{
- pair(":method", "GET"),
- pair(":scheme", "https"),
- pair(":path", "/index.html"),
- pair(":authority", "www.example.com"),
- pair("custom-key", "custom-value"),
- },
- []HeaderField{
- pair("custom-key", "custom-value"),
- pair("cache-control", "no-cache"),
- pair(":authority", "www.example.com"),
- },
- 164,
- },
- })
-}
-
-// http://http2.github.io/http2-spec/compression.html#rfc.section.C.5
-// "This section shows several consecutive header lists, corresponding
-// to HTTP responses, on the same connection. The HTTP/2 setting
-// parameter SETTINGS_HEADER_TABLE_SIZE is set to the value of 256
-// octets, causing some evictions to occur."
-func TestDecodeC5_ResponsesNoHuff(t *testing.T) {
- testDecodeSeries(t, 256, []encAndWant{
- {dehex(`
-4803 3330 3258 0770 7269 7661 7465 611d
-4d6f 6e2c 2032 3120 4f63 7420 3230 3133
-2032 303a 3133 3a32 3120 474d 546e 1768
-7474 7073 3a2f 2f77 7777 2e65 7861 6d70
-6c65 2e63 6f6d
-`),
- []HeaderField{
- pair(":status", "302"),
- pair("cache-control", "private"),
- pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"),
- pair("location", "https://www.example.com"),
- },
- []HeaderField{
- pair("location", "https://www.example.com"),
- pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"),
- pair("cache-control", "private"),
- pair(":status", "302"),
- },
- 222,
- },
- {dehex("4803 3330 37c1 c0bf"),
- []HeaderField{
- pair(":status", "307"),
- pair("cache-control", "private"),
- pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"),
- pair("location", "https://www.example.com"),
- },
- []HeaderField{
- pair(":status", "307"),
- pair("location", "https://www.example.com"),
- pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"),
- pair("cache-control", "private"),
- },
- 222,
- },
- {dehex(`
-88c1 611d 4d6f 6e2c 2032 3120 4f63 7420
-3230 3133 2032 303a 3133 3a32 3220 474d
-54c0 5a04 677a 6970 7738 666f 6f3d 4153
-444a 4b48 514b 425a 584f 5157 454f 5049
-5541 5851 5745 4f49 553b 206d 6178 2d61
-6765 3d33 3630 303b 2076 6572 7369 6f6e
-3d31
-`),
- []HeaderField{
- pair(":status", "200"),
- pair("cache-control", "private"),
- pair("date", "Mon, 21 Oct 2013 20:13:22 GMT"),
- pair("location", "https://www.example.com"),
- pair("content-encoding", "gzip"),
- pair("set-cookie", "foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1"),
- },
- []HeaderField{
- pair("set-cookie", "foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1"),
- pair("content-encoding", "gzip"),
- pair("date", "Mon, 21 Oct 2013 20:13:22 GMT"),
- },
- 215,
- },
- })
-}
-
-// http://http2.github.io/http2-spec/compression.html#rfc.section.C.6
-// "This section shows the same examples as the previous section, but
-// using Huffman encoding for the literal values. The HTTP/2 setting
-// parameter SETTINGS_HEADER_TABLE_SIZE is set to the value of 256
-// octets, causing some evictions to occur. The eviction mechanism
-// uses the length of the decoded literal values, so the same
-// evictions occurs as in the previous section."
-func TestDecodeC6_ResponsesHuffman(t *testing.T) {
- testDecodeSeries(t, 256, []encAndWant{
- {dehex(`
-4882 6402 5885 aec3 771a 4b61 96d0 7abe
-9410 54d4 44a8 2005 9504 0b81 66e0 82a6
-2d1b ff6e 919d 29ad 1718 63c7 8f0b 97c8
-e9ae 82ae 43d3
-`),
- []HeaderField{
- pair(":status", "302"),
- pair("cache-control", "private"),
- pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"),
- pair("location", "https://www.example.com"),
- },
- []HeaderField{
- pair("location", "https://www.example.com"),
- pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"),
- pair("cache-control", "private"),
- pair(":status", "302"),
- },
- 222,
- },
- {dehex("4883 640e ffc1 c0bf"),
- []HeaderField{
- pair(":status", "307"),
- pair("cache-control", "private"),
- pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"),
- pair("location", "https://www.example.com"),
- },
- []HeaderField{
- pair(":status", "307"),
- pair("location", "https://www.example.com"),
- pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"),
- pair("cache-control", "private"),
- },
- 222,
- },
- {dehex(`
-88c1 6196 d07a be94 1054 d444 a820 0595
-040b 8166 e084 a62d 1bff c05a 839b d9ab
-77ad 94e7 821d d7f2 e6c7 b335 dfdf cd5b
-3960 d5af 2708 7f36 72c1 ab27 0fb5 291f
-9587 3160 65c0 03ed 4ee5 b106 3d50 07
-`),
- []HeaderField{
- pair(":status", "200"),
- pair("cache-control", "private"),
- pair("date", "Mon, 21 Oct 2013 20:13:22 GMT"),
- pair("location", "https://www.example.com"),
- pair("content-encoding", "gzip"),
- pair("set-cookie", "foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1"),
- },
- []HeaderField{
- pair("set-cookie", "foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1"),
- pair("content-encoding", "gzip"),
- pair("date", "Mon, 21 Oct 2013 20:13:22 GMT"),
- },
- 215,
- },
- })
-}
-
-func testDecodeSeries(t *testing.T, size uint32, steps []encAndWant) {
- d := NewDecoder(size, nil)
- for i, step := range steps {
- hf, err := d.DecodeFull(step.enc)
- if err != nil {
- t.Fatalf("Error at step index %d: %v", i, err)
- }
- if !reflect.DeepEqual(hf, step.want) {
- t.Fatalf("At step index %d: Got headers %v; want %v", i, hf, step.want)
- }
- gotDynTab := d.dynTab.reverseCopy()
- if !reflect.DeepEqual(gotDynTab, step.wantDynTab) {
- t.Errorf("After step index %d, dynamic table = %v; want %v", i, gotDynTab, step.wantDynTab)
- }
- if d.dynTab.size != step.wantDynSize {
- t.Errorf("After step index %d, dynamic table size = %v; want %v", i, d.dynTab.size, step.wantDynSize)
- }
- }
-}
-
-func TestHuffmanDecode(t *testing.T) {
- tests := []struct {
- inHex, want string
- }{
- {"f1e3 c2e5 f23a 6ba0 ab90 f4ff", "www.example.com"},
- {"a8eb 1064 9cbf", "no-cache"},
- {"25a8 49e9 5ba9 7d7f", "custom-key"},
- {"25a8 49e9 5bb8 e8b4 bf", "custom-value"},
- {"6402", "302"},
- {"aec3 771a 4b", "private"},
- {"d07a be94 1054 d444 a820 0595 040b 8166 e082 a62d 1bff", "Mon, 21 Oct 2013 20:13:21 GMT"},
- {"9d29 ad17 1863 c78f 0b97 c8e9 ae82 ae43 d3", "https://www.example.com"},
- {"9bd9 ab", "gzip"},
- {"94e7 821d d7f2 e6c7 b335 dfdf cd5b 3960 d5af 2708 7f36 72c1 ab27 0fb5 291f 9587 3160 65c0 03ed 4ee5 b106 3d50 07",
- "foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1"},
- }
- for i, tt := range tests {
- var buf bytes.Buffer
- in, err := hex.DecodeString(strings.Replace(tt.inHex, " ", "", -1))
- if err != nil {
- t.Errorf("%d. hex input error: %v", i, err)
- continue
- }
- if _, err := HuffmanDecode(&buf, in); err != nil {
- t.Errorf("%d. decode error: %v", i, err)
- continue
- }
- if got := buf.String(); tt.want != got {
- t.Errorf("%d. decode = %q; want %q", i, got, tt.want)
- }
- }
-}
-
-func TestAppendHuffmanString(t *testing.T) {
- tests := []struct {
- in, want string
- }{
- {"www.example.com", "f1e3 c2e5 f23a 6ba0 ab90 f4ff"},
- {"no-cache", "a8eb 1064 9cbf"},
- {"custom-key", "25a8 49e9 5ba9 7d7f"},
- {"custom-value", "25a8 49e9 5bb8 e8b4 bf"},
- {"302", "6402"},
- {"private", "aec3 771a 4b"},
- {"Mon, 21 Oct 2013 20:13:21 GMT", "d07a be94 1054 d444 a820 0595 040b 8166 e082 a62d 1bff"},
- {"https://www.example.com", "9d29 ad17 1863 c78f 0b97 c8e9 ae82 ae43 d3"},
- {"gzip", "9bd9 ab"},
- {"foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1",
- "94e7 821d d7f2 e6c7 b335 dfdf cd5b 3960 d5af 2708 7f36 72c1 ab27 0fb5 291f 9587 3160 65c0 03ed 4ee5 b106 3d50 07"},
- }
- for i, tt := range tests {
- buf := []byte{}
- want := strings.Replace(tt.want, " ", "", -1)
- buf = AppendHuffmanString(buf, tt.in)
- if got := hex.EncodeToString(buf); want != got {
- t.Errorf("%d. encode = %q; want %q", i, got, want)
- }
- }
-}
-
-func TestHuffmanMaxStrLen(t *testing.T) {
- const msg = "Some string"
- huff := AppendHuffmanString(nil, msg)
-
- testGood := func(max int) {
- var out bytes.Buffer
- if err := huffmanDecode(&out, max, huff); err != nil {
- t.Errorf("For maxLen=%d, unexpected error: %v", max, err)
- }
- if out.String() != msg {
- t.Errorf("For maxLen=%d, out = %q; want %q", max, out.String(), msg)
- }
- }
- testGood(0)
- testGood(len(msg))
- testGood(len(msg) + 1)
-
- var out bytes.Buffer
- if err := huffmanDecode(&out, len(msg)-1, huff); err != ErrStringLength {
- t.Errorf("err = %v; want ErrStringLength", err)
- }
-}
-
-func TestHuffmanRoundtripStress(t *testing.T) {
- const Len = 50 // of uncompressed string
- input := make([]byte, Len)
- var output bytes.Buffer
- var huff []byte
-
- n := 5000
- if testing.Short() {
- n = 100
- }
- seed := time.Now().UnixNano()
- t.Logf("Seed = %v", seed)
- src := rand.New(rand.NewSource(seed))
- var encSize int64
- for i := 0; i < n; i++ {
- for l := range input {
- input[l] = byte(src.Intn(256))
- }
- huff = AppendHuffmanString(huff[:0], string(input))
- encSize += int64(len(huff))
- output.Reset()
- if err := huffmanDecode(&output, 0, huff); err != nil {
- t.Errorf("Failed to decode %q -> %q -> error %v", input, huff, err)
- continue
- }
- if !bytes.Equal(output.Bytes(), input) {
- t.Errorf("Roundtrip failure on %q -> %q -> %q", input, huff, output.Bytes())
- }
- }
- t.Logf("Compressed size of original: %0.02f%% (%v -> %v)", 100*(float64(encSize)/(Len*float64(n))), Len*n, encSize)
-}
-
-func TestHuffmanDecodeFuzz(t *testing.T) {
- const Len = 50 // of compressed
- var buf, zbuf bytes.Buffer
-
- n := 5000
- if testing.Short() {
- n = 100
- }
- seed := time.Now().UnixNano()
- t.Logf("Seed = %v", seed)
- src := rand.New(rand.NewSource(seed))
- numFail := 0
- for i := 0; i < n; i++ {
- zbuf.Reset()
- if i == 0 {
- // Start with at least one invalid one.
- zbuf.WriteString("00\x91\xff\xff\xff\xff\xc8")
- } else {
- for l := 0; l < Len; l++ {
- zbuf.WriteByte(byte(src.Intn(256)))
- }
- }
-
- buf.Reset()
- if err := huffmanDecode(&buf, 0, zbuf.Bytes()); err != nil {
- if err == ErrInvalidHuffman {
- numFail++
- continue
- }
- t.Errorf("Failed to decode %q: %v", zbuf.Bytes(), err)
- continue
- }
- }
- t.Logf("%0.02f%% are invalid (%d / %d)", 100*float64(numFail)/float64(n), numFail, n)
- if numFail < 1 {
- t.Error("expected at least one invalid huffman encoding (test starts with one)")
- }
-}
-
-func TestReadVarInt(t *testing.T) {
- type res struct {
- i uint64
- consumed int
- err error
- }
- tests := []struct {
- n byte
- p []byte
- want res
- }{
- // Fits in a byte:
- {1, []byte{0}, res{0, 1, nil}},
- {2, []byte{2}, res{2, 1, nil}},
- {3, []byte{6}, res{6, 1, nil}},
- {4, []byte{14}, res{14, 1, nil}},
- {5, []byte{30}, res{30, 1, nil}},
- {6, []byte{62}, res{62, 1, nil}},
- {7, []byte{126}, res{126, 1, nil}},
- {8, []byte{254}, res{254, 1, nil}},
-
- // Doesn't fit in a byte:
- {1, []byte{1}, res{0, 0, errNeedMore}},
- {2, []byte{3}, res{0, 0, errNeedMore}},
- {3, []byte{7}, res{0, 0, errNeedMore}},
- {4, []byte{15}, res{0, 0, errNeedMore}},
- {5, []byte{31}, res{0, 0, errNeedMore}},
- {6, []byte{63}, res{0, 0, errNeedMore}},
- {7, []byte{127}, res{0, 0, errNeedMore}},
- {8, []byte{255}, res{0, 0, errNeedMore}},
-
- // Ignoring top bits:
- {5, []byte{255, 154, 10}, res{1337, 3, nil}}, // high dummy three bits: 111
- {5, []byte{159, 154, 10}, res{1337, 3, nil}}, // high dummy three bits: 100
- {5, []byte{191, 154, 10}, res{1337, 3, nil}}, // high dummy three bits: 101
-
- // Extra byte:
- {5, []byte{191, 154, 10, 2}, res{1337, 3, nil}}, // extra byte
-
- // Short a byte:
- {5, []byte{191, 154}, res{0, 0, errNeedMore}},
-
- // integer overflow:
- {1, []byte{255, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128}, res{0, 0, errVarintOverflow}},
- }
- for _, tt := range tests {
- i, remain, err := readVarInt(tt.n, tt.p)
- consumed := len(tt.p) - len(remain)
- got := res{i, consumed, err}
- if got != tt.want {
- t.Errorf("readVarInt(%d, %v ~ %x) = %+v; want %+v", tt.n, tt.p, tt.p, got, tt.want)
- }
- }
-}
-
-// Fuzz crash, originally reported at https://github.com/bradfitz/http2/issues/56
-func TestHuffmanFuzzCrash(t *testing.T) {
- got, err := HuffmanDecodeToString([]byte("00\x91\xff\xff\xff\xff\xc8"))
- if got != "" {
- t.Errorf("Got %q; want empty string", got)
- }
- if err != ErrInvalidHuffman {
- t.Errorf("Err = %v; want ErrInvalidHuffman", err)
- }
-}
-
-func dehex(s string) []byte {
- s = strings.Replace(s, " ", "", -1)
- s = strings.Replace(s, "\n", "", -1)
- b, err := hex.DecodeString(s)
- if err != nil {
- panic(err)
- }
- return b
-}
-
-func TestEmitEnabled(t *testing.T) {
- var buf bytes.Buffer
- enc := NewEncoder(&buf)
- enc.WriteField(HeaderField{Name: "foo", Value: "bar"})
- enc.WriteField(HeaderField{Name: "foo", Value: "bar"})
-
- numCallback := 0
- var dec *Decoder
- dec = NewDecoder(8<<20, func(HeaderField) {
- numCallback++
- dec.SetEmitEnabled(false)
- })
- if !dec.EmitEnabled() {
- t.Errorf("initial emit enabled = false; want true")
- }
- if _, err := dec.Write(buf.Bytes()); err != nil {
- t.Error(err)
- }
- if numCallback != 1 {
- t.Errorf("num callbacks = %d; want 1", numCallback)
- }
- if dec.EmitEnabled() {
- t.Errorf("emit enabled = true; want false")
- }
-}
-
-func TestSaveBufLimit(t *testing.T) {
- const maxStr = 1 << 10
- var got []HeaderField
- dec := NewDecoder(initialHeaderTableSize, func(hf HeaderField) {
- got = append(got, hf)
- })
- dec.SetMaxStringLength(maxStr)
- var frag []byte
- frag = append(frag[:0], encodeTypeByte(false, false))
- frag = appendVarInt(frag, 7, 3)
- frag = append(frag, "foo"...)
- frag = appendVarInt(frag, 7, 3)
- frag = append(frag, "bar"...)
-
- if _, err := dec.Write(frag); err != nil {
- t.Fatal(err)
- }
-
- want := []HeaderField{{Name: "foo", Value: "bar"}}
- if !reflect.DeepEqual(got, want) {
- t.Errorf("After small writes, got %v; want %v", got, want)
- }
-
- frag = append(frag[:0], encodeTypeByte(false, false))
- frag = appendVarInt(frag, 7, maxStr*3)
- frag = append(frag, make([]byte, maxStr*3)...)
-
- _, err := dec.Write(frag)
- if err != ErrStringLength {
- t.Fatalf("Write error = %v; want ErrStringLength", err)
- }
-}
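The tests above drive DecodeFull through package-internal helpers such as dehex and pair. As a rough standalone sketch (hypothetical, and again assuming the upstream golang.org/x/net/http2/hpack import path), the same RFC 7541 C.2.1 vector can be decoded with only the exported API:

package main

import (
	"encoding/hex"
	"fmt"
	"log"
	"strings"

	"golang.org/x/net/http2/hpack"
)

func main() {
	// Literal header field with incremental indexing: "custom-key: custom-header".
	raw := "400a 6375 7374 6f6d 2d6b 6579 0d63 7573 746f 6d2d 6865 6164 6572"
	in, err := hex.DecodeString(strings.Replace(raw, " ", "", -1))
	if err != nil {
		log.Fatal(err)
	}

	d := hpack.NewDecoder(4096, nil) // DecodeFull installs its own emit function
	fields, err := d.DecodeFull(in)
	if err != nil {
		log.Fatal(err)
	}
	for _, hf := range fields {
		fmt.Println(hf) // header field "custom-key" = "custom-header"
	}
}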
diff --git a/vendor/golang.org/x/net/http2/hpack/huffman.go b/vendor/golang.org/x/net/http2/hpack/huffman.go
deleted file mode 100644
index eb4b1f05..00000000
--- a/vendor/golang.org/x/net/http2/hpack/huffman.go
+++ /dev/null
@@ -1,190 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package hpack
-
-import (
- "bytes"
- "errors"
- "io"
- "sync"
-)
-
-var bufPool = sync.Pool{
- New: func() interface{} { return new(bytes.Buffer) },
-}
-
-// HuffmanDecode decodes the string in v and writes the expanded
-// result to w, returning the number of bytes written to w and the
-// Write call's return value. At most one Write call is made.
-func HuffmanDecode(w io.Writer, v []byte) (int, error) {
- buf := bufPool.Get().(*bytes.Buffer)
- buf.Reset()
- defer bufPool.Put(buf)
- if err := huffmanDecode(buf, 0, v); err != nil {
- return 0, err
- }
- return w.Write(buf.Bytes())
-}
-
-// HuffmanDecodeToString decodes the string in v.
-func HuffmanDecodeToString(v []byte) (string, error) {
- buf := bufPool.Get().(*bytes.Buffer)
- buf.Reset()
- defer bufPool.Put(buf)
- if err := huffmanDecode(buf, 0, v); err != nil {
- return "", err
- }
- return buf.String(), nil
-}
-
-// ErrInvalidHuffman is returned for errors found decoding
-// Huffman-encoded strings.
-var ErrInvalidHuffman = errors.New("hpack: invalid Huffman-encoded data")
-
-// huffmanDecode decodes v to buf.
-// If maxLen is greater than 0, attempts to write more to buf than
-// maxLen bytes will return ErrStringLength.
-func huffmanDecode(buf *bytes.Buffer, maxLen int, v []byte) error {
- n := rootHuffmanNode
- cur, nbits := uint(0), uint8(0)
- for _, b := range v {
- cur = cur<<8 | uint(b)
- nbits += 8
- for nbits >= 8 {
- idx := byte(cur >> (nbits - 8))
- n = n.children[idx]
- if n == nil {
- return ErrInvalidHuffman
- }
- if n.children == nil {
- if maxLen != 0 && buf.Len() == maxLen {
- return ErrStringLength
- }
- buf.WriteByte(n.sym)
- nbits -= n.codeLen
- n = rootHuffmanNode
- } else {
- nbits -= 8
- }
- }
- }
- for nbits > 0 {
- n = n.children[byte(cur<<(8-nbits))]
- if n.children != nil || n.codeLen > nbits {
- break
- }
- buf.WriteByte(n.sym)
- nbits -= n.codeLen
- n = rootHuffmanNode
- }
- return nil
-}
-
-type node struct {
- // children is non-nil for internal nodes
- children []*node
-
- // The following are only valid if children is nil:
- codeLen uint8 // number of bits that led to the output of sym
- sym byte // output symbol
-}
-
-func newInternalNode() *node {
- return &node{children: make([]*node, 256)}
-}
-
-var rootHuffmanNode = newInternalNode()
-
-func init() {
- if len(huffmanCodes) != 256 {
- panic("unexpected size")
- }
- for i, code := range huffmanCodes {
- addDecoderNode(byte(i), code, huffmanCodeLen[i])
- }
-}
-
-func addDecoderNode(sym byte, code uint32, codeLen uint8) {
- cur := rootHuffmanNode
- for codeLen > 8 {
- codeLen -= 8
- i := uint8(code >> codeLen)
- if cur.children[i] == nil {
- cur.children[i] = newInternalNode()
- }
- cur = cur.children[i]
- }
- shift := 8 - codeLen
- start, end := int(uint8(code<<shift)), 1<<shift
- for i := start; i < start+end; i++ {
- cur.children[i] = &node{sym: sym, codeLen: codeLen}
- }
-}
-
-// AppendHuffmanString appends s, as encoded in Huffman codes, to dst
-// and returns the extended buffer.
-func AppendHuffmanString(dst []byte, s string) []byte {
- rembits := uint8(8)
-
- for i := 0; i < len(s); i++ {
- if rembits == 8 {
- dst = append(dst, 0)
- }
- dst, rembits = appendByteToHuffmanCode(dst, rembits, s[i])
- }
-
- if rembits < 8 {
- // special EOS symbol
- code := uint32(0x3fffffff)
- nbits := uint8(30)
-
- t := uint8(code >> (nbits - rembits))
- dst[len(dst)-1] |= t
- }
-
- return dst
-}
-
-// HuffmanEncodeLength returns the number of bytes required to encode
-// s in Huffman codes. The result is rounded up to a byte boundary.
-func HuffmanEncodeLength(s string) uint64 {
- n := uint64(0)
- for i := 0; i < len(s); i++ {
- n += uint64(huffmanCodeLen[s[i]])
- }
- return (n + 7) / 8
-}
-
-// appendByteToHuffmanCode appends Huffman code for c to dst and
-// returns the extended buffer and the remaining bits in the last
-// element. The appending is not byte aligned and the remaining bits
-// in the last element of dst are given in rembits.
-func appendByteToHuffmanCode(dst []byte, rembits uint8, c byte) ([]byte, uint8) {
- code := huffmanCodes[c]
- nbits := huffmanCodeLen[c]
-
- for {
- if rembits > nbits {
- t := uint8(code << (rembits - nbits))
- dst[len(dst)-1] |= t
- rembits -= nbits
- break
- }
-
- t := uint8(code >> (nbits - rembits))
- dst[len(dst)-1] |= t
-
- nbits -= rembits
- rembits = 8
-
- if nbits == 0 {
- break
- }
-
- dst = append(dst, 0)
- }
-
- return dst, rembits
-}
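A short, hypothetical round trip through the exported Huffman helpers deleted above (same import-path assumption as before): AppendHuffmanString compresses a string and HuffmanDecodeToString expands it.

package main

import (
	"fmt"
	"log"

	"golang.org/x/net/http2/hpack"
)

func main() {
	// Compress, then decompress, one of the strings from the RFC examples.
	enc := hpack.AppendHuffmanString(nil, "www.example.com")
	fmt.Printf("huffman-encoded to %d bytes: %x\n", len(enc), enc)

	dec, err := hpack.HuffmanDecodeToString(enc)
	if err != nil { // ErrInvalidHuffman for malformed input
		log.Fatal(err)
	}
	fmt.Println(dec) // www.example.com
}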
diff --git a/vendor/golang.org/x/net/http2/hpack/tables.go b/vendor/golang.org/x/net/http2/hpack/tables.go
deleted file mode 100644
index b9283a02..00000000
--- a/vendor/golang.org/x/net/http2/hpack/tables.go
+++ /dev/null
@@ -1,352 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package hpack
-
-func pair(name, value string) HeaderField {
- return HeaderField{Name: name, Value: value}
-}
-
-// http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-07#appendix-B
-var staticTable = [...]HeaderField{
- pair(":authority", ""), // index 1 (1-based)
- pair(":method", "GET"),
- pair(":method", "POST"),
- pair(":path", "/"),
- pair(":path", "/index.html"),
- pair(":scheme", "http"),
- pair(":scheme", "https"),
- pair(":status", "200"),
- pair(":status", "204"),
- pair(":status", "206"),
- pair(":status", "304"),
- pair(":status", "400"),
- pair(":status", "404"),
- pair(":status", "500"),
- pair("accept-charset", ""),
- pair("accept-encoding", "gzip, deflate"),
- pair("accept-language", ""),
- pair("accept-ranges", ""),
- pair("accept", ""),
- pair("access-control-allow-origin", ""),
- pair("age", ""),
- pair("allow", ""),
- pair("authorization", ""),
- pair("cache-control", ""),
- pair("content-disposition", ""),
- pair("content-encoding", ""),
- pair("content-language", ""),
- pair("content-length", ""),
- pair("content-location", ""),
- pair("content-range", ""),
- pair("content-type", ""),
- pair("cookie", ""),
- pair("date", ""),
- pair("etag", ""),
- pair("expect", ""),
- pair("expires", ""),
- pair("from", ""),
- pair("host", ""),
- pair("if-match", ""),
- pair("if-modified-since", ""),
- pair("if-none-match", ""),
- pair("if-range", ""),
- pair("if-unmodified-since", ""),
- pair("last-modified", ""),
- pair("link", ""),
- pair("location", ""),
- pair("max-forwards", ""),
- pair("proxy-authenticate", ""),
- pair("proxy-authorization", ""),
- pair("range", ""),
- pair("referer", ""),
- pair("refresh", ""),
- pair("retry-after", ""),
- pair("server", ""),
- pair("set-cookie", ""),
- pair("strict-transport-security", ""),
- pair("transfer-encoding", ""),
- pair("user-agent", ""),
- pair("vary", ""),
- pair("via", ""),
- pair("www-authenticate", ""),
-}
-
-var huffmanCodes = [256]uint32{
- 0x1ff8,
- 0x7fffd8,
- 0xfffffe2,
- 0xfffffe3,
- 0xfffffe4,
- 0xfffffe5,
- 0xfffffe6,
- 0xfffffe7,
- 0xfffffe8,
- 0xffffea,
- 0x3ffffffc,
- 0xfffffe9,
- 0xfffffea,
- 0x3ffffffd,
- 0xfffffeb,
- 0xfffffec,
- 0xfffffed,
- 0xfffffee,
- 0xfffffef,
- 0xffffff0,
- 0xffffff1,
- 0xffffff2,
- 0x3ffffffe,
- 0xffffff3,
- 0xffffff4,
- 0xffffff5,
- 0xffffff6,
- 0xffffff7,
- 0xffffff8,
- 0xffffff9,
- 0xffffffa,
- 0xffffffb,
- 0x14,
- 0x3f8,
- 0x3f9,
- 0xffa,
- 0x1ff9,
- 0x15,
- 0xf8,
- 0x7fa,
- 0x3fa,
- 0x3fb,
- 0xf9,
- 0x7fb,
- 0xfa,
- 0x16,
- 0x17,
- 0x18,
- 0x0,
- 0x1,
- 0x2,
- 0x19,
- 0x1a,
- 0x1b,
- 0x1c,
- 0x1d,
- 0x1e,
- 0x1f,
- 0x5c,
- 0xfb,
- 0x7ffc,
- 0x20,
- 0xffb,
- 0x3fc,
- 0x1ffa,
- 0x21,
- 0x5d,
- 0x5e,
- 0x5f,
- 0x60,
- 0x61,
- 0x62,
- 0x63,
- 0x64,
- 0x65,
- 0x66,
- 0x67,
- 0x68,
- 0x69,
- 0x6a,
- 0x6b,
- 0x6c,
- 0x6d,
- 0x6e,
- 0x6f,
- 0x70,
- 0x71,
- 0x72,
- 0xfc,
- 0x73,
- 0xfd,
- 0x1ffb,
- 0x7fff0,
- 0x1ffc,
- 0x3ffc,
- 0x22,
- 0x7ffd,
- 0x3,
- 0x23,
- 0x4,
- 0x24,
- 0x5,
- 0x25,
- 0x26,
- 0x27,
- 0x6,
- 0x74,
- 0x75,
- 0x28,
- 0x29,
- 0x2a,
- 0x7,
- 0x2b,
- 0x76,
- 0x2c,
- 0x8,
- 0x9,
- 0x2d,
- 0x77,
- 0x78,
- 0x79,
- 0x7a,
- 0x7b,
- 0x7ffe,
- 0x7fc,
- 0x3ffd,
- 0x1ffd,
- 0xffffffc,
- 0xfffe6,
- 0x3fffd2,
- 0xfffe7,
- 0xfffe8,
- 0x3fffd3,
- 0x3fffd4,
- 0x3fffd5,
- 0x7fffd9,
- 0x3fffd6,
- 0x7fffda,
- 0x7fffdb,
- 0x7fffdc,
- 0x7fffdd,
- 0x7fffde,
- 0xffffeb,
- 0x7fffdf,
- 0xffffec,
- 0xffffed,
- 0x3fffd7,
- 0x7fffe0,
- 0xffffee,
- 0x7fffe1,
- 0x7fffe2,
- 0x7fffe3,
- 0x7fffe4,
- 0x1fffdc,
- 0x3fffd8,
- 0x7fffe5,
- 0x3fffd9,
- 0x7fffe6,
- 0x7fffe7,
- 0xffffef,
- 0x3fffda,
- 0x1fffdd,
- 0xfffe9,
- 0x3fffdb,
- 0x3fffdc,
- 0x7fffe8,
- 0x7fffe9,
- 0x1fffde,
- 0x7fffea,
- 0x3fffdd,
- 0x3fffde,
- 0xfffff0,
- 0x1fffdf,
- 0x3fffdf,
- 0x7fffeb,
- 0x7fffec,
- 0x1fffe0,
- 0x1fffe1,
- 0x3fffe0,
- 0x1fffe2,
- 0x7fffed,
- 0x3fffe1,
- 0x7fffee,
- 0x7fffef,
- 0xfffea,
- 0x3fffe2,
- 0x3fffe3,
- 0x3fffe4,
- 0x7ffff0,
- 0x3fffe5,
- 0x3fffe6,
- 0x7ffff1,
- 0x3ffffe0,
- 0x3ffffe1,
- 0xfffeb,
- 0x7fff1,
- 0x3fffe7,
- 0x7ffff2,
- 0x3fffe8,
- 0x1ffffec,
- 0x3ffffe2,
- 0x3ffffe3,
- 0x3ffffe4,
- 0x7ffffde,
- 0x7ffffdf,
- 0x3ffffe5,
- 0xfffff1,
- 0x1ffffed,
- 0x7fff2,
- 0x1fffe3,
- 0x3ffffe6,
- 0x7ffffe0,
- 0x7ffffe1,
- 0x3ffffe7,
- 0x7ffffe2,
- 0xfffff2,
- 0x1fffe4,
- 0x1fffe5,
- 0x3ffffe8,
- 0x3ffffe9,
- 0xffffffd,
- 0x7ffffe3,
- 0x7ffffe4,
- 0x7ffffe5,
- 0xfffec,
- 0xfffff3,
- 0xfffed,
- 0x1fffe6,
- 0x3fffe9,
- 0x1fffe7,
- 0x1fffe8,
- 0x7ffff3,
- 0x3fffea,
- 0x3fffeb,
- 0x1ffffee,
- 0x1ffffef,
- 0xfffff4,
- 0xfffff5,
- 0x3ffffea,
- 0x7ffff4,
- 0x3ffffeb,
- 0x7ffffe6,
- 0x3ffffec,
- 0x3ffffed,
- 0x7ffffe7,
- 0x7ffffe8,
- 0x7ffffe9,
- 0x7ffffea,
- 0x7ffffeb,
- 0xffffffe,
- 0x7ffffec,
- 0x7ffffed,
- 0x7ffffee,
- 0x7ffffef,
- 0x7fffff0,
- 0x3ffffee,
-}
-
-var huffmanCodeLen = [256]uint8{
- 13, 23, 28, 28, 28, 28, 28, 28, 28, 24, 30, 28, 28, 30, 28, 28,
- 28, 28, 28, 28, 28, 28, 30, 28, 28, 28, 28, 28, 28, 28, 28, 28,
- 6, 10, 10, 12, 13, 6, 8, 11, 10, 10, 8, 11, 8, 6, 6, 6,
- 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 7, 8, 15, 6, 12, 10,
- 13, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
- 7, 7, 7, 7, 7, 7, 7, 7, 8, 7, 8, 13, 19, 13, 14, 6,
- 15, 5, 6, 5, 6, 5, 6, 6, 6, 5, 7, 7, 6, 6, 6, 5,
- 6, 7, 6, 5, 5, 6, 7, 7, 7, 7, 7, 15, 11, 14, 13, 28,
- 20, 22, 20, 20, 22, 22, 22, 23, 22, 23, 23, 23, 23, 23, 24, 23,
- 24, 24, 22, 23, 24, 23, 23, 23, 23, 21, 22, 23, 22, 23, 23, 24,
- 22, 21, 20, 22, 22, 23, 23, 21, 23, 22, 22, 24, 21, 22, 23, 23,
- 21, 21, 22, 21, 23, 22, 23, 23, 20, 22, 22, 22, 23, 22, 22, 23,
- 26, 26, 20, 19, 22, 23, 22, 25, 26, 26, 26, 27, 27, 26, 24, 25,
- 19, 21, 26, 27, 27, 26, 27, 24, 21, 21, 26, 26, 28, 27, 27, 27,
- 20, 24, 20, 21, 22, 21, 21, 23, 22, 22, 25, 25, 24, 24, 26, 23,
- 26, 27, 26, 26, 27, 27, 27, 27, 27, 28, 27, 27, 27, 27, 27, 26,
-}
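Per the huffmanCodes/huffmanCodeLen entries above, 'a' (0x61) maps to the 5-bit code 0x3, so a run of three of them needs 15 bits, or two octets once rounded up. A tiny, hypothetical check of that arithmetic using the exported helpers:

package main

import (
	"fmt"

	"golang.org/x/net/http2/hpack"
)

func main() {
	// 'a' is a 5-bit symbol, so "aaa" takes 15 bits = 2 octets after rounding up.
	fmt.Println(hpack.HuffmanEncodeLength("aaa")) // 2

	// The packed bytes themselves, padded with EOS-prefix bits per RFC 7541 section 5.2.
	fmt.Printf("%x\n", hpack.AppendHuffmanString(nil, "aaa"))
}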
diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go
deleted file mode 100644
index 4c5e11ac..00000000
--- a/vendor/golang.org/x/net/http2/http2.go
+++ /dev/null
@@ -1,429 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package http2 implements the HTTP/2 protocol.
-//
-// This package is low-level and intended to be used directly by very
-// few people. Most users will use it indirectly through the automatic
-// use by the net/http package (from Go 1.6 and later).
-// For use in earlier Go versions see ConfigureServer. (Transport support
-// requires Go 1.6 or later)
-//
-// See https://http2.github.io/ for more information on HTTP/2.
-//
-// See https://http2.golang.org/ for a test server running this code.
-package http2
-
-import (
- "bufio"
- "crypto/tls"
- "errors"
- "fmt"
- "io"
- "net/http"
- "os"
- "strconv"
- "strings"
- "sync"
-)
-
-var (
- VerboseLogs bool
- logFrameWrites bool
- logFrameReads bool
-)
-
-func init() {
- e := os.Getenv("GODEBUG")
- if strings.Contains(e, "http2debug=1") {
- VerboseLogs = true
- }
- if strings.Contains(e, "http2debug=2") {
- VerboseLogs = true
- logFrameWrites = true
- logFrameReads = true
- }
-}
-
-const (
- // ClientPreface is the string that must be sent by new
- // connections from clients.
- ClientPreface = "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"
-
- // SETTINGS_MAX_FRAME_SIZE default
- // http://http2.github.io/http2-spec/#rfc.section.6.5.2
- initialMaxFrameSize = 16384
-
- // NextProtoTLS is the NPN/ALPN protocol negotiated during
- // HTTP/2's TLS setup.
- NextProtoTLS = "h2"
-
- // http://http2.github.io/http2-spec/#SettingValues
- initialHeaderTableSize = 4096
-
- initialWindowSize = 65535 // 6.9.2 Initial Flow Control Window Size
-
- defaultMaxReadFrameSize = 1 << 20
-)
-
-var (
- clientPreface = []byte(ClientPreface)
-)
-
-type streamState int
-
-const (
- stateIdle streamState = iota
- stateOpen
- stateHalfClosedLocal
- stateHalfClosedRemote
- stateResvLocal
- stateResvRemote
- stateClosed
-)
-
-var stateName = [...]string{
- stateIdle: "Idle",
- stateOpen: "Open",
- stateHalfClosedLocal: "HalfClosedLocal",
- stateHalfClosedRemote: "HalfClosedRemote",
- stateResvLocal: "ResvLocal",
- stateResvRemote: "ResvRemote",
- stateClosed: "Closed",
-}
-
-func (st streamState) String() string {
- return stateName[st]
-}
-
-// Setting is a setting parameter: which setting it is, and its value.
-type Setting struct {
- // ID is which setting is being set.
- // See http://http2.github.io/http2-spec/#SettingValues
- ID SettingID
-
- // Val is the value.
- Val uint32
-}
-
-func (s Setting) String() string {
- return fmt.Sprintf("[%v = %d]", s.ID, s.Val)
-}
-
-// Valid reports whether the setting is valid.
-func (s Setting) Valid() error {
- // Limits and error codes from 6.5.2 Defined SETTINGS Parameters
- switch s.ID {
- case SettingEnablePush:
- if s.Val != 1 && s.Val != 0 {
- return ConnectionError(ErrCodeProtocol)
- }
- case SettingInitialWindowSize:
- if s.Val > 1<<31-1 {
- return ConnectionError(ErrCodeFlowControl)
- }
- case SettingMaxFrameSize:
- if s.Val < 16384 || s.Val > 1<<24-1 {
- return ConnectionError(ErrCodeProtocol)
- }
- }
- return nil
-}
-
-// A SettingID is an HTTP/2 setting as defined in
-// http://http2.github.io/http2-spec/#iana-settings
-type SettingID uint16
-
-const (
- SettingHeaderTableSize SettingID = 0x1
- SettingEnablePush SettingID = 0x2
- SettingMaxConcurrentStreams SettingID = 0x3
- SettingInitialWindowSize SettingID = 0x4
- SettingMaxFrameSize SettingID = 0x5
- SettingMaxHeaderListSize SettingID = 0x6
-)
-
-var settingName = map[SettingID]string{
- SettingHeaderTableSize: "HEADER_TABLE_SIZE",
- SettingEnablePush: "ENABLE_PUSH",
- SettingMaxConcurrentStreams: "MAX_CONCURRENT_STREAMS",
- SettingInitialWindowSize: "INITIAL_WINDOW_SIZE",
- SettingMaxFrameSize: "MAX_FRAME_SIZE",
- SettingMaxHeaderListSize: "MAX_HEADER_LIST_SIZE",
-}
-
-func (s SettingID) String() string {
- if v, ok := settingName[s]; ok {
- return v
- }
- return fmt.Sprintf("UNKNOWN_SETTING_%d", uint16(s))
-}
-
-var (
- errInvalidHeaderFieldName = errors.New("http2: invalid header field name")
- errInvalidHeaderFieldValue = errors.New("http2: invalid header field value")
-)
-
-// validHeaderFieldName reports whether v is a valid header field name (key).
-// RFC 7230 says:
-// header-field = field-name ":" OWS field-value OWS
-// field-name = token
-// tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" / "+" / "-" / "." /
-// "^" / "_" / "
-// Further, http2 says:
-// "Just as in HTTP/1.x, header field names are strings of ASCII
-// characters that are compared in a case-insensitive
-// fashion. However, header field names MUST be converted to
-// lowercase prior to their encoding in HTTP/2. "
-func validHeaderFieldName(v string) bool {
- if len(v) == 0 {
- return false
- }
- for _, r := range v {
- if int(r) >= len(isTokenTable) || ('A' <= r && r <= 'Z') {
- return false
- }
- if !isTokenTable[byte(r)] {
- return false
- }
- }
- return true
-}
-
-// validHeaderFieldValue reports whether v is a valid header field value.
-//
-// RFC 7230 says:
-// field-value = *( field-content / obs-fold )
-// obs-fold = N/A to http2, and deprecated
-// field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ]
-// field-vchar = VCHAR / obs-text
-// obs-text = %x80-FF
-// VCHAR = "any visible [USASCII] character"
-//
-// http2 further says: "Similarly, HTTP/2 allows header field values
-// that are not valid. While most of the values that can be encoded
-// will not alter header field parsing, carriage return (CR, ASCII
-// 0xd), line feed (LF, ASCII 0xa), and the zero character (NUL, ASCII
-// 0x0) might be exploited by an attacker if they are translated
-// verbatim. Any request or response that contains a character not
-// permitted in a header field value MUST be treated as malformed
-// (Section 8.1.2.6). Valid characters are defined by the
-// field-content ABNF rule in Section 3.2 of [RFC7230]."
-//
-// This function does not (yet?) properly handle the rejection of
-// strings that begin or end with SP or HTAB.
-func validHeaderFieldValue(v string) bool {
- for i := 0; i < len(v); i++ {
- if b := v[i]; b < ' ' && b != '\t' || b == 0x7f {
- return false
- }
- }
- return true
-}
-
-var httpCodeStringCommon = map[int]string{} // n -> strconv.Itoa(n)
-
-func init() {
- for i := 100; i <= 999; i++ {
- if v := http.StatusText(i); v != "" {
- httpCodeStringCommon[i] = strconv.Itoa(i)
- }
- }
-}
-
-func httpCodeString(code int) string {
- if s, ok := httpCodeStringCommon[code]; ok {
- return s
- }
- return strconv.Itoa(code)
-}
-
-// from pkg io
-type stringWriter interface {
- WriteString(s string) (n int, err error)
-}
-
-// A gate lets two goroutines coordinate their activities.
-type gate chan struct{}
-
-func (g gate) Done() { g <- struct{}{} }
-func (g gate) Wait() { <-g }
-
-// A closeWaiter is like a sync.WaitGroup but only goes 1 to 0 (open to closed).
-type closeWaiter chan struct{}
-
-// Init makes a closeWaiter usable.
-// It exists so that a closeWaiter value can be placed inside a
-// larger struct and have the Mutex and Cond's memory in the same
-// allocation.
-func (cw *closeWaiter) Init() {
- *cw = make(chan struct{})
-}
-
-// Close marks the closeWaiter as closed and unblocks any waiters.
-func (cw closeWaiter) Close() {
- close(cw)
-}
-
-// Wait waits for the closeWaiter to become closed.
-func (cw closeWaiter) Wait() {
- <-cw
-}
-
-// bufferedWriter is a buffered writer that writes to w.
-// Its buffered writer is lazily allocated as needed, to minimize
-// idle memory usage with many connections.
-type bufferedWriter struct {
- w io.Writer // immutable
- bw *bufio.Writer // non-nil when data is buffered
-}
-
-func newBufferedWriter(w io.Writer) *bufferedWriter {
- return &bufferedWriter{w: w}
-}
-
-var bufWriterPool = sync.Pool{
- New: func() interface{} {
- // TODO: pick something better? this is a bit under
- // (3 x typical 1500 byte MTU) at least.
- return bufio.NewWriterSize(nil, 4<<10)
- },
-}
-
-func (w *bufferedWriter) Write(p []byte) (n int, err error) {
- if w.bw == nil {
- bw := bufWriterPool.Get().(*bufio.Writer)
- bw.Reset(w.w)
- w.bw = bw
- }
- return w.bw.Write(p)
-}
-
-func (w *bufferedWriter) Flush() error {
- bw := w.bw
- if bw == nil {
- return nil
- }
- err := bw.Flush()
- bw.Reset(nil)
- bufWriterPool.Put(bw)
- w.bw = nil
- return err
-}
-
-func mustUint31(v int32) uint32 {
- if v < 0 || v > 2147483647 {
- panic("out of range")
- }
- return uint32(v)
-}
-
-// bodyAllowedForStatus reports whether a given response status code
-// permits a body. See RFC2616, section 4.4.
-func bodyAllowedForStatus(status int) bool {
- switch {
- case status >= 100 && status <= 199:
- return false
- case status == 204:
- return false
- case status == 304:
- return false
- }
- return true
-}
-
-type httpError struct {
- msg string
- timeout bool
-}
-
-func (e *httpError) Error() string { return e.msg }
-func (e *httpError) Timeout() bool { return e.timeout }
-func (e *httpError) Temporary() bool { return true }
-
-var errTimeout error = &httpError{msg: "http2: timeout awaiting response headers", timeout: true}
-
-var isTokenTable = [127]bool{
- '!': true,
- '#': true,
- '$': true,
- '%': true,
- '&': true,
- '\'': true,
- '*': true,
- '+': true,
- '-': true,
- '.': true,
- '0': true,
- '1': true,
- '2': true,
- '3': true,
- '4': true,
- '5': true,
- '6': true,
- '7': true,
- '8': true,
- '9': true,
- 'A': true,
- 'B': true,
- 'C': true,
- 'D': true,
- 'E': true,
- 'F': true,
- 'G': true,
- 'H': true,
- 'I': true,
- 'J': true,
- 'K': true,
- 'L': true,
- 'M': true,
- 'N': true,
- 'O': true,
- 'P': true,
- 'Q': true,
- 'R': true,
- 'S': true,
- 'T': true,
- 'U': true,
- 'W': true,
- 'V': true,
- 'X': true,
- 'Y': true,
- 'Z': true,
- '^': true,
- '_': true,
- '`': true,
- 'a': true,
- 'b': true,
- 'c': true,
- 'd': true,
- 'e': true,
- 'f': true,
- 'g': true,
- 'h': true,
- 'i': true,
- 'j': true,
- 'k': true,
- 'l': true,
- 'm': true,
- 'n': true,
- 'o': true,
- 'p': true,
- 'q': true,
- 'r': true,
- 's': true,
- 't': true,
- 'u': true,
- 'v': true,
- 'w': true,
- 'x': true,
- 'y': true,
- 'z': true,
- '|': true,
- '~': true,
-}
-
-type connectionStater interface {
- ConnectionState() tls.ConnectionState
-}
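Setting, SettingID, and the Valid check deleted above are exported from the upstream package. A brief, hypothetical sketch of how they behave (assuming golang.org/x/net/http2 remains the import path):

package main

import (
	"fmt"

	"golang.org/x/net/http2"
)

func main() {
	s := http2.Setting{ID: http2.SettingMaxFrameSize, Val: 123}
	fmt.Println(s)         // [MAX_FRAME_SIZE = 123]
	fmt.Println(s.Valid()) // non-nil: MAX_FRAME_SIZE must be in [16384, 1<<24-1]

	s.Val = 1 << 20
	fmt.Println(s.Valid()) // <nil>
}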
diff --git a/vendor/golang.org/x/net/http2/http2_test.go b/vendor/golang.org/x/net/http2/http2_test.go
deleted file mode 100644
index 0a4da46a..00000000
--- a/vendor/golang.org/x/net/http2/http2_test.go
+++ /dev/null
@@ -1,174 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package http2
-
-import (
- "bytes"
- "errors"
- "flag"
- "fmt"
- "net/http"
- "os/exec"
- "strconv"
- "strings"
- "testing"
-
- "golang.org/x/net/http2/hpack"
-)
-
-var knownFailing = flag.Bool("known_failing", false, "Run known-failing tests.")
-
-func condSkipFailingTest(t *testing.T) {
- if !*knownFailing {
- t.Skip("Skipping known-failing test without --known_failing")
- }
-}
-
-func init() {
- DebugGoroutines = true
- flag.BoolVar(&VerboseLogs, "verboseh2", false, "Verbose HTTP/2 debug logging")
-}
-
-func TestSettingString(t *testing.T) {
- tests := []struct {
- s Setting
- want string
- }{
- {Setting{SettingMaxFrameSize, 123}, "[MAX_FRAME_SIZE = 123]"},
- {Setting{1<<16 - 1, 123}, "[UNKNOWN_SETTING_65535 = 123]"},
- }
- for i, tt := range tests {
- got := fmt.Sprint(tt.s)
- if got != tt.want {
- t.Errorf("%d. for %#v, string = %q; want %q", i, tt.s, got, tt.want)
- }
- }
-}
-
-type twriter struct {
- t testing.TB
- st *serverTester // optional
-}
-
-func (w twriter) Write(p []byte) (n int, err error) {
- if w.st != nil {
- ps := string(p)
- for _, phrase := range w.st.logFilter {
- if strings.Contains(ps, phrase) {
- return len(p), nil // no logging
- }
- }
- }
- w.t.Logf("%s", p)
- return len(p), nil
-}
-
-// like encodeHeader, but don't add implicit pseudo headers.
-func encodeHeaderNoImplicit(t *testing.T, headers ...string) []byte {
- var buf bytes.Buffer
- enc := hpack.NewEncoder(&buf)
- for len(headers) > 0 {
- k, v := headers[0], headers[1]
- headers = headers[2:]
- if err := enc.WriteField(hpack.HeaderField{Name: k, Value: v}); err != nil {
- t.Fatalf("HPACK encoding error for %q/%q: %v", k, v, err)
- }
- }
- return buf.Bytes()
-}
-
-// Verify that curl has http2.
-func requireCurl(t *testing.T) {
- out, err := dockerLogs(curl(t, "--version"))
- if err != nil {
- t.Skipf("failed to determine curl features; skipping test")
- }
- if !strings.Contains(string(out), "HTTP2") {
- t.Skip("curl doesn't support HTTP2; skipping test")
- }
-}
-
-func curl(t *testing.T, args ...string) (container string) {
- out, err := exec.Command("docker", append([]string{"run", "-d", "--net=host", "gohttp2/curl"}, args...)...).Output()
- if err != nil {
- t.Skipf("Failed to run curl in docker: %v, %s", err, out)
- }
- return strings.TrimSpace(string(out))
-}
-
-// Verify that h2load exists.
-func requireH2load(t *testing.T) {
- out, err := dockerLogs(h2load(t, "--version"))
- if err != nil {
- t.Skipf("failed to probe h2load; skipping test: %s", out)
- }
- if !strings.Contains(string(out), "h2load nghttp2/") {
- t.Skipf("h2load not present; skipping test. (Output=%q)", out)
- }
-}
-
-func h2load(t *testing.T, args ...string) (container string) {
- out, err := exec.Command("docker", append([]string{"run", "-d", "--net=host", "--entrypoint=/usr/local/bin/h2load", "gohttp2/curl"}, args...)...).Output()
- if err != nil {
- t.Skipf("Failed to run h2load in docker: %v, %s", err, out)
- }
- return strings.TrimSpace(string(out))
-}
-
-type puppetCommand struct {
- fn func(w http.ResponseWriter, r *http.Request)
- done chan<- bool
-}
-
-type handlerPuppet struct {
- ch chan puppetCommand
-}
-
-func newHandlerPuppet() *handlerPuppet {
- return &handlerPuppet{
- ch: make(chan puppetCommand),
- }
-}
-
-func (p *handlerPuppet) act(w http.ResponseWriter, r *http.Request) {
- for cmd := range p.ch {
- cmd.fn(w, r)
- cmd.done <- true
- }
-}
-
-func (p *handlerPuppet) done() { close(p.ch) }
-func (p *handlerPuppet) do(fn func(http.ResponseWriter, *http.Request)) {
- done := make(chan bool)
- p.ch <- puppetCommand{fn, done}
- <-done
-}
-func dockerLogs(container string) ([]byte, error) {
- out, err := exec.Command("docker", "wait", container).CombinedOutput()
- if err != nil {
- return out, err
- }
- exitStatus, err := strconv.Atoi(strings.TrimSpace(string(out)))
- if err != nil {
- return out, errors.New("unexpected exit status from docker wait")
- }
- out, err = exec.Command("docker", "logs", container).CombinedOutput()
- exec.Command("docker", "rm", container).Run()
- if err == nil && exitStatus != 0 {
- err = fmt.Errorf("exit status %d: %s", exitStatus, out)
- }
- return out, err
-}
-
-func kill(container string) {
- exec.Command("docker", "kill", container).Run()
- exec.Command("docker", "rm", container).Run()
-}
-
-func cleanDate(res *http.Response) {
- if d := res.Header["Date"]; len(d) == 1 {
- d[0] = "XXX"
- }
-}
diff --git a/vendor/golang.org/x/net/http2/not_go15.go b/vendor/golang.org/x/net/http2/not_go15.go
deleted file mode 100644
index d0fa5c89..00000000
--- a/vendor/golang.org/x/net/http2/not_go15.go
+++ /dev/null
@@ -1,11 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build !go1.5
-
-package http2
-
-import "net/http"
-
-func requestCancel(req *http.Request) <-chan struct{} { return nil }
diff --git a/vendor/golang.org/x/net/http2/not_go16.go b/vendor/golang.org/x/net/http2/not_go16.go
deleted file mode 100644
index db53c5b8..00000000
--- a/vendor/golang.org/x/net/http2/not_go16.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build !go1.6
-
-package http2
-
-import "net/http"
-
-func configureTransport(t1 *http.Transport) (*Transport, error) {
- return nil, errTransportVersion
-}
diff --git a/vendor/golang.org/x/net/http2/pipe.go b/vendor/golang.org/x/net/http2/pipe.go
deleted file mode 100644
index 69446e7a..00000000
--- a/vendor/golang.org/x/net/http2/pipe.go
+++ /dev/null
@@ -1,147 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package http2
-
-import (
- "errors"
- "io"
- "sync"
-)
-
-// pipe is a goroutine-safe io.Reader/io.Writer pair. It's like
-// io.Pipe except there are no PipeReader/PipeWriter halves, and the
-// underlying buffer is an interface. (io.Pipe is always unbuffered)
-type pipe struct {
- mu sync.Mutex
- c sync.Cond // c.L lazily initialized to &p.mu
- b pipeBuffer
- err error // read error once empty. non-nil means closed.
- breakErr error // immediate read error (caller doesn't see rest of b)
- donec chan struct{} // closed on error
- readFn func() // optional code to run in Read before error
-}
-
-type pipeBuffer interface {
- Len() int
- io.Writer
- io.Reader
-}
-
-// Read waits until data is available and copies bytes
-// from the buffer into p.
-func (p *pipe) Read(d []byte) (n int, err error) {
- p.mu.Lock()
- defer p.mu.Unlock()
- if p.c.L == nil {
- p.c.L = &p.mu
- }
- for {
- if p.breakErr != nil {
- return 0, p.breakErr
- }
- if p.b.Len() > 0 {
- return p.b.Read(d)
- }
- if p.err != nil {
- if p.readFn != nil {
- p.readFn() // e.g. copy trailers
- p.readFn = nil // not sticky like p.err
- }
- return 0, p.err
- }
- p.c.Wait()
- }
-}
-
-var errClosedPipeWrite = errors.New("write on closed buffer")
-
-// Write copies bytes from p into the buffer and wakes a reader.
-// It is an error to write more data than the buffer can hold.
-func (p *pipe) Write(d []byte) (n int, err error) {
- p.mu.Lock()
- defer p.mu.Unlock()
- if p.c.L == nil {
- p.c.L = &p.mu
- }
- defer p.c.Signal()
- if p.err != nil {
- return 0, errClosedPipeWrite
- }
- return p.b.Write(d)
-}
-
-// CloseWithError causes the next Read (waking up a current blocked
-// Read if needed) to return the provided err after all data has been
-// read.
-//
-// The error must be non-nil.
-func (p *pipe) CloseWithError(err error) { p.closeWithError(&p.err, err, nil) }
-
-// BreakWithError causes the next Read (waking up a current blocked
-// Read if needed) to return the provided err immediately, without
-// waiting for unread data.
-func (p *pipe) BreakWithError(err error) { p.closeWithError(&p.breakErr, err, nil) }
-
-// closeWithErrorAndCode is like CloseWithError but also sets some code to run
-// in the caller's goroutine before returning the error.
-func (p *pipe) closeWithErrorAndCode(err error, fn func()) { p.closeWithError(&p.err, err, fn) }
-
-func (p *pipe) closeWithError(dst *error, err error, fn func()) {
- if err == nil {
- panic("err must be non-nil")
- }
- p.mu.Lock()
- defer p.mu.Unlock()
- if p.c.L == nil {
- p.c.L = &p.mu
- }
- defer p.c.Signal()
- if *dst != nil {
- // Already been done.
- return
- }
- p.readFn = fn
- *dst = err
- p.closeDoneLocked()
-}
-
-// requires p.mu be held.
-func (p *pipe) closeDoneLocked() {
- if p.donec == nil {
- return
- }
- // Close if unclosed. This isn't racy since we always
- // hold p.mu while closing.
- select {
- case <-p.donec:
- default:
- close(p.donec)
- }
-}
-
-// Err returns the error (if any) first set by BreakWithError or CloseWithError.
-func (p *pipe) Err() error {
- p.mu.Lock()
- defer p.mu.Unlock()
- if p.breakErr != nil {
- return p.breakErr
- }
- return p.err
-}
-
-// Done returns a channel which is closed if and when this pipe is closed
-// with CloseWithError.
-func (p *pipe) Done() <-chan struct{} {
- p.mu.Lock()
- defer p.mu.Unlock()
- if p.donec == nil {
- p.donec = make(chan struct{})
- if p.err != nil || p.breakErr != nil {
- // Already hit an error.
- p.closeDoneLocked()
- }
- }
- return p.donec
-}
diff --git a/vendor/golang.org/x/net/http2/pipe_test.go b/vendor/golang.org/x/net/http2/pipe_test.go
deleted file mode 100644
index 76322999..00000000
--- a/vendor/golang.org/x/net/http2/pipe_test.go
+++ /dev/null
@@ -1,109 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package http2
-
-import (
- "bytes"
- "errors"
- "io"
- "io/ioutil"
- "testing"
-)
-
-func TestPipeClose(t *testing.T) {
- var p pipe
- p.b = new(bytes.Buffer)
- a := errors.New("a")
- b := errors.New("b")
- p.CloseWithError(a)
- p.CloseWithError(b)
- _, err := p.Read(make([]byte, 1))
- if err != a {
- t.Errorf("err = %v want %v", err, a)
- }
-}
-
-func TestPipeDoneChan(t *testing.T) {
- var p pipe
- done := p.Done()
- select {
- case <-done:
- t.Fatal("done too soon")
- default:
- }
- p.CloseWithError(io.EOF)
- select {
- case <-done:
- default:
- t.Fatal("should be done")
- }
-}
-
-func TestPipeDoneChan_ErrFirst(t *testing.T) {
- var p pipe
- p.CloseWithError(io.EOF)
- done := p.Done()
- select {
- case <-done:
- default:
- t.Fatal("should be done")
- }
-}
-
-func TestPipeDoneChan_Break(t *testing.T) {
- var p pipe
- done := p.Done()
- select {
- case <-done:
- t.Fatal("done too soon")
- default:
- }
- p.BreakWithError(io.EOF)
- select {
- case <-done:
- default:
- t.Fatal("should be done")
- }
-}
-
-func TestPipeDoneChan_Break_ErrFirst(t *testing.T) {
- var p pipe
- p.BreakWithError(io.EOF)
- done := p.Done()
- select {
- case <-done:
- default:
- t.Fatal("should be done")
- }
-}
-
-func TestPipeCloseWithError(t *testing.T) {
- p := &pipe{b: new(bytes.Buffer)}
- const body = "foo"
- io.WriteString(p, body)
- a := errors.New("test error")
- p.CloseWithError(a)
- all, err := ioutil.ReadAll(p)
- if string(all) != body {
- t.Errorf("read bytes = %q; want %q", all, body)
- }
- if err != a {
- t.Logf("read error = %v, %v", err, a)
- }
-}
-
-func TestPipeBreakWithError(t *testing.T) {
- p := &pipe{b: new(bytes.Buffer)}
- io.WriteString(p, "foo")
- a := errors.New("test err")
- p.BreakWithError(a)
- all, err := ioutil.ReadAll(p)
- if string(all) != "" {
- t.Errorf("read bytes = %q; want empty string", all)
- }
- if err != a {
- t.Logf("read error = %v, %v", err, a)
- }
-}
diff --git a/vendor/golang.org/x/net/http2/priority_test.go b/vendor/golang.org/x/net/http2/priority_test.go
deleted file mode 100644
index a3fe2bb4..00000000
--- a/vendor/golang.org/x/net/http2/priority_test.go
+++ /dev/null
@@ -1,118 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package http2
-
-import (
- "testing"
-)
-
-func TestPriority(t *testing.T) {
- // A -> B
- // move A's parent to B
- streams := make(map[uint32]*stream)
- a := &stream{
- parent: nil,
- weight: 16,
- }
- streams[1] = a
- b := &stream{
- parent: a,
- weight: 16,
- }
- streams[2] = b
- adjustStreamPriority(streams, 1, PriorityParam{
- Weight: 20,
- StreamDep: 2,
- })
- if a.parent != b {
- t.Errorf("Expected A's parent to be B")
- }
- if a.weight != 20 {
- t.Errorf("Expected A's weight to be 20; got %d", a.weight)
- }
- if b.parent != nil {
- t.Errorf("Expected B to have no parent")
- }
- if b.weight != 16 {
- t.Errorf("Expected B's weight to be 16; got %d", b.weight)
- }
-}
-
-func TestPriorityExclusiveZero(t *testing.T) {
- // A, B, and C are all children of the 0 stream.
- // Exclusive reprioritization to any of the streams
- // should bring the rest of the streams under the
- // reprioritized stream.
- streams := make(map[uint32]*stream)
- a := &stream{
- parent: nil,
- weight: 16,
- }
- streams[1] = a
- b := &stream{
- parent: nil,
- weight: 16,
- }
- streams[2] = b
- c := &stream{
- parent: nil,
- weight: 16,
- }
- streams[3] = c
- adjustStreamPriority(streams, 3, PriorityParam{
- Weight: 20,
- StreamDep: 0,
- Exclusive: true,
- })
- if a.parent != c {
- t.Errorf("Expected A's parent to be C")
- }
- if a.weight != 16 {
- t.Errorf("Expected A's weight to be 16; got %d", a.weight)
- }
- if b.parent != c {
- t.Errorf("Expected B's parent to be C")
- }
- if b.weight != 16 {
- t.Errorf("Expected B's weight to be 16; got %d", b.weight)
- }
- if c.parent != nil {
- t.Errorf("Expected C to have no parent")
- }
- if c.weight != 20 {
- t.Errorf("Expected C's weight to be 20; got %d", b.weight)
- }
-}
-
-func TestPriorityOwnParent(t *testing.T) {
- streams := make(map[uint32]*stream)
- a := &stream{
- parent: nil,
- weight: 16,
- }
- streams[1] = a
- b := &stream{
- parent: a,
- weight: 16,
- }
- streams[2] = b
- adjustStreamPriority(streams, 1, PriorityParam{
- Weight: 20,
- StreamDep: 1,
- })
- if a.parent != nil {
- t.Errorf("Expected A's parent to be nil")
- }
- if a.weight != 20 {
- t.Errorf("Expected A's weight to be 20; got %d", a.weight)
- }
- if b.parent != a {
- t.Errorf("Expected B's parent to be A")
- }
- if b.weight != 16 {
- t.Errorf("Expected B's weight to be 16; got %d", b.weight)
- }
-
-}
diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go
deleted file mode 100644
index 915e2571..00000000
--- a/vendor/golang.org/x/net/http2/server.go
+++ /dev/null
@@ -1,2149 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// TODO: replace all <-sc.doneServing with reads from the stream's cw
-// instead, and make sure that on close we close all open
-// streams. then remove doneServing?
-
-// TODO: re-audit GOAWAY support. Consider each incoming frame type and
-// whether it should be ignored during graceful shutdown.
-
-// TODO: disconnect idle clients. GFE seems to do 4 minutes. make
-// configurable? or maximum number of idle clients and remove the
-// oldest?
-
-// TODO: turn off the serve goroutine when idle, so
-// an idle conn only has the readFrames goroutine active. (which could
-// also be optimized probably to pin less memory in crypto/tls). This
-// would involve tracking when the serve goroutine is active (atomic
-// int32 read/CAS probably?) and starting it up when frames arrive,
-// and shutting it down when all handlers exit. the occasional PING
-// packets could use time.AfterFunc to call sc.wakeStartServeLoop()
-// (which is a no-op if already running) and then queue the PING write
-// as normal. The serve loop would then exit in most cases (if no
-// Handlers running) and not be woken up again until the PING packet
-// returns.
-
-// TODO (maybe): add a mechanism for Handlers to going into
-// half-closed-local mode (rw.(io.Closer) test?) but not exit their
-// handler, and continue to be able to read from the
-// Request.Body. This would be a somewhat semantic change from HTTP/1
-// (or at least what we expose in net/http), so I'd probably want to
-// add it there too. For now, this package says that returning from
-// the Handler ServeHTTP function means you're both done reading and
-// done writing, without a way to stop just one or the other.
-
-package http2
-
-import (
- "bufio"
- "bytes"
- "crypto/tls"
- "errors"
- "fmt"
- "io"
- "log"
- "net"
- "net/http"
- "net/textproto"
- "net/url"
- "os"
- "reflect"
- "runtime"
- "sort"
- "strconv"
- "strings"
- "sync"
- "time"
-
- "golang.org/x/net/http2/hpack"
-)
-
-const (
- prefaceTimeout = 10 * time.Second
- firstSettingsTimeout = 2 * time.Second // should be in-flight with preface anyway
- handlerChunkWriteSize = 4 << 10
- defaultMaxStreams = 250 // TODO: make this 100 as the GFE seems to?
-)
-
-var (
- errClientDisconnected = errors.New("client disconnected")
- errClosedBody = errors.New("body closed by handler")
- errHandlerComplete = errors.New("http2: request body closed due to handler exiting")
- errStreamClosed = errors.New("http2: stream closed")
-)
-
-var responseWriterStatePool = sync.Pool{
- New: func() interface{} {
- rws := &responseWriterState{}
- rws.bw = bufio.NewWriterSize(chunkWriter{rws}, handlerChunkWriteSize)
- return rws
- },
-}
-
-// Test hooks.
-var (
- testHookOnConn func()
- testHookGetServerConn func(*serverConn)
- testHookOnPanicMu *sync.Mutex // nil except in tests
- testHookOnPanic func(sc *serverConn, panicVal interface{}) (rePanic bool)
-)
-
-// Server is an HTTP/2 server.
-type Server struct {
- // MaxHandlers limits the number of http.Handler ServeHTTP goroutines
- // which may run at a time over all connections.
- // Negative or zero means no limit.
- // TODO: implement
- MaxHandlers int
-
- // MaxConcurrentStreams optionally specifies the number of
- // concurrent streams that each client may have open at a
- // time. This is unrelated to the number of http.Handler goroutines
- // which may be active globally, which is MaxHandlers.
- // If zero, MaxConcurrentStreams defaults to at least 100, per
- // the HTTP/2 spec's recommendations.
- MaxConcurrentStreams uint32
-
- // MaxReadFrameSize optionally specifies the largest frame
- // this server is willing to read. A valid value is between
- // 16k and 16M, inclusive. If zero or otherwise invalid, a
- // default value is used.
- MaxReadFrameSize uint32
-
- // PermitProhibitedCipherSuites, if true, permits the use of
- // cipher suites prohibited by the HTTP/2 spec.
- PermitProhibitedCipherSuites bool
-}
-
-func (s *Server) maxReadFrameSize() uint32 {
- if v := s.MaxReadFrameSize; v >= minMaxFrameSize && v <= maxFrameSize {
- return v
- }
- return defaultMaxReadFrameSize
-}
-
-func (s *Server) maxConcurrentStreams() uint32 {
- if v := s.MaxConcurrentStreams; v > 0 {
- return v
- }
- return defaultMaxStreams
-}
-
-// ConfigureServer adds HTTP/2 support to a net/http Server.
-//
-// The configuration conf may be nil.
-//
-// ConfigureServer must be called before s begins serving.
-func ConfigureServer(s *http.Server, conf *Server) error {
- if conf == nil {
- conf = new(Server)
- }
-
- if s.TLSConfig == nil {
- s.TLSConfig = new(tls.Config)
- } else if s.TLSConfig.CipherSuites != nil {
- // If they already provided a CipherSuite list, return
- // an error if it has a bad order or is missing
- // ECDHE_RSA_WITH_AES_128_GCM_SHA256.
- const requiredCipher = tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
- haveRequired := false
- sawBad := false
- for i, cs := range s.TLSConfig.CipherSuites {
- if cs == requiredCipher {
- haveRequired = true
- }
- if isBadCipher(cs) {
- sawBad = true
- } else if sawBad {
- return fmt.Errorf("http2: TLSConfig.CipherSuites index %d contains an HTTP/2-approved cipher suite (%#04x), but it comes after unapproved cipher suites. With this configuration, clients that don't support previous, approved cipher suites may be given an unapproved one and reject the connection.", i, cs)
- }
- }
- if !haveRequired {
- return fmt.Errorf("http2: TLSConfig.CipherSuites is missing HTTP/2-required TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256")
- }
- }
-
- // Note: not setting MinVersion to tls.VersionTLS12,
- // as we don't want to interfere with HTTP/1.1 traffic
- // on the user's server. We enforce TLS 1.2 later once
- // we accept a connection. Ideally this should be done
- // during next-proto selection, but using TLS <1.2 with
- // HTTP/2 is still the client's bug.
-
- s.TLSConfig.PreferServerCipherSuites = true
-
- haveNPN := false
- for _, p := range s.TLSConfig.NextProtos {
- if p == NextProtoTLS {
- haveNPN = true
- break
- }
- }
- if !haveNPN {
- s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, NextProtoTLS)
- }
- // h2-14 is temporary (as of 2015-03-05) while we wait for all browsers
- // to switch to "h2".
- s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, "h2-14")
-
- if s.TLSNextProto == nil {
- s.TLSNextProto = map[string]func(*http.Server, *tls.Conn, http.Handler){}
- }
- protoHandler := func(hs *http.Server, c *tls.Conn, h http.Handler) {
- if testHookOnConn != nil {
- testHookOnConn()
- }
- conf.ServeConn(c, &ServeConnOpts{
- Handler: h,
- BaseConfig: hs,
- })
- }
- s.TLSNextProto[NextProtoTLS] = protoHandler
- s.TLSNextProto["h2-14"] = protoHandler // temporary; see above.
- return nil
-}
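The CipherSuites validation above rejects a list that omits TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 or that places an HTTP/2-approved suite after a prohibited one. A sketch of a list that would pass those checks, using standard crypto/tls constants; the exact suites chosen here are examples only:

package main

import "crypto/tls"

// suites is an ordering the checks above accept: the required
// TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 is present, and no approved
// suite appears after a prohibited (Appendix A) one.
var suites = []uint16{
	tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, // required by the check above
	tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
	// Prohibited suites may follow, but only after all approved ones:
	tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
	tls.TLS_RSA_WITH_AES_128_CBC_SHA,
}

func main() {
	cfg := &tls.Config{CipherSuites: suites}
	_ = cfg // pass cfg to an http.Server before calling ConfigureServer
}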
-
-// ServeConnOpts are options for the Server.ServeConn method.
-type ServeConnOpts struct {
- // BaseConfig optionally sets the base configuration
- // for values. If nil, defaults are used.
- BaseConfig *http.Server
-
- // Handler specifies which handler to use for processing
- // requests. If nil, BaseConfig.Handler is used. If BaseConfig
- // or BaseConfig.Handler is nil, http.DefaultServeMux is used.
- Handler http.Handler
-}
-
-func (o *ServeConnOpts) baseConfig() *http.Server {
- if o != nil && o.BaseConfig != nil {
- return o.BaseConfig
- }
- return new(http.Server)
-}
-
-func (o *ServeConnOpts) handler() http.Handler {
- if o != nil {
- if o.Handler != nil {
- return o.Handler
- }
- if o.BaseConfig != nil && o.BaseConfig.Handler != nil {
- return o.BaseConfig.Handler
- }
- }
- return http.DefaultServeMux
-}
-
-// ServeConn serves HTTP/2 requests on the provided connection and
-// blocks until the connection is no longer readable.
-//
-// ServeConn starts speaking HTTP/2 assuming that c has not had any
-// reads or writes. It writes its initial settings frame and expects
-// to be able to read the preface and settings frame from the
-// client. If c has a ConnectionState method like a *tls.Conn, the
-// ConnectionState is used to verify the TLS ciphersuite and to set
-// the Request.TLS field in Handlers.
-//
-// ServeConn does not support h2c by itself. Any h2c support must be
-// implemented in terms of providing a suitably-behaving net.Conn.
-//
-// The opts parameter is optional. If nil, default values are used.
-func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) {
- sc := &serverConn{
- srv: s,
- hs: opts.baseConfig(),
- conn: c,
- remoteAddrStr: c.RemoteAddr().String(),
- bw: newBufferedWriter(c),
- handler: opts.handler(),
- streams: make(map[uint32]*stream),
- readFrameCh: make(chan readFrameResult),
- wantWriteFrameCh: make(chan frameWriteMsg, 8),
- wroteFrameCh: make(chan frameWriteResult, 1), // buffered; one send in writeFrameAsync
- bodyReadCh: make(chan bodyReadMsg), // buffering doesn't matter either way
- doneServing: make(chan struct{}),
- advMaxStreams: s.maxConcurrentStreams(),
- writeSched: writeScheduler{
- maxFrameSize: initialMaxFrameSize,
- },
- initialWindowSize: initialWindowSize,
- headerTableSize: initialHeaderTableSize,
- serveG: newGoroutineLock(),
- pushEnabled: true,
- }
- sc.flow.add(initialWindowSize)
- sc.inflow.add(initialWindowSize)
- sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf)
-
- fr := NewFramer(sc.bw, c)
- fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil)
- fr.MaxHeaderListSize = sc.maxHeaderListSize()
- fr.SetMaxReadFrameSize(s.maxReadFrameSize())
- sc.framer = fr
-
- if tc, ok := c.(connectionStater); ok {
- sc.tlsState = new(tls.ConnectionState)
- *sc.tlsState = tc.ConnectionState()
- // 9.2 Use of TLS Features
- // An implementation of HTTP/2 over TLS MUST use TLS
- // 1.2 or higher with the restrictions on feature set
- // and cipher suite described in this section. Due to
- // implementation limitations, it might not be
- // possible to fail TLS negotiation. An endpoint MUST
- // immediately terminate an HTTP/2 connection that
- // does not meet the TLS requirements described in
- // this section with a connection error (Section
- // 5.4.1) of type INADEQUATE_SECURITY.
- if sc.tlsState.Version < tls.VersionTLS12 {
- sc.rejectConn(ErrCodeInadequateSecurity, "TLS version too low")
- return
- }
-
- if sc.tlsState.ServerName == "" {
- // Client must use SNI, but we don't enforce that anymore,
- // since it was causing problems when connecting to bare IP
- // addresses during development.
- //
- // TODO: optionally enforce? Or enforce at the time we receive
- // a new request, and verify that the ServerName matches the :authority?
- // But that precludes proxy situations, perhaps.
- //
- // So for now, do nothing here again.
- }
-
- if !s.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) {
- // "Endpoints MAY choose to generate a connection error
- // (Section 5.4.1) of type INADEQUATE_SECURITY if one of
- // the prohibited cipher suites are negotiated."
- //
- // We choose that. In my opinion, the spec is weak
- // here. It also says both parties must support at least
- // TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 so there's no
- // excuses here. If we really must, we could allow an
- // "AllowInsecureWeakCiphers" option on the server later.
- // Let's see how it plays out first.
- sc.rejectConn(ErrCodeInadequateSecurity, fmt.Sprintf("Prohibited TLS 1.2 Cipher Suite: %x", sc.tlsState.CipherSuite))
- return
- }
- }
-
- if hook := testHookGetServerConn; hook != nil {
- hook(sc)
- }
- sc.serve()
-}
-
-// isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec.
-func isBadCipher(cipher uint16) bool {
- switch cipher {
- case tls.TLS_RSA_WITH_RC4_128_SHA,
- tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA,
- tls.TLS_RSA_WITH_AES_128_CBC_SHA,
- tls.TLS_RSA_WITH_AES_256_CBC_SHA,
- tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,
- tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
- tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
- tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA,
- tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
- tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
- tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA:
- // Reject cipher suites from Appendix A.
- // "This list includes those cipher suites that do not
- // offer an ephemeral key exchange and those that are
- // based on the TLS null, stream or block cipher type"
- return true
- default:
- return false
- }
-}
-
-func (sc *serverConn) rejectConn(err ErrCode, debug string) {
- sc.vlogf("http2: server rejecting conn: %v, %s", err, debug)
- // ignoring errors. hanging up anyway.
- sc.framer.WriteGoAway(0, err, []byte(debug))
- sc.bw.Flush()
- sc.conn.Close()
-}
-
-type serverConn struct {
- // Immutable:
- srv *Server
- hs *http.Server
- conn net.Conn
- bw *bufferedWriter // writing to conn
- handler http.Handler
- framer *Framer
- doneServing chan struct{} // closed when serverConn.serve ends
- readFrameCh chan readFrameResult // written by serverConn.readFrames
- wantWriteFrameCh chan frameWriteMsg // from handlers -> serve
- wroteFrameCh chan frameWriteResult // from writeFrameAsync -> serve, tickles more frame writes
- bodyReadCh chan bodyReadMsg // from handlers -> serve
- testHookCh chan func(int) // code to run on the serve loop
- flow flow // conn-wide (not stream-specific) outbound flow control
- inflow flow // conn-wide inbound flow control
- tlsState *tls.ConnectionState // shared by all handlers, like net/http
- remoteAddrStr string
-
- // Everything following is owned by the serve loop; use serveG.check():
- serveG goroutineLock // used to verify funcs are on serve()
- pushEnabled bool
- sawFirstSettings bool // got the initial SETTINGS frame after the preface
- needToSendSettingsAck bool
- unackedSettings int // how many SETTINGS have we sent without ACKs?
- clientMaxStreams uint32 // SETTINGS_MAX_CONCURRENT_STREAMS from client (our PUSH_PROMISE limit)
- advMaxStreams uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised to the client
- curOpenStreams uint32 // client's number of open streams
- maxStreamID uint32 // max ever seen
- streams map[uint32]*stream
- initialWindowSize int32
- headerTableSize uint32
- peerMaxHeaderListSize uint32 // zero means unknown (default)
- canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case
- writingFrame bool // started write goroutine but haven't heard back on wroteFrameCh
- needsFrameFlush bool // last frame write wasn't a flush
- writeSched writeScheduler
- inGoAway bool // we've started to or sent GOAWAY
- needToSendGoAway bool // we need to schedule a GOAWAY frame write
- goAwayCode ErrCode
- shutdownTimerCh <-chan time.Time // nil until used
- shutdownTimer *time.Timer // nil until used
-
- // Owned by the writeFrameAsync goroutine:
- headerWriteBuf bytes.Buffer
- hpackEncoder *hpack.Encoder
-}
-
-func (sc *serverConn) maxHeaderListSize() uint32 {
- n := sc.hs.MaxHeaderBytes
- if n <= 0 {
- n = http.DefaultMaxHeaderBytes
- }
- // http2's count is in a slightly different unit and includes 32 bytes per pair.
- // So, take the net/http.Server value and pad it up a bit, assuming 10 headers.
- const perFieldOverhead = 32 // per http2 spec
- const typicalHeaders = 10 // conservative
- return uint32(n + typicalHeaders*perFieldOverhead)
-}
-
-// stream represents a stream. This is the minimal metadata needed by
-// the serve goroutine. Most of the actual stream state is owned by
-// the http.Handler's goroutine in the responseWriter. Because the
-// responseWriter's responseWriterState is recycled at the end of a
-// handler, this struct intentionally has no pointer to the
-// *responseWriter{,State} itself, as the Handler ending nils out the
-// responseWriter's state field.
-type stream struct {
- // immutable:
- sc *serverConn
- id uint32
- body *pipe // non-nil if expecting DATA frames
- cw closeWaiter // closed when the stream transitions to the closed state
-
- // owned by serverConn's serve loop:
- bodyBytes int64 // body bytes seen so far
- declBodyBytes int64 // or -1 if undeclared
- flow flow // limits writing from Handler to client
- inflow flow // what the client is allowed to POST/etc to us
- parent *stream // or nil
- numTrailerValues int64
- weight uint8
- state streamState
- sentReset bool // only true once detached from streams map
- gotReset bool // only true once detached from streams map
- gotTrailerHeader bool // HEADER frame for trailers was seen
-
- trailer http.Header // accumulated trailers
- reqTrailer http.Header // handler's Request.Trailer
-}
-
-func (sc *serverConn) Framer() *Framer { return sc.framer }
-func (sc *serverConn) CloseConn() error { return sc.conn.Close() }
-func (sc *serverConn) Flush() error { return sc.bw.Flush() }
-func (sc *serverConn) HeaderEncoder() (*hpack.Encoder, *bytes.Buffer) {
- return sc.hpackEncoder, &sc.headerWriteBuf
-}
-
-func (sc *serverConn) state(streamID uint32) (streamState, *stream) {
- sc.serveG.check()
- // http://http2.github.io/http2-spec/#rfc.section.5.1
- if st, ok := sc.streams[streamID]; ok {
- return st.state, st
- }
- // "The first use of a new stream identifier implicitly closes all
- // streams in the "idle" state that might have been initiated by
- // that peer with a lower-valued stream identifier. For example, if
- // a client sends a HEADERS frame on stream 7 without ever sending a
- // frame on stream 5, then stream 5 transitions to the "closed"
- // state when the first frame for stream 7 is sent or received."
- if streamID <= sc.maxStreamID {
- return stateClosed, nil
- }
- return stateIdle, nil
-}
-
-// setConnState calls the net/http ConnState hook for this connection, if configured.
-// Note that the net/http package does StateNew and StateClosed for us.
-// There is currently no plan for StateHijacked or hijacking HTTP/2 connections.
-func (sc *serverConn) setConnState(state http.ConnState) {
- if sc.hs.ConnState != nil {
- sc.hs.ConnState(sc.conn, state)
- }
-}
-
-func (sc *serverConn) vlogf(format string, args ...interface{}) {
- if VerboseLogs {
- sc.logf(format, args...)
- }
-}
-
-func (sc *serverConn) logf(format string, args ...interface{}) {
- if lg := sc.hs.ErrorLog; lg != nil {
- lg.Printf(format, args...)
- } else {
- log.Printf(format, args...)
- }
-}
-
-// errno returns v's underlying uintptr, else 0.
-//
-// TODO: remove this helper function once http2 can use build
-// tags. See comment in isClosedConnError.
-func errno(v error) uintptr {
- if rv := reflect.ValueOf(v); rv.Kind() == reflect.Uintptr {
- return uintptr(rv.Uint())
- }
- return 0
-}
-
-// isClosedConnError reports whether err is an error from use of a closed
-// network connection.
-func isClosedConnError(err error) bool {
- if err == nil {
- return false
- }
-
- // TODO: remove this string search and be more like the Windows
- // case below. That might involve modifying the standard library
- // to return better error types.
- str := err.Error()
- if strings.Contains(str, "use of closed network connection") {
- return true
- }
-
- // TODO(bradfitz): x/tools/cmd/bundle doesn't really support
- // build tags, so I can't make an http2_windows.go file with
- // Windows-specific stuff. Fix that and move this, once we
- // have a way to bundle this into std's net/http somehow.
- if runtime.GOOS == "windows" {
- if oe, ok := err.(*net.OpError); ok && oe.Op == "read" {
- if se, ok := oe.Err.(*os.SyscallError); ok && se.Syscall == "wsarecv" {
- const WSAECONNABORTED = 10053
- const WSAECONNRESET = 10054
- if n := errno(se.Err); n == WSAECONNRESET || n == WSAECONNABORTED {
- return true
- }
- }
- }
- }
- return false
-}
-
-func (sc *serverConn) condlogf(err error, format string, args ...interface{}) {
- if err == nil {
- return
- }
- if err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err) {
- // Boring, expected errors.
- sc.vlogf(format, args...)
- } else {
- sc.logf(format, args...)
- }
-}
-
-func (sc *serverConn) canonicalHeader(v string) string {
- sc.serveG.check()
- cv, ok := commonCanonHeader[v]
- if ok {
- return cv
- }
- cv, ok = sc.canonHeader[v]
- if ok {
- return cv
- }
- if sc.canonHeader == nil {
- sc.canonHeader = make(map[string]string)
- }
- cv = http.CanonicalHeaderKey(v)
- sc.canonHeader[v] = cv
- return cv
-}
-
-type readFrameResult struct {
- f Frame // valid until readMore is called
- err error
-
- // readMore should be called once the consumer no longer needs or
- // retains f. After readMore, f is invalid and more frames can be
- // read.
- readMore func()
-}
-
-// readFrames is the loop that reads incoming frames.
-// It takes care to only read one frame at a time, blocking until the
-// consumer is done with the frame.
-// It's run on its own goroutine.
-func (sc *serverConn) readFrames() {
- gate := make(gate)
- for {
- f, err := sc.framer.ReadFrame()
- select {
- case sc.readFrameCh <- readFrameResult{f, err, gate.Done}:
- case <-sc.doneServing:
- return
- }
- select {
- case <-gate:
- case <-sc.doneServing:
- return
- }
- if terminalReadFrameError(err) {
- return
- }
- }
-}
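readFrames above keeps at most one frame outstanding: it hands the frame to the serve loop and then blocks on a gate until readMore is called. A standalone sketch of that hand-off, with the gate modeled as a plain unbuffered channel (names are illustrative):

package main

import "fmt"

type result struct {
	frame    string
	readMore func() // consumer calls this when done with frame
}

func main() {
	frames := []string{"SETTINGS", "HEADERS", "DATA"}
	ch := make(chan result)
	go func() {
		gate := make(chan struct{})
		for _, f := range frames {
			ch <- result{frame: f, readMore: func() { gate <- struct{}{} }}
			<-gate // block until the consumer has finished with f
		}
		close(ch)
	}()
	for r := range ch {
		fmt.Println("processing", r.frame)
		r.readMore() // allow the reader to fetch the next frame
	}
}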
-
-// frameWriteResult is the message passed from writeFrameAsync to the serve goroutine.
-type frameWriteResult struct {
- wm frameWriteMsg // what was written (or attempted)
- err error // result of the writeFrame call
-}
-
-// writeFrameAsync runs in its own goroutine and writes a single frame
-// and then reports when it's done.
-// At most one goroutine can be running writeFrameAsync at a time per
-// serverConn.
-func (sc *serverConn) writeFrameAsync(wm frameWriteMsg) {
- err := wm.write.writeFrame(sc)
- sc.wroteFrameCh <- frameWriteResult{wm, err}
-}
-
-func (sc *serverConn) closeAllStreamsOnConnClose() {
- sc.serveG.check()
- for _, st := range sc.streams {
- sc.closeStream(st, errClientDisconnected)
- }
-}
-
-func (sc *serverConn) stopShutdownTimer() {
- sc.serveG.check()
- if t := sc.shutdownTimer; t != nil {
- t.Stop()
- }
-}
-
-func (sc *serverConn) notePanic() {
- // Note: this is for serverConn.serve panicking, not http.Handler code.
- if testHookOnPanicMu != nil {
- testHookOnPanicMu.Lock()
- defer testHookOnPanicMu.Unlock()
- }
- if testHookOnPanic != nil {
- if e := recover(); e != nil {
- if testHookOnPanic(sc, e) {
- panic(e)
- }
- }
- }
-}
-
-func (sc *serverConn) serve() {
- sc.serveG.check()
- defer sc.notePanic()
- defer sc.conn.Close()
- defer sc.closeAllStreamsOnConnClose()
- defer sc.stopShutdownTimer()
- defer close(sc.doneServing) // unblocks handlers trying to send
-
- if VerboseLogs {
- sc.vlogf("http2: server connection from %v on %p", sc.conn.RemoteAddr(), sc.hs)
- }
-
- sc.writeFrame(frameWriteMsg{
- write: writeSettings{
- {SettingMaxFrameSize, sc.srv.maxReadFrameSize()},
- {SettingMaxConcurrentStreams, sc.advMaxStreams},
- {SettingMaxHeaderListSize, sc.maxHeaderListSize()},
-
- // TODO: more actual settings, notably
- // SettingInitialWindowSize, but then we also
- // want to bump up the conn window size the
- // same amount here right after the settings
- },
- })
- sc.unackedSettings++
-
- if err := sc.readPreface(); err != nil {
- sc.condlogf(err, "http2: server: error reading preface from client %v: %v", sc.conn.RemoteAddr(), err)
- return
- }
- // Now that we've got the preface, get us out of the
- // "StateNew" state. We can't go directly to idle, though.
- // Active means we read some data and anticipate a request. We'll
- // do another Active when we get a HEADERS frame.
- sc.setConnState(http.StateActive)
- sc.setConnState(http.StateIdle)
-
- go sc.readFrames() // closed by defer sc.conn.Close above
-
- settingsTimer := time.NewTimer(firstSettingsTimeout)
- loopNum := 0
- for {
- loopNum++
- select {
- case wm := <-sc.wantWriteFrameCh:
- sc.writeFrame(wm)
- case res := <-sc.wroteFrameCh:
- sc.wroteFrame(res)
- case res := <-sc.readFrameCh:
- if !sc.processFrameFromReader(res) {
- return
- }
- res.readMore()
- if settingsTimer.C != nil {
- settingsTimer.Stop()
- settingsTimer.C = nil
- }
- case m := <-sc.bodyReadCh:
- sc.noteBodyRead(m.st, m.n)
- case <-settingsTimer.C:
- sc.logf("timeout waiting for SETTINGS frames from %v", sc.conn.RemoteAddr())
- return
- case <-sc.shutdownTimerCh:
- sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr())
- return
- case fn := <-sc.testHookCh:
- fn(loopNum)
- }
- }
-}
-
-// readPreface reads the ClientPreface greeting from the peer
-// or returns an error on timeout or an invalid greeting.
-func (sc *serverConn) readPreface() error {
- errc := make(chan error, 1)
- go func() {
- // Read the client preface
- buf := make([]byte, len(ClientPreface))
- if _, err := io.ReadFull(sc.conn, buf); err != nil {
- errc <- err
- } else if !bytes.Equal(buf, clientPreface) {
- errc <- fmt.Errorf("bogus greeting %q", buf)
- } else {
- errc <- nil
- }
- }()
- timer := time.NewTimer(prefaceTimeout) // TODO: configurable on *Server?
- defer timer.Stop()
- select {
- case <-timer.C:
- return errors.New("timeout waiting for client preface")
- case err := <-errc:
- if err == nil {
- if VerboseLogs {
- sc.vlogf("http2: server: client %v said hello", sc.conn.RemoteAddr())
- }
- }
- return err
- }
-}
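readPreface bounds a blocking io.ReadFull with a timer by running the read in a helper goroutine and selecting on its result versus the timeout. The same shape reduced to its essentials; readFullWithTimeout is a hypothetical helper, and the timeout value is a placeholder:

package main

import (
	"bytes"
	"errors"
	"fmt"
	"io"
	"time"
)

// readFullWithTimeout reads exactly len(buf) bytes from r, or gives up
// after d. On timeout the reading goroutine is left blocked until the
// underlying read returns, as in the code above.
func readFullWithTimeout(r io.Reader, buf []byte, d time.Duration) error {
	errc := make(chan error, 1)
	go func() {
		_, err := io.ReadFull(r, buf)
		errc <- err
	}()
	timer := time.NewTimer(d)
	defer timer.Stop()
	select {
	case <-timer.C:
		return errors.New("timeout")
	case err := <-errc:
		return err
	}
}

func main() {
	buf := make([]byte, 5)
	err := readFullWithTimeout(bytes.NewReader([]byte("hello")), buf, time.Second)
	fmt.Println(string(buf), err) // "hello <nil>"
}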
-
-var errChanPool = sync.Pool{
- New: func() interface{} { return make(chan error, 1) },
-}
-
-var writeDataPool = sync.Pool{
- New: func() interface{} { return new(writeData) },
-}
-
-// writeDataFromHandler writes DATA response frames from a handler on
-// the given stream.
-func (sc *serverConn) writeDataFromHandler(stream *stream, data []byte, endStream bool) error {
- ch := errChanPool.Get().(chan error)
- writeArg := writeDataPool.Get().(*writeData)
- *writeArg = writeData{stream.id, data, endStream}
- err := sc.writeFrameFromHandler(frameWriteMsg{
- write: writeArg,
- stream: stream,
- done: ch,
- })
- if err != nil {
- return err
- }
- var frameWriteDone bool // the frame write is done (successfully or not)
- select {
- case err = <-ch:
- frameWriteDone = true
- case <-sc.doneServing:
- return errClientDisconnected
- case <-stream.cw:
- // If both ch and stream.cw were ready (as might
- // happen on the final Write after an http.Handler
- // ends), prefer the write result. Otherwise this
- // might just be us successfully closing the stream.
- // The writeFrameAsync and serve goroutines guarantee
- // that the ch send will happen before the stream.cw
- // close.
- select {
- case err = <-ch:
- frameWriteDone = true
- default:
- return errStreamClosed
- }
- }
- errChanPool.Put(ch)
- if frameWriteDone {
- writeDataPool.Put(writeArg)
- }
- return err
-}
-
-// writeFrameFromHandler sends wm to sc.wantWriteFrameCh, but aborts
-// if the connection has gone away.
-//
-// This must not be run from the serve goroutine itself, else it might
-// deadlock writing to sc.wantWriteFrameCh (which is only mildly
-// buffered and is read by serve itself). If you're on the serve
-// goroutine, call writeFrame instead.
-func (sc *serverConn) writeFrameFromHandler(wm frameWriteMsg) error {
- sc.serveG.checkNotOn() // NOT
- select {
- case sc.wantWriteFrameCh <- wm:
- return nil
- case <-sc.doneServing:
- // Serve loop is gone.
- // Client has closed their connection to the server.
- return errClientDisconnected
- }
-}
-
-// writeFrame schedules a frame to write and sends it if there's nothing
-// already being written.
-//
-// There is no pushback here (the serve goroutine never blocks). It's
-// the http.Handlers that block, waiting for their previous frames to
-// make it onto the wire.
-//
-// If you're not on the serve goroutine, use writeFrameFromHandler instead.
-func (sc *serverConn) writeFrame(wm frameWriteMsg) {
- sc.serveG.check()
- sc.writeSched.add(wm)
- sc.scheduleFrameWrite()
-}
-
-// startFrameWrite starts a goroutine to write wm (in a separate
-// goroutine since that might block on the network), and updates the
-// serve goroutine's state about the world, updated from info in wm.
-func (sc *serverConn) startFrameWrite(wm frameWriteMsg) {
- sc.serveG.check()
- if sc.writingFrame {
- panic("internal error: can only be writing one frame at a time")
- }
-
- st := wm.stream
- if st != nil {
- switch st.state {
- case stateHalfClosedLocal:
- panic("internal error: attempt to send frame on half-closed-local stream")
- case stateClosed:
- if st.sentReset || st.gotReset {
- // Skip this frame.
- sc.scheduleFrameWrite()
- return
- }
- panic(fmt.Sprintf("internal error: attempt to send a write %v on a closed stream", wm))
- }
- }
-
- sc.writingFrame = true
- sc.needsFrameFlush = true
- go sc.writeFrameAsync(wm)
-}
-
-// errHandlerPanicked is the error given to any callers blocked in a read from
-// Request.Body when the main goroutine panics. Since most handlers read in
-// the main ServeHTTP goroutine, this will show up rarely.
-var errHandlerPanicked = errors.New("http2: handler panicked")
-
-// wroteFrame is called on the serve goroutine with the result of
-// whatever happened on writeFrameAsync.
-func (sc *serverConn) wroteFrame(res frameWriteResult) {
- sc.serveG.check()
- if !sc.writingFrame {
- panic("internal error: expected to be already writing a frame")
- }
- sc.writingFrame = false
-
- wm := res.wm
- st := wm.stream
-
- closeStream := endsStream(wm.write)
-
- if _, ok := wm.write.(handlerPanicRST); ok {
- sc.closeStream(st, errHandlerPanicked)
- }
-
- // Reply (if requested) to the blocked ServeHTTP goroutine.
- if ch := wm.done; ch != nil {
- select {
- case ch <- res.err:
- default:
- panic(fmt.Sprintf("unbuffered done channel passed in for type %T", wm.write))
- }
- }
- wm.write = nil // prevent use (assume it's tainted after wm.done send)
-
- if closeStream {
- if st == nil {
- panic("internal error: expecting non-nil stream")
- }
- switch st.state {
- case stateOpen:
- // Here we would go to stateHalfClosedLocal in
- // theory, but since our handler is done and
- // the net/http package provides no mechanism
- // for finishing writing to a ResponseWriter
- // while still reading data (see possible TODO
- // at top of this file), we go into closed
- // state here anyway, after telling the peer
- // we're hanging up on them.
- st.state = stateHalfClosedLocal // won't last long, but necessary for closeStream via resetStream
- errCancel := StreamError{st.id, ErrCodeCancel}
- sc.resetStream(errCancel)
- case stateHalfClosedRemote:
- sc.closeStream(st, errHandlerComplete)
- }
- }
-
- sc.scheduleFrameWrite()
-}
-
-// scheduleFrameWrite tickles the frame writing scheduler.
-//
-// If a frame is already being written, nothing happens. This will be called again
-// when the frame is done being written.
-//
-// If a frame isn't being written and we need to send one, the best frame
-// to send is selected, preferring first things that aren't
-// stream-specific (e.g. ACKing settings), and then finding the
-// highest priority stream.
-//
-// If a frame isn't being written and there's nothing else to send, we
-// flush the write buffer.
-func (sc *serverConn) scheduleFrameWrite() {
- sc.serveG.check()
- if sc.writingFrame {
- return
- }
- if sc.needToSendGoAway {
- sc.needToSendGoAway = false
- sc.startFrameWrite(frameWriteMsg{
- write: &writeGoAway{
- maxStreamID: sc.maxStreamID,
- code: sc.goAwayCode,
- },
- })
- return
- }
- if sc.needToSendSettingsAck {
- sc.needToSendSettingsAck = false
- sc.startFrameWrite(frameWriteMsg{write: writeSettingsAck{}})
- return
- }
- if !sc.inGoAway {
- if wm, ok := sc.writeSched.take(); ok {
- sc.startFrameWrite(wm)
- return
- }
- }
- if sc.needsFrameFlush {
- sc.startFrameWrite(frameWriteMsg{write: flushFrameWriter{}})
- sc.needsFrameFlush = false // after startFrameWrite, since it sets this true
- return
- }
-}
-
-func (sc *serverConn) goAway(code ErrCode) {
- sc.serveG.check()
- if sc.inGoAway {
- return
- }
- if code != ErrCodeNo {
- sc.shutDownIn(250 * time.Millisecond)
- } else {
- // TODO: configurable
- sc.shutDownIn(1 * time.Second)
- }
- sc.inGoAway = true
- sc.needToSendGoAway = true
- sc.goAwayCode = code
- sc.scheduleFrameWrite()
-}
-
-func (sc *serverConn) shutDownIn(d time.Duration) {
- sc.serveG.check()
- sc.shutdownTimer = time.NewTimer(d)
- sc.shutdownTimerCh = sc.shutdownTimer.C
-}
-
-func (sc *serverConn) resetStream(se StreamError) {
- sc.serveG.check()
- sc.writeFrame(frameWriteMsg{write: se})
- if st, ok := sc.streams[se.StreamID]; ok {
- st.sentReset = true
- sc.closeStream(st, se)
- }
-}
-
-// processFrameFromReader processes the serve loop's read from readFrameCh from the
-// frame-reading goroutine.
-// processFrameFromReader returns whether the connection should be kept open.
-func (sc *serverConn) processFrameFromReader(res readFrameResult) bool {
- sc.serveG.check()
- err := res.err
- if err != nil {
- if err == ErrFrameTooLarge {
- sc.goAway(ErrCodeFrameSize)
- return true // goAway will close the loop
- }
- clientGone := err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err)
- if clientGone {
- // TODO: could we also get into this state if
- // the peer does a half close
- // (e.g. CloseWrite) because they're done
- // sending frames but they're still wanting
- // our open replies? Investigate.
- // TODO: add CloseWrite to crypto/tls.Conn first
- // so we have a way to test this? I suppose
- // just for testing we could have a non-TLS mode.
- return false
- }
- } else {
- f := res.f
- if VerboseLogs {
- sc.vlogf("http2: server read frame %v", summarizeFrame(f))
- }
- err = sc.processFrame(f)
- if err == nil {
- return true
- }
- }
-
- switch ev := err.(type) {
- case StreamError:
- sc.resetStream(ev)
- return true
- case goAwayFlowError:
- sc.goAway(ErrCodeFlowControl)
- return true
- case ConnectionError:
- sc.logf("http2: server connection error from %v: %v", sc.conn.RemoteAddr(), ev)
- sc.goAway(ErrCode(ev))
- return true // goAway will handle shutdown
- default:
- if res.err != nil {
- sc.vlogf("http2: server closing client connection; error reading frame from client %s: %v", sc.conn.RemoteAddr(), err)
- } else {
- sc.logf("http2: server closing client connection: %v", err)
- }
- return false
- }
-}
-
-func (sc *serverConn) processFrame(f Frame) error {
- sc.serveG.check()
-
- // First frame received must be SETTINGS.
- if !sc.sawFirstSettings {
- if _, ok := f.(*SettingsFrame); !ok {
- return ConnectionError(ErrCodeProtocol)
- }
- sc.sawFirstSettings = true
- }
-
- switch f := f.(type) {
- case *SettingsFrame:
- return sc.processSettings(f)
- case *MetaHeadersFrame:
- return sc.processHeaders(f)
- case *WindowUpdateFrame:
- return sc.processWindowUpdate(f)
- case *PingFrame:
- return sc.processPing(f)
- case *DataFrame:
- return sc.processData(f)
- case *RSTStreamFrame:
- return sc.processResetStream(f)
- case *PriorityFrame:
- return sc.processPriority(f)
- case *PushPromiseFrame:
- // A client cannot push. Thus, servers MUST treat the receipt of a PUSH_PROMISE
- // frame as a connection error (Section 5.4.1) of type PROTOCOL_ERROR.
- return ConnectionError(ErrCodeProtocol)
- default:
- sc.vlogf("http2: server ignoring frame: %v", f.Header())
- return nil
- }
-}
-
-func (sc *serverConn) processPing(f *PingFrame) error {
- sc.serveG.check()
- if f.IsAck() {
- // 6.7 PING: " An endpoint MUST NOT respond to PING frames
- // containing this flag."
- return nil
- }
- if f.StreamID != 0 {
- // "PING frames are not associated with any individual
- // stream. If a PING frame is received with a stream
- // identifier field value other than 0x0, the recipient MUST
- // respond with a connection error (Section 5.4.1) of type
- // PROTOCOL_ERROR."
- return ConnectionError(ErrCodeProtocol)
- }
- sc.writeFrame(frameWriteMsg{write: writePingAck{f}})
- return nil
-}
-
-func (sc *serverConn) processWindowUpdate(f *WindowUpdateFrame) error {
- sc.serveG.check()
- switch {
- case f.StreamID != 0: // stream-level flow control
- st := sc.streams[f.StreamID]
- if st == nil {
- // "WINDOW_UPDATE can be sent by a peer that has sent a
- // frame bearing the END_STREAM flag. This means that a
- // receiver could receive a WINDOW_UPDATE frame on a "half
- // closed (remote)" or "closed" stream. A receiver MUST
- // NOT treat this as an error, see Section 5.1."
- return nil
- }
- if !st.flow.add(int32(f.Increment)) {
- return StreamError{f.StreamID, ErrCodeFlowControl}
- }
- default: // connection-level flow control
- if !sc.flow.add(int32(f.Increment)) {
- return goAwayFlowError{}
- }
- }
- sc.scheduleFrameWrite()
- return nil
-}
-
-func (sc *serverConn) processResetStream(f *RSTStreamFrame) error {
- sc.serveG.check()
-
- state, st := sc.state(f.StreamID)
- if state == stateIdle {
- // 6.4 "RST_STREAM frames MUST NOT be sent for a
- // stream in the "idle" state. If a RST_STREAM frame
- // identifying an idle stream is received, the
- // recipient MUST treat this as a connection error
- // (Section 5.4.1) of type PROTOCOL_ERROR."
- return ConnectionError(ErrCodeProtocol)
- }
- if st != nil {
- st.gotReset = true
- sc.closeStream(st, StreamError{f.StreamID, f.ErrCode})
- }
- return nil
-}
-
-func (sc *serverConn) closeStream(st *stream, err error) {
- sc.serveG.check()
- if st.state == stateIdle || st.state == stateClosed {
- panic(fmt.Sprintf("invariant; can't close stream in state %v", st.state))
- }
- st.state = stateClosed
- sc.curOpenStreams--
- if sc.curOpenStreams == 0 {
- sc.setConnState(http.StateIdle)
- }
- delete(sc.streams, st.id)
- if p := st.body; p != nil {
- p.CloseWithError(err)
- }
- st.cw.Close() // signals Handler's CloseNotifier, unblocks writes, etc
- sc.writeSched.forgetStream(st.id)
-}
-
-func (sc *serverConn) processSettings(f *SettingsFrame) error {
- sc.serveG.check()
- if f.IsAck() {
- sc.unackedSettings--
- if sc.unackedSettings < 0 {
- // Why is the peer ACKing settings we never sent?
- // The spec doesn't mention this case, but
- // hang up on them anyway.
- return ConnectionError(ErrCodeProtocol)
- }
- return nil
- }
- if err := f.ForeachSetting(sc.processSetting); err != nil {
- return err
- }
- sc.needToSendSettingsAck = true
- sc.scheduleFrameWrite()
- return nil
-}
-
-func (sc *serverConn) processSetting(s Setting) error {
- sc.serveG.check()
- if err := s.Valid(); err != nil {
- return err
- }
- if VerboseLogs {
- sc.vlogf("http2: server processing setting %v", s)
- }
- switch s.ID {
- case SettingHeaderTableSize:
- sc.headerTableSize = s.Val
- sc.hpackEncoder.SetMaxDynamicTableSize(s.Val)
- case SettingEnablePush:
- sc.pushEnabled = s.Val != 0
- case SettingMaxConcurrentStreams:
- sc.clientMaxStreams = s.Val
- case SettingInitialWindowSize:
- return sc.processSettingInitialWindowSize(s.Val)
- case SettingMaxFrameSize:
- sc.writeSched.maxFrameSize = s.Val
- case SettingMaxHeaderListSize:
- sc.peerMaxHeaderListSize = s.Val
- default:
- // Unknown setting: "An endpoint that receives a SETTINGS
- // frame with any unknown or unsupported identifier MUST
- // ignore that setting."
- if VerboseLogs {
- sc.vlogf("http2: server ignoring unknown setting %v", s)
- }
- }
- return nil
-}
-
-func (sc *serverConn) processSettingInitialWindowSize(val uint32) error {
- sc.serveG.check()
- // Note: val already validated to be within range by
- // processSetting's Valid call.
-
- // "A SETTINGS frame can alter the initial flow control window
- // size for all current streams. When the value of
- // SETTINGS_INITIAL_WINDOW_SIZE changes, a receiver MUST
- // adjust the size of all stream flow control windows that it
- // maintains by the difference between the new value and the
- // old value."
- old := sc.initialWindowSize
- sc.initialWindowSize = int32(val)
- growth := sc.initialWindowSize - old // may be negative
- for _, st := range sc.streams {
- if !st.flow.add(growth) {
- // 6.9.2 Initial Flow Control Window Size
- // "An endpoint MUST treat a change to
- // SETTINGS_INITIAL_WINDOW_SIZE that causes any flow
- // control window to exceed the maximum size as a
- // connection error (Section 5.4.1) of type
- // FLOW_CONTROL_ERROR."
- return ConnectionError(ErrCodeFlowControl)
- }
- }
- return nil
-}
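The adjustment above is a single delta applied to every stream's send window: raising SETTINGS_INITIAL_WINDOW_SIZE from the default 65535 to, say, 131070 grows each window by 65535, while lowering it applies a negative delta (HTTP/2 permits the resulting window to go negative). A worked example of the arithmetic, with made-up window values:

package main

import "fmt"

func main() {
	// Example only: the peer raises SETTINGS_INITIAL_WINDOW_SIZE from the
	// default 65535 to 131070.
	old, updated := int32(65535), int32(131070)
	growth := updated - old                // negative if the peer shrinks the window
	streams := []int32{65535, 10000, -200} // per-stream send windows
	for i := range streams {
		streams[i] += growth
	}
	fmt.Println(growth, streams) // 65535 [131070 75535 65335]
}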
-
-func (sc *serverConn) processData(f *DataFrame) error {
- sc.serveG.check()
- // "If a DATA frame is received whose stream is not in "open"
- // or "half closed (local)" state, the recipient MUST respond
- // with a stream error (Section 5.4.2) of type STREAM_CLOSED."
- id := f.Header().StreamID
- st, ok := sc.streams[id]
- if !ok || st.state != stateOpen || st.gotTrailerHeader {
- // This includes sending a RST_STREAM if the stream is
- // in stateHalfClosedLocal (which currently means that
- // the http.Handler returned, so it's done reading &
- // done writing). Try to stop the client from sending
- // more DATA.
- return StreamError{id, ErrCodeStreamClosed}
- }
- if st.body == nil {
- panic("internal error: should have a body in this state")
- }
- data := f.Data()
-
- // Sender sending more than they'd declared?
- if st.declBodyBytes != -1 && st.bodyBytes+int64(len(data)) > st.declBodyBytes {
- st.body.CloseWithError(fmt.Errorf("sender tried to send more than declared Content-Length of %d bytes", st.declBodyBytes))
- return StreamError{id, ErrCodeStreamClosed}
- }
- if len(data) > 0 {
- // Check whether the client has flow control quota.
- if int(st.inflow.available()) < len(data) {
- return StreamError{id, ErrCodeFlowControl}
- }
- st.inflow.take(int32(len(data)))
- wrote, err := st.body.Write(data)
- if err != nil {
- return StreamError{id, ErrCodeStreamClosed}
- }
- if wrote != len(data) {
- panic("internal error: bad Writer")
- }
- st.bodyBytes += int64(len(data))
- }
- if f.StreamEnded() {
- st.endStream()
- }
- return nil
-}
-
-// endStream closes a Request.Body's pipe. It is called when a DATA
-// frame says a request body is over (or after trailers).
-func (st *stream) endStream() {
- sc := st.sc
- sc.serveG.check()
-
- if st.declBodyBytes != -1 && st.declBodyBytes != st.bodyBytes {
- st.body.CloseWithError(fmt.Errorf("request declared a Content-Length of %d but only wrote %d bytes",
- st.declBodyBytes, st.bodyBytes))
- } else {
- st.body.closeWithErrorAndCode(io.EOF, st.copyTrailersToHandlerRequest)
- st.body.CloseWithError(io.EOF)
- }
- st.state = stateHalfClosedRemote
-}
-
-// copyTrailersToHandlerRequest is run in the Handler's goroutine in
-// its Request.Body.Read just before it gets io.EOF.
-func (st *stream) copyTrailersToHandlerRequest() {
- for k, vv := range st.trailer {
- if _, ok := st.reqTrailer[k]; ok {
- // Only copy it over if it was pre-declared.
- st.reqTrailer[k] = vv
- }
- }
-}
-
-func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
- sc.serveG.check()
- id := f.Header().StreamID
- if sc.inGoAway {
- // Ignore.
- return nil
- }
- // http://http2.github.io/http2-spec/#rfc.section.5.1.1
- // Streams initiated by a client MUST use odd-numbered stream
- // identifiers. [...] An endpoint that receives an unexpected
- // stream identifier MUST respond with a connection error
- // (Section 5.4.1) of type PROTOCOL_ERROR.
- if id%2 != 1 {
- return ConnectionError(ErrCodeProtocol)
- }
- // A HEADERS frame can be used to create a new stream or
- // send a trailer for an open one. If we already have a stream
- // open, let it process its own HEADERS frame (trailers at this
- // point, if it's valid).
- st := sc.streams[f.Header().StreamID]
- if st != nil {
- return st.processTrailerHeaders(f)
- }
-
- // [...] The identifier of a newly established stream MUST be
- // numerically greater than all streams that the initiating
- // endpoint has opened or reserved. [...] An endpoint that
- // receives an unexpected stream identifier MUST respond with
- // a connection error (Section 5.4.1) of type PROTOCOL_ERROR.
- if id <= sc.maxStreamID {
- return ConnectionError(ErrCodeProtocol)
- }
- sc.maxStreamID = id
-
- st = &stream{
- sc: sc,
- id: id,
- state: stateOpen,
- }
- if f.StreamEnded() {
- st.state = stateHalfClosedRemote
- }
- st.cw.Init()
-
- st.flow.conn = &sc.flow // link to conn-level counter
- st.flow.add(sc.initialWindowSize)
- st.inflow.conn = &sc.inflow // link to conn-level counter
- st.inflow.add(initialWindowSize) // TODO: update this when we send a higher initial window size in the initial settings
-
- sc.streams[id] = st
- if f.HasPriority() {
- adjustStreamPriority(sc.streams, st.id, f.Priority)
- }
- sc.curOpenStreams++
- if sc.curOpenStreams == 1 {
- sc.setConnState(http.StateActive)
- }
- if sc.curOpenStreams > sc.advMaxStreams {
- // "Endpoints MUST NOT exceed the limit set by their
- // peer. An endpoint that receives a HEADERS frame
- // that causes their advertised concurrent stream
- // limit to be exceeded MUST treat this as a stream
- // error (Section 5.4.2) of type PROTOCOL_ERROR or
- // REFUSED_STREAM."
- if sc.unackedSettings == 0 {
- // They should know better.
- return StreamError{st.id, ErrCodeProtocol}
- }
- // Assume it's a network race, where they just haven't
- // received our last SETTINGS update. But actually
- // this can't happen yet, because we don't yet provide
- // a way for users to adjust server parameters at
- // runtime.
- return StreamError{st.id, ErrCodeRefusedStream}
- }
-
- rw, req, err := sc.newWriterAndRequest(st, f)
- if err != nil {
- return err
- }
- st.reqTrailer = req.Trailer
- if st.reqTrailer != nil {
- st.trailer = make(http.Header)
- }
- st.body = req.Body.(*requestBody).pipe // may be nil
- st.declBodyBytes = req.ContentLength
-
- handler := sc.handler.ServeHTTP
- if f.Truncated {
- // Their header list was too long. Send a 431 error.
- handler = handleHeaderListTooLong
- }
-
- go sc.runHandler(rw, req, handler)
- return nil
-}
-
-func (st *stream) processTrailerHeaders(f *MetaHeadersFrame) error {
- sc := st.sc
- sc.serveG.check()
- if st.gotTrailerHeader {
- return ConnectionError(ErrCodeProtocol)
- }
- st.gotTrailerHeader = true
- if !f.StreamEnded() {
- return StreamError{st.id, ErrCodeProtocol}
- }
-
- if len(f.PseudoFields()) > 0 {
- return StreamError{st.id, ErrCodeProtocol}
- }
- if st.trailer != nil {
- for _, hf := range f.RegularFields() {
- key := sc.canonicalHeader(hf.Name)
- st.trailer[key] = append(st.trailer[key], hf.Value)
- }
- }
- st.endStream()
- return nil
-}
-
-func (sc *serverConn) processPriority(f *PriorityFrame) error {
- adjustStreamPriority(sc.streams, f.StreamID, f.PriorityParam)
- return nil
-}
-
-func adjustStreamPriority(streams map[uint32]*stream, streamID uint32, priority PriorityParam) {
- st, ok := streams[streamID]
- if !ok {
- // TODO: not quite correct (this streamID might
- // already exist in the dep tree, but be closed), but
- // close enough for now.
- return
- }
- st.weight = priority.Weight
- parent := streams[priority.StreamDep] // might be nil
- if parent == st {
-		// If the client tries to make this stream its own parent,
-		// ignore it and keep going.
- return
- }
-
- // section 5.3.3: If a stream is made dependent on one of its
- // own dependencies, the formerly dependent stream is first
- // moved to be dependent on the reprioritized stream's previous
- // parent. The moved dependency retains its weight.
- for piter := parent; piter != nil; piter = piter.parent {
- if piter == st {
- parent.parent = st.parent
- break
- }
- }
- st.parent = parent
- if priority.Exclusive && (st.parent != nil || priority.StreamDep == 0) {
- for _, openStream := range streams {
- if openStream != st && openStream.parent == st.parent {
- openStream.parent = st
- }
- }
- }
-}
-
-func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*responseWriter, *http.Request, error) {
- sc.serveG.check()
-
- method := f.PseudoValue("method")
- path := f.PseudoValue("path")
- scheme := f.PseudoValue("scheme")
- authority := f.PseudoValue("authority")
-
- isConnect := method == "CONNECT"
- if isConnect {
- if path != "" || scheme != "" || authority == "" {
- return nil, nil, StreamError{f.StreamID, ErrCodeProtocol}
- }
- } else if method == "" || path == "" ||
- (scheme != "https" && scheme != "http") {
- // See 8.1.2.6 Malformed Requests and Responses:
- //
-		// "Malformed requests or responses that are detected
-		// MUST be treated as a stream error (Section 5.4.2)
-		// of type PROTOCOL_ERROR."
- //
- // 8.1.2.3 Request Pseudo-Header Fields
- // "All HTTP/2 requests MUST include exactly one valid
- // value for the :method, :scheme, and :path
- // pseudo-header fields"
- return nil, nil, StreamError{f.StreamID, ErrCodeProtocol}
- }
-
- bodyOpen := !f.StreamEnded()
- if method == "HEAD" && bodyOpen {
- // HEAD requests can't have bodies
- return nil, nil, StreamError{f.StreamID, ErrCodeProtocol}
- }
- var tlsState *tls.ConnectionState // nil if not scheme https
-
- if scheme == "https" {
- tlsState = sc.tlsState
- }
-
- header := make(http.Header)
- for _, hf := range f.RegularFields() {
- header.Add(sc.canonicalHeader(hf.Name), hf.Value)
- }
-
- if authority == "" {
- authority = header.Get("Host")
- }
- needsContinue := header.Get("Expect") == "100-continue"
- if needsContinue {
- header.Del("Expect")
- }
- // Merge Cookie headers into one "; "-delimited value.
- if cookies := header["Cookie"]; len(cookies) > 1 {
- header.Set("Cookie", strings.Join(cookies, "; "))
- }
-
- // Setup Trailers
- var trailer http.Header
- for _, v := range header["Trailer"] {
- for _, key := range strings.Split(v, ",") {
- key = http.CanonicalHeaderKey(strings.TrimSpace(key))
- switch key {
- case "Transfer-Encoding", "Trailer", "Content-Length":
- // Bogus. (copy of http1 rules)
- // Ignore.
- default:
- if trailer == nil {
- trailer = make(http.Header)
- }
- trailer[key] = nil
- }
- }
- }
- delete(header, "Trailer")
-
- body := &requestBody{
- conn: sc,
- stream: st,
- needsContinue: needsContinue,
- }
- var url_ *url.URL
- var requestURI string
- if isConnect {
- url_ = &url.URL{Host: authority}
- requestURI = authority // mimic HTTP/1 server behavior
- } else {
- var err error
- url_, err = url.ParseRequestURI(path)
- if err != nil {
- return nil, nil, StreamError{f.StreamID, ErrCodeProtocol}
- }
- requestURI = path
- }
- req := &http.Request{
- Method: method,
- URL: url_,
- RemoteAddr: sc.remoteAddrStr,
- Header: header,
- RequestURI: requestURI,
- Proto: "HTTP/2.0",
- ProtoMajor: 2,
- ProtoMinor: 0,
- TLS: tlsState,
- Host: authority,
- Body: body,
- Trailer: trailer,
- }
- if bodyOpen {
- body.pipe = &pipe{
- b: &fixedBuffer{buf: make([]byte, initialWindowSize)}, // TODO: garbage
- }
-
- if vv, ok := header["Content-Length"]; ok {
- req.ContentLength, _ = strconv.ParseInt(vv[0], 10, 64)
- } else {
- req.ContentLength = -1
- }
- }
-
- rws := responseWriterStatePool.Get().(*responseWriterState)
- bwSave := rws.bw
- *rws = responseWriterState{} // zero all the fields
- rws.conn = sc
- rws.bw = bwSave
- rws.bw.Reset(chunkWriter{rws})
- rws.stream = st
- rws.req = req
- rws.body = body
-
- rw := &responseWriter{rws: rws}
- return rw, req, nil
-}
-
-// Run on its own goroutine.
-func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) {
- didPanic := true
- defer func() {
- if didPanic {
- e := recover()
- // Same as net/http:
- const size = 64 << 10
- buf := make([]byte, size)
- buf = buf[:runtime.Stack(buf, false)]
- sc.writeFrameFromHandler(frameWriteMsg{
- write: handlerPanicRST{rw.rws.stream.id},
- stream: rw.rws.stream,
- })
- sc.logf("http2: panic serving %v: %v\n%s", sc.conn.RemoteAddr(), e, buf)
- return
- }
- rw.handlerDone()
- }()
- handler(rw, req)
- didPanic = false
-}
-
-func handleHeaderListTooLong(w http.ResponseWriter, r *http.Request) {
- // 10.5.1 Limits on Header Block Size:
- // .. "A server that receives a larger header block than it is
- // willing to handle can send an HTTP 431 (Request Header Fields Too
- // Large) status code"
- const statusRequestHeaderFieldsTooLarge = 431 // only in Go 1.6+
- w.WriteHeader(statusRequestHeaderFieldsTooLarge)
-	io.WriteString(w, "<h1>HTTP Error 431</h1><p>Request Header Field(s) Too Large</p>")
-}
-
-// called from handler goroutines.
-// h may be nil.
-func (sc *serverConn) writeHeaders(st *stream, headerData *writeResHeaders) error {
- sc.serveG.checkNotOn() // NOT on
- var errc chan error
- if headerData.h != nil {
-		// If there's a header map (which we don't own), we have to block on
-		// waiting for this frame to be written, so an http.Flush mid-handler
-		// writes out the correct value of keys before a handler later
-		// potentially mutates it.
- errc = errChanPool.Get().(chan error)
- }
- if err := sc.writeFrameFromHandler(frameWriteMsg{
- write: headerData,
- stream: st,
- done: errc,
- }); err != nil {
- return err
- }
- if errc != nil {
- select {
- case err := <-errc:
- errChanPool.Put(errc)
- return err
- case <-sc.doneServing:
- return errClientDisconnected
- case <-st.cw:
- return errStreamClosed
- }
- }
- return nil
-}
-
-// called from handler goroutines.
-func (sc *serverConn) write100ContinueHeaders(st *stream) {
- sc.writeFrameFromHandler(frameWriteMsg{
- write: write100ContinueHeadersFrame{st.id},
- stream: st,
- })
-}
-
-// A bodyReadMsg tells the server loop that the http.Handler read n
-// bytes of the DATA from the client on the given stream.
-type bodyReadMsg struct {
- st *stream
- n int
-}
-
-// called from handler goroutines.
-// Notes that the handler for the given stream ID read n bytes of its body
-// and schedules flow control tokens to be sent.
-func (sc *serverConn) noteBodyReadFromHandler(st *stream, n int) {
- sc.serveG.checkNotOn() // NOT on
- select {
- case sc.bodyReadCh <- bodyReadMsg{st, n}:
- case <-sc.doneServing:
- }
-}
-
-func (sc *serverConn) noteBodyRead(st *stream, n int) {
- sc.serveG.check()
- sc.sendWindowUpdate(nil, n) // conn-level
- if st.state != stateHalfClosedRemote && st.state != stateClosed {
- // Don't send this WINDOW_UPDATE if the stream is closed
- // remotely.
- sc.sendWindowUpdate(st, n)
- }
-}
-
-// st may be nil for conn-level
-func (sc *serverConn) sendWindowUpdate(st *stream, n int) {
- sc.serveG.check()
- // "The legal range for the increment to the flow control
- // window is 1 to 2^31-1 (2,147,483,647) octets."
-	// A Go Read call on 64-bit machines could in theory read
-	// more than this in a single call. Very unlikely, but we
-	// handle it here rather than elsewhere for now.
- const maxUint31 = 1<<31 - 1
- for n >= maxUint31 {
- sc.sendWindowUpdate32(st, maxUint31)
- n -= maxUint31
- }
- sc.sendWindowUpdate32(st, int32(n))
-}
-
-// st may be nil for conn-level
-func (sc *serverConn) sendWindowUpdate32(st *stream, n int32) {
- sc.serveG.check()
- if n == 0 {
- return
- }
- if n < 0 {
- panic("negative update")
- }
- var streamID uint32
- if st != nil {
- streamID = st.id
- }
- sc.writeFrame(frameWriteMsg{
- write: writeWindowUpdate{streamID: streamID, n: uint32(n)},
- stream: st,
- })
- var ok bool
- if st == nil {
- ok = sc.inflow.add(n)
- } else {
- ok = st.inflow.add(n)
- }
- if !ok {
- panic("internal error; sent too many window updates without decrements?")
- }
-}
-
-type requestBody struct {
- stream *stream
- conn *serverConn
- closed bool
- pipe *pipe // non-nil if we have a HTTP entity message body
- needsContinue bool // need to send a 100-continue
-}
-
-func (b *requestBody) Close() error {
- if b.pipe != nil {
- b.pipe.CloseWithError(errClosedBody)
- }
- b.closed = true
- return nil
-}
-
-func (b *requestBody) Read(p []byte) (n int, err error) {
- if b.needsContinue {
- b.needsContinue = false
- b.conn.write100ContinueHeaders(b.stream)
- }
- if b.pipe == nil {
- return 0, io.EOF
- }
- n, err = b.pipe.Read(p)
- if n > 0 {
- b.conn.noteBodyReadFromHandler(b.stream, n)
- }
- return
-}
-
-// responseWriter is the http.ResponseWriter implementation. It's
-// intentionally small (1 pointer wide) to minimize garbage. The
-// responseWriterState pointer inside is zeroed at the end of a
-// request (in handlerDone) and calls on the responseWriter thereafter
-// simply crash (caller's mistake), but the much larger responseWriterState
-// and buffers are reused between multiple requests.
-type responseWriter struct {
- rws *responseWriterState
-}
-
-// Optional http.ResponseWriter interfaces implemented.
-var (
- _ http.CloseNotifier = (*responseWriter)(nil)
- _ http.Flusher = (*responseWriter)(nil)
- _ stringWriter = (*responseWriter)(nil)
-)
-
-type responseWriterState struct {
- // immutable within a request:
- stream *stream
- req *http.Request
- body *requestBody // to close at end of request, if DATA frames didn't
- conn *serverConn
-
- // TODO: adjust buffer writing sizes based on server config, frame size updates from peer, etc
- bw *bufio.Writer // writing to a chunkWriter{this *responseWriterState}
-
- // mutated by http.Handler goroutine:
- handlerHeader http.Header // nil until called
- snapHeader http.Header // snapshot of handlerHeader at WriteHeader time
- trailers []string // set in writeChunk
- status int // status code passed to WriteHeader
- wroteHeader bool // WriteHeader called (explicitly or implicitly). Not necessarily sent to user yet.
- sentHeader bool // have we sent the header frame?
- handlerDone bool // handler has finished
-
- sentContentLen int64 // non-zero if handler set a Content-Length header
- wroteBytes int64
-
- closeNotifierMu sync.Mutex // guards closeNotifierCh
- closeNotifierCh chan bool // nil until first used
-}
-
-type chunkWriter struct{ rws *responseWriterState }
-
-func (cw chunkWriter) Write(p []byte) (n int, err error) { return cw.rws.writeChunk(p) }
-
-func (rws *responseWriterState) hasTrailers() bool { return len(rws.trailers) != 0 }
-
-// declareTrailer is called for each Trailer header when the
-// response header is written. It notes that a header will need to be
-// written in the trailers at the end of the response.
-func (rws *responseWriterState) declareTrailer(k string) {
- k = http.CanonicalHeaderKey(k)
- switch k {
- case "Transfer-Encoding", "Content-Length", "Trailer":
- // Forbidden by RFC 2616 14.40.
- return
- }
- if !strSliceContains(rws.trailers, k) {
- rws.trailers = append(rws.trailers, k)
- }
-}
-
-// writeChunk writes chunks from the bufio.Writer. But because
-// bufio.Writer may bypass its chunking, sometimes p may be
-// arbitrarily large.
-//
-// writeChunk is also responsible (on the first chunk) for sending the
-// HEADER response.
-func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) {
- if !rws.wroteHeader {
- rws.writeHeader(200)
- }
-
- isHeadResp := rws.req.Method == "HEAD"
- if !rws.sentHeader {
- rws.sentHeader = true
- var ctype, clen string
- if clen = rws.snapHeader.Get("Content-Length"); clen != "" {
- rws.snapHeader.Del("Content-Length")
- clen64, err := strconv.ParseInt(clen, 10, 64)
- if err == nil && clen64 >= 0 {
- rws.sentContentLen = clen64
- } else {
- clen = ""
- }
- }
- if clen == "" && rws.handlerDone && bodyAllowedForStatus(rws.status) && (len(p) > 0 || !isHeadResp) {
- clen = strconv.Itoa(len(p))
- }
- _, hasContentType := rws.snapHeader["Content-Type"]
- if !hasContentType && bodyAllowedForStatus(rws.status) {
- ctype = http.DetectContentType(p)
- }
- var date string
- if _, ok := rws.snapHeader["Date"]; !ok {
- // TODO(bradfitz): be faster here, like net/http? measure.
- date = time.Now().UTC().Format(http.TimeFormat)
- }
-
- for _, v := range rws.snapHeader["Trailer"] {
- foreachHeaderElement(v, rws.declareTrailer)
- }
-
- endStream := (rws.handlerDone && !rws.hasTrailers() && len(p) == 0) || isHeadResp
- err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{
- streamID: rws.stream.id,
- httpResCode: rws.status,
- h: rws.snapHeader,
- endStream: endStream,
- contentType: ctype,
- contentLength: clen,
- date: date,
- })
- if err != nil {
- return 0, err
- }
- if endStream {
- return 0, nil
- }
- }
- if isHeadResp {
- return len(p), nil
- }
- if len(p) == 0 && !rws.handlerDone {
- return 0, nil
- }
-
- if rws.handlerDone {
- rws.promoteUndeclaredTrailers()
- }
-
- endStream := rws.handlerDone && !rws.hasTrailers()
- if len(p) > 0 || endStream {
- // only send a 0 byte DATA frame if we're ending the stream.
- if err := rws.conn.writeDataFromHandler(rws.stream, p, endStream); err != nil {
- return 0, err
- }
- }
-
- if rws.handlerDone && rws.hasTrailers() {
- err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{
- streamID: rws.stream.id,
- h: rws.handlerHeader,
- trailers: rws.trailers,
- endStream: true,
- })
- return len(p), err
- }
- return len(p), nil
-}
-
-// TrailerPrefix is a magic prefix for ResponseWriter.Header map keys
-// that, if present, signals that the map entry is actually for
-// the response trailers, and not the response headers. The prefix
-// is stripped after the ServeHTTP call finishes and the values are
-// sent in the trailers.
-//
-// This mechanism is intended only for trailers that are not known
-// prior to the headers being written. If the set of trailers is fixed
-// or known before the header is written, the normal Go trailers mechanism
-// is preferred:
-// https://golang.org/pkg/net/http/#ResponseWriter
-// https://golang.org/pkg/net/http/#example_ResponseWriter_trailers
-const TrailerPrefix = "Trailer:"
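As a rough illustration of the mechanism described above, a hypothetical handler (not taken from this package's code or tests) might set a trailer after flushing its headers like this; "Example-Status" is a made-up field name:

package example

import (
	"io"
	"net/http"
)

// exampleTrailerHandler sketches the "Trailer:" prefix hack: once the response
// headers have been flushed, a key set with the TrailerPrefix is promoted to a
// trailer by promoteUndeclaredTrailers when the handler returns.
func exampleTrailerHandler(w http.ResponseWriter, r *http.Request) {
	w.WriteHeader(http.StatusOK)
	io.WriteString(w, "body\n")
	w.(http.Flusher).Flush() // headers are on the wire; a normal Trailer declaration is too late now

	// "Example-Status" is used only for illustration.
	w.Header().Set("Trailer:Example-Status", "0")
}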
-
-// promoteUndeclaredTrailers permits http.Handlers to set trailers
-// after the header has already been flushed. Because the Go
-// ResponseWriter interface has no way to set Trailers (only the
-// Header), and because we didn't want to expand the ResponseWriter
-// interface, and because nobody used trailers, and because RFC 2616
-// says you SHOULD (but not must) predeclare any trailers in the
-// header, the official ResponseWriter rules said trailers in Go must
-// be predeclared, and then we reuse the same ResponseWriter.Header()
-// map to mean both Headers and Trailers. When it's time to write the
-// Trailers, we pick out the fields of Headers that were declared as
-// trailers. That worked for a while, until we found the first major
-// user of Trailers in the wild: gRPC (using them only over http2),
-// and gRPC libraries permit setting trailers mid-stream without
-// predeclaring them. So: change of plans. We still permit the old
-// way, but we also permit this hack: if a Header() key begins with
-// "Trailer:", the suffix of that key is a Trailer. Because ':' is an
-// invalid token byte anyway, there is no ambiguity. (And it's already
-// filtered out.) It's mildly hacky, but not terrible.
-//
-// This method runs after the Handler is done and promotes any Header
-// fields to be trailers.
-func (rws *responseWriterState) promoteUndeclaredTrailers() {
- for k, vv := range rws.handlerHeader {
- if !strings.HasPrefix(k, TrailerPrefix) {
- continue
- }
- trailerKey := strings.TrimPrefix(k, TrailerPrefix)
- rws.declareTrailer(trailerKey)
- rws.handlerHeader[http.CanonicalHeaderKey(trailerKey)] = vv
- }
- sort.Strings(rws.trailers)
-}
-
-func (w *responseWriter) Flush() {
- rws := w.rws
- if rws == nil {
-		panic("Flush called after Handler finished")
- }
- if rws.bw.Buffered() > 0 {
- if err := rws.bw.Flush(); err != nil {
- // Ignore the error. The frame writer already knows.
- return
- }
- } else {
-		// The bufio.Writer won't call chunkWriter.Write
-		// (writeChunk) with zero bytes, so we have to do it
- // ourselves to force the HTTP response header and/or
- // final DATA frame (with END_STREAM) to be sent.
- rws.writeChunk(nil)
- }
-}
-
-func (w *responseWriter) CloseNotify() <-chan bool {
- rws := w.rws
- if rws == nil {
- panic("CloseNotify called after Handler finished")
- }
- rws.closeNotifierMu.Lock()
- ch := rws.closeNotifierCh
- if ch == nil {
- ch = make(chan bool, 1)
- rws.closeNotifierCh = ch
- go func() {
- rws.stream.cw.Wait() // wait for close
- ch <- true
- }()
- }
- rws.closeNotifierMu.Unlock()
- return ch
-}
-
-func (w *responseWriter) Header() http.Header {
- rws := w.rws
- if rws == nil {
- panic("Header called after Handler finished")
- }
- if rws.handlerHeader == nil {
- rws.handlerHeader = make(http.Header)
- }
- return rws.handlerHeader
-}
-
-func (w *responseWriter) WriteHeader(code int) {
- rws := w.rws
- if rws == nil {
- panic("WriteHeader called after Handler finished")
- }
- rws.writeHeader(code)
-}
-
-func (rws *responseWriterState) writeHeader(code int) {
- if !rws.wroteHeader {
- rws.wroteHeader = true
- rws.status = code
- if len(rws.handlerHeader) > 0 {
- rws.snapHeader = cloneHeader(rws.handlerHeader)
- }
- }
-}
-
-func cloneHeader(h http.Header) http.Header {
- h2 := make(http.Header, len(h))
- for k, vv := range h {
- vv2 := make([]string, len(vv))
- copy(vv2, vv)
- h2[k] = vv2
- }
- return h2
-}
-
-// The Life Of A Write is like this:
-//
-// * Handler calls w.Write or w.WriteString ->
-// * -> rws.bw (*bufio.Writer) ->
-// * (Handler might call Flush)
-// * -> chunkWriter{rws}
-// * -> responseWriterState.writeChunk(p []byte)
-// * -> responseWriterState.writeChunk (most of the magic; see comment there)
-func (w *responseWriter) Write(p []byte) (n int, err error) {
- return w.write(len(p), p, "")
-}
-
-func (w *responseWriter) WriteString(s string) (n int, err error) {
- return w.write(len(s), nil, s)
-}
-
-// At most one of dataB or dataS is non-empty.
-func (w *responseWriter) write(lenData int, dataB []byte, dataS string) (n int, err error) {
- rws := w.rws
- if rws == nil {
- panic("Write called after Handler finished")
- }
- if !rws.wroteHeader {
- w.WriteHeader(200)
- }
- if !bodyAllowedForStatus(rws.status) {
- return 0, http.ErrBodyNotAllowed
- }
- rws.wroteBytes += int64(len(dataB)) + int64(len(dataS)) // only one can be set
- if rws.sentContentLen != 0 && rws.wroteBytes > rws.sentContentLen {
- // TODO: send a RST_STREAM
- return 0, errors.New("http2: handler wrote more than declared Content-Length")
- }
-
- if dataB != nil {
- return rws.bw.Write(dataB)
- } else {
- return rws.bw.WriteString(dataS)
- }
-}
-
-func (w *responseWriter) handlerDone() {
- rws := w.rws
- rws.handlerDone = true
- w.Flush()
- w.rws = nil
- responseWriterStatePool.Put(rws)
-}
-
-// foreachHeaderElement splits v according to the "#rule" construction
-// in RFC 2616 section 2.1 and calls fn for each non-empty element.
-func foreachHeaderElement(v string, fn func(string)) {
- v = textproto.TrimString(v)
- if v == "" {
- return
- }
- if !strings.Contains(v, ",") {
- fn(v)
- return
- }
- for _, f := range strings.Split(v, ",") {
- if f = textproto.TrimString(f); f != "" {
- fn(f)
- }
- }
-}
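For reference, a small standalone sketch (assumed inputs, not from this file) of how the #rule splitting above behaves on a typical Trailer value:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// foreachHeaderElement would call fn once per non-empty, trimmed element.
	v := " Foo ,, Bar,Baz "
	var elems []string
	for _, f := range strings.Split(v, ",") {
		if f = strings.TrimSpace(f); f != "" {
			elems = append(elems, f)
		}
	}
	fmt.Println(elems) // [Foo Bar Baz]
}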
diff --git a/vendor/golang.org/x/net/http2/server_test.go b/vendor/golang.org/x/net/http2/server_test.go
deleted file mode 100644
index cd3e91a2..00000000
--- a/vendor/golang.org/x/net/http2/server_test.go
+++ /dev/null
@@ -1,3102 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package http2
-
-import (
- "bytes"
- "crypto/tls"
- "errors"
- "flag"
- "fmt"
- "io"
- "io/ioutil"
- "log"
- "net"
- "net/http"
- "net/http/httptest"
- "os"
- "os/exec"
- "reflect"
- "runtime"
- "strconv"
- "strings"
- "sync"
- "sync/atomic"
- "testing"
- "time"
-
- "golang.org/x/net/http2/hpack"
-)
-
-var stderrVerbose = flag.Bool("stderr_verbose", false, "Mirror verbosity to stderr, unbuffered")
-
-func stderrv() io.Writer {
- if *stderrVerbose {
- return os.Stderr
- }
-
- return ioutil.Discard
-}
-
-type serverTester struct {
- cc net.Conn // client conn
- t testing.TB
- ts *httptest.Server
- fr *Framer
- logBuf *bytes.Buffer
- logFilter []string // substrings to filter out
- scMu sync.Mutex // guards sc
- sc *serverConn
- hpackDec *hpack.Decoder
- decodedHeaders [][2]string
-
- // writing headers:
- headerBuf bytes.Buffer
- hpackEnc *hpack.Encoder
-
- // reading frames:
- frc chan Frame
- frErrc chan error
- readTimer *time.Timer
-}
-
-func init() {
- testHookOnPanicMu = new(sync.Mutex)
-}
-
-func resetHooks() {
- testHookOnPanicMu.Lock()
- testHookOnPanic = nil
- testHookOnPanicMu.Unlock()
-}
-
-type serverTesterOpt string
-
-var optOnlyServer = serverTesterOpt("only_server")
-var optQuiet = serverTesterOpt("quiet_logging")
-
-func newServerTester(t testing.TB, handler http.HandlerFunc, opts ...interface{}) *serverTester {
- resetHooks()
-
- logBuf := new(bytes.Buffer)
- ts := httptest.NewUnstartedServer(handler)
-
- tlsConfig := &tls.Config{
- InsecureSkipVerify: true,
-		// The h2-14 ALPN token is temporary, until the curl used by the unit
-		// tests in Docker is updated.
- NextProtos: []string{NextProtoTLS, "h2-14"},
- }
-
- var onlyServer, quiet bool
- for _, opt := range opts {
- switch v := opt.(type) {
- case func(*tls.Config):
- v(tlsConfig)
- case func(*httptest.Server):
- v(ts)
- case serverTesterOpt:
- switch v {
- case optOnlyServer:
- onlyServer = true
- case optQuiet:
- quiet = true
- }
- default:
- t.Fatalf("unknown newServerTester option type %T", v)
- }
- }
-
- ConfigureServer(ts.Config, &Server{})
-
- st := &serverTester{
- t: t,
- ts: ts,
- logBuf: logBuf,
- frc: make(chan Frame, 1),
- frErrc: make(chan error, 1),
- }
- st.hpackEnc = hpack.NewEncoder(&st.headerBuf)
- st.hpackDec = hpack.NewDecoder(initialHeaderTableSize, st.onHeaderField)
-
- ts.TLS = ts.Config.TLSConfig // the httptest.Server has its own copy of this TLS config
- if quiet {
- ts.Config.ErrorLog = log.New(ioutil.Discard, "", 0)
- } else {
- ts.Config.ErrorLog = log.New(io.MultiWriter(stderrv(), twriter{t: t, st: st}, logBuf), "", log.LstdFlags)
- }
- ts.StartTLS()
-
- if VerboseLogs {
- t.Logf("Running test server at: %s", ts.URL)
- }
- testHookGetServerConn = func(v *serverConn) {
- st.scMu.Lock()
- defer st.scMu.Unlock()
- st.sc = v
- st.sc.testHookCh = make(chan func(int))
- }
- log.SetOutput(io.MultiWriter(stderrv(), twriter{t: t, st: st}))
- if !onlyServer {
- cc, err := tls.Dial("tcp", ts.Listener.Addr().String(), tlsConfig)
- if err != nil {
- t.Fatal(err)
- }
- st.cc = cc
- st.fr = NewFramer(cc, cc)
- }
- return st
-}
-
-func (st *serverTester) closeConn() {
- st.scMu.Lock()
- defer st.scMu.Unlock()
- st.sc.conn.Close()
-}
-
-func (st *serverTester) addLogFilter(phrase string) {
- st.logFilter = append(st.logFilter, phrase)
-}
-
-func (st *serverTester) stream(id uint32) *stream {
- ch := make(chan *stream, 1)
- st.sc.testHookCh <- func(int) {
- ch <- st.sc.streams[id]
- }
- return <-ch
-}
-
-func (st *serverTester) streamState(id uint32) streamState {
- ch := make(chan streamState, 1)
- st.sc.testHookCh <- func(int) {
- state, _ := st.sc.state(id)
- ch <- state
- }
- return <-ch
-}
-
-// loopNum reports how many times this conn's select loop has gone around.
-func (st *serverTester) loopNum() int {
- lastc := make(chan int, 1)
- st.sc.testHookCh <- func(loopNum int) {
- lastc <- loopNum
- }
- return <-lastc
-}
-
-// awaitIdle heuristically waits for the server conn's select loop to be idle.
-// The heuristic is that the server connection's serve loop must schedule
-// 50 times in a row without any channel sends or receives occurring.
-func (st *serverTester) awaitIdle() {
- remain := 50
- last := st.loopNum()
- for remain > 0 {
- n := st.loopNum()
- if n == last+1 {
- remain--
- } else {
- remain = 50
- }
- last = n
- }
-}
-
-func (st *serverTester) Close() {
- st.ts.Close()
- if st.cc != nil {
- st.cc.Close()
- }
- log.SetOutput(os.Stderr)
-}
-
-// greet initiates the client's HTTP/2 connection into a state where
-// frames may be sent.
-func (st *serverTester) greet() {
- st.writePreface()
- st.writeInitialSettings()
- st.wantSettings()
- st.writeSettingsAck()
- st.wantSettingsAck()
-}
-
-func (st *serverTester) writePreface() {
- n, err := st.cc.Write(clientPreface)
- if err != nil {
- st.t.Fatalf("Error writing client preface: %v", err)
- }
- if n != len(clientPreface) {
- st.t.Fatalf("Writing client preface, wrote %d bytes; want %d", n, len(clientPreface))
- }
-}
-
-func (st *serverTester) writeInitialSettings() {
- if err := st.fr.WriteSettings(); err != nil {
- st.t.Fatalf("Error writing initial SETTINGS frame from client to server: %v", err)
- }
-}
-
-func (st *serverTester) writeSettingsAck() {
- if err := st.fr.WriteSettingsAck(); err != nil {
- st.t.Fatalf("Error writing ACK of server's SETTINGS: %v", err)
- }
-}
-
-func (st *serverTester) writeHeaders(p HeadersFrameParam) {
- if err := st.fr.WriteHeaders(p); err != nil {
- st.t.Fatalf("Error writing HEADERS: %v", err)
- }
-}
-
-func (st *serverTester) encodeHeaderField(k, v string) {
- err := st.hpackEnc.WriteField(hpack.HeaderField{Name: k, Value: v})
- if err != nil {
- st.t.Fatalf("HPACK encoding error for %q/%q: %v", k, v, err)
- }
-}
-
-// encodeHeaderRaw is the magic-free version of encodeHeader.
-// It takes 0 or more (k, v) pairs and encodes them.
-func (st *serverTester) encodeHeaderRaw(headers ...string) []byte {
- if len(headers)%2 == 1 {
- panic("odd number of kv args")
- }
- st.headerBuf.Reset()
- for len(headers) > 0 {
- k, v := headers[0], headers[1]
- st.encodeHeaderField(k, v)
- headers = headers[2:]
- }
- return st.headerBuf.Bytes()
-}
-
-// encodeHeader encodes headers and returns their HPACK bytes. headers
-// must contain an even number of key/value pairs. There may be
-// multiple pairs for keys (e.g. "cookie"). The :method, :path, and
-// :scheme headers default to GET, / and https.
-func (st *serverTester) encodeHeader(headers ...string) []byte {
- if len(headers)%2 == 1 {
- panic("odd number of kv args")
- }
-
- st.headerBuf.Reset()
-
- if len(headers) == 0 {
- // Fast path, mostly for benchmarks, so test code doesn't pollute
- // profiles when we're looking to improve server allocations.
- st.encodeHeaderField(":method", "GET")
- st.encodeHeaderField(":path", "/")
- st.encodeHeaderField(":scheme", "https")
- return st.headerBuf.Bytes()
- }
-
- if len(headers) == 2 && headers[0] == ":method" {
- // Another fast path for benchmarks.
- st.encodeHeaderField(":method", headers[1])
- st.encodeHeaderField(":path", "/")
- st.encodeHeaderField(":scheme", "https")
- return st.headerBuf.Bytes()
- }
-
- pseudoCount := map[string]int{}
- keys := []string{":method", ":path", ":scheme"}
- vals := map[string][]string{
- ":method": {"GET"},
- ":path": {"/"},
- ":scheme": {"https"},
- }
- for len(headers) > 0 {
- k, v := headers[0], headers[1]
- headers = headers[2:]
- if _, ok := vals[k]; !ok {
- keys = append(keys, k)
- }
- if strings.HasPrefix(k, ":") {
- pseudoCount[k]++
- if pseudoCount[k] == 1 {
- vals[k] = []string{v}
- } else {
- // Allows testing of invalid headers w/ dup pseudo fields.
- vals[k] = append(vals[k], v)
- }
- } else {
- vals[k] = append(vals[k], v)
- }
- }
- for _, k := range keys {
- for _, v := range vals[k] {
- st.encodeHeaderField(k, v)
- }
- }
- return st.headerBuf.Bytes()
-}
-
-// bodylessReq1 writes a HEADERS frame with StreamID 1 and EndStream and EndHeaders set.
-func (st *serverTester) bodylessReq1(headers ...string) {
- st.writeHeaders(HeadersFrameParam{
- StreamID: 1, // clients send odd numbers
- BlockFragment: st.encodeHeader(headers...),
- EndStream: true,
- EndHeaders: true,
- })
-}
-
-func (st *serverTester) writeData(streamID uint32, endStream bool, data []byte) {
- if err := st.fr.WriteData(streamID, endStream, data); err != nil {
- st.t.Fatalf("Error writing DATA: %v", err)
- }
-}
-
-func (st *serverTester) readFrame() (Frame, error) {
- go func() {
- fr, err := st.fr.ReadFrame()
- if err != nil {
- st.frErrc <- err
- } else {
- st.frc <- fr
- }
- }()
- t := st.readTimer
- if t == nil {
- t = time.NewTimer(2 * time.Second)
- st.readTimer = t
- }
- t.Reset(2 * time.Second)
- defer t.Stop()
- select {
- case f := <-st.frc:
- return f, nil
- case err := <-st.frErrc:
- return nil, err
- case <-t.C:
- return nil, errors.New("timeout waiting for frame")
- }
-}
-
-func (st *serverTester) wantHeaders() *HeadersFrame {
- f, err := st.readFrame()
- if err != nil {
- st.t.Fatalf("Error while expecting a HEADERS frame: %v", err)
- }
- hf, ok := f.(*HeadersFrame)
- if !ok {
- st.t.Fatalf("got a %T; want *HeadersFrame", f)
- }
- return hf
-}
-
-func (st *serverTester) wantContinuation() *ContinuationFrame {
- f, err := st.readFrame()
- if err != nil {
- st.t.Fatalf("Error while expecting a CONTINUATION frame: %v", err)
- }
- cf, ok := f.(*ContinuationFrame)
- if !ok {
- st.t.Fatalf("got a %T; want *ContinuationFrame", f)
- }
- return cf
-}
-
-func (st *serverTester) wantData() *DataFrame {
- f, err := st.readFrame()
- if err != nil {
- st.t.Fatalf("Error while expecting a DATA frame: %v", err)
- }
- df, ok := f.(*DataFrame)
- if !ok {
- st.t.Fatalf("got a %T; want *DataFrame", f)
- }
- return df
-}
-
-func (st *serverTester) wantSettings() *SettingsFrame {
- f, err := st.readFrame()
- if err != nil {
- st.t.Fatalf("Error while expecting a SETTINGS frame: %v", err)
- }
- sf, ok := f.(*SettingsFrame)
- if !ok {
- st.t.Fatalf("got a %T; want *SettingsFrame", f)
- }
- return sf
-}
-
-func (st *serverTester) wantPing() *PingFrame {
- f, err := st.readFrame()
- if err != nil {
- st.t.Fatalf("Error while expecting a PING frame: %v", err)
- }
- pf, ok := f.(*PingFrame)
- if !ok {
- st.t.Fatalf("got a %T; want *PingFrame", f)
- }
- return pf
-}
-
-func (st *serverTester) wantGoAway() *GoAwayFrame {
- f, err := st.readFrame()
- if err != nil {
- st.t.Fatalf("Error while expecting a GOAWAY frame: %v", err)
- }
- gf, ok := f.(*GoAwayFrame)
- if !ok {
- st.t.Fatalf("got a %T; want *GoAwayFrame", f)
- }
- return gf
-}
-
-func (st *serverTester) wantRSTStream(streamID uint32, errCode ErrCode) {
- f, err := st.readFrame()
- if err != nil {
- st.t.Fatalf("Error while expecting an RSTStream frame: %v", err)
- }
- rs, ok := f.(*RSTStreamFrame)
- if !ok {
- st.t.Fatalf("got a %T; want *RSTStreamFrame", f)
- }
- if rs.FrameHeader.StreamID != streamID {
- st.t.Fatalf("RSTStream StreamID = %d; want %d", rs.FrameHeader.StreamID, streamID)
- }
- if rs.ErrCode != errCode {
- st.t.Fatalf("RSTStream ErrCode = %d (%s); want %d (%s)", rs.ErrCode, rs.ErrCode, errCode, errCode)
- }
-}
-
-func (st *serverTester) wantWindowUpdate(streamID, incr uint32) {
- f, err := st.readFrame()
- if err != nil {
- st.t.Fatalf("Error while expecting a WINDOW_UPDATE frame: %v", err)
- }
- wu, ok := f.(*WindowUpdateFrame)
- if !ok {
- st.t.Fatalf("got a %T; want *WindowUpdateFrame", f)
- }
- if wu.FrameHeader.StreamID != streamID {
- st.t.Fatalf("WindowUpdate StreamID = %d; want %d", wu.FrameHeader.StreamID, streamID)
- }
- if wu.Increment != incr {
- st.t.Fatalf("WindowUpdate increment = %d; want %d", wu.Increment, incr)
- }
-}
-
-func (st *serverTester) wantSettingsAck() {
- f, err := st.readFrame()
- if err != nil {
- st.t.Fatal(err)
- }
- sf, ok := f.(*SettingsFrame)
- if !ok {
- st.t.Fatalf("Wanting a settings ACK, received a %T", f)
- }
- if !sf.Header().Flags.Has(FlagSettingsAck) {
- st.t.Fatal("Settings Frame didn't have ACK set")
- }
-
-}
-
-func TestServer(t *testing.T) {
- gotReq := make(chan bool, 1)
- st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
- w.Header().Set("Foo", "Bar")
- gotReq <- true
- })
- defer st.Close()
-
- covers("3.5", `
- The server connection preface consists of a potentially empty
- SETTINGS frame ([SETTINGS]) that MUST be the first frame the
- server sends in the HTTP/2 connection.
- `)
-
- st.writePreface()
- st.writeInitialSettings()
- st.wantSettings()
- st.writeSettingsAck()
- st.wantSettingsAck()
-
- st.writeHeaders(HeadersFrameParam{
- StreamID: 1, // clients send odd numbers
- BlockFragment: st.encodeHeader(),
- EndStream: true, // no DATA frames
- EndHeaders: true,
- })
-
- select {
- case <-gotReq:
- case <-time.After(2 * time.Second):
- t.Error("timeout waiting for request")
- }
-}
-
-func TestServer_Request_Get(t *testing.T) {
- testServerRequest(t, func(st *serverTester) {
- st.writeHeaders(HeadersFrameParam{
- StreamID: 1, // clients send odd numbers
- BlockFragment: st.encodeHeader("foo-bar", "some-value"),
- EndStream: true, // no DATA frames
- EndHeaders: true,
- })
- }, func(r *http.Request) {
- if r.Method != "GET" {
- t.Errorf("Method = %q; want GET", r.Method)
- }
- if r.URL.Path != "/" {
- t.Errorf("URL.Path = %q; want /", r.URL.Path)
- }
- if r.ContentLength != 0 {
- t.Errorf("ContentLength = %v; want 0", r.ContentLength)
- }
- if r.Close {
- t.Error("Close = true; want false")
- }
- if !strings.Contains(r.RemoteAddr, ":") {
- t.Errorf("RemoteAddr = %q; want something with a colon", r.RemoteAddr)
- }
- if r.Proto != "HTTP/2.0" || r.ProtoMajor != 2 || r.ProtoMinor != 0 {
- t.Errorf("Proto = %q Major=%v,Minor=%v; want HTTP/2.0", r.Proto, r.ProtoMajor, r.ProtoMinor)
- }
- wantHeader := http.Header{
- "Foo-Bar": []string{"some-value"},
- }
- if !reflect.DeepEqual(r.Header, wantHeader) {
- t.Errorf("Header = %#v; want %#v", r.Header, wantHeader)
- }
- if n, err := r.Body.Read([]byte(" ")); err != io.EOF || n != 0 {
- t.Errorf("Read = %d, %v; want 0, EOF", n, err)
- }
- })
-}
-
-func TestServer_Request_Get_PathSlashes(t *testing.T) {
- testServerRequest(t, func(st *serverTester) {
- st.writeHeaders(HeadersFrameParam{
- StreamID: 1, // clients send odd numbers
- BlockFragment: st.encodeHeader(":path", "/%2f/"),
- EndStream: true, // no DATA frames
- EndHeaders: true,
- })
- }, func(r *http.Request) {
- if r.RequestURI != "/%2f/" {
- t.Errorf("RequestURI = %q; want /%%2f/", r.RequestURI)
- }
- if r.URL.Path != "///" {
- t.Errorf("URL.Path = %q; want ///", r.URL.Path)
- }
- })
-}
-
-// TODO: add a test with EndStream=true on the HEADERS but setting a
-// Content-Length anyway. Should we just omit it and force it to
-// zero?
-
-func TestServer_Request_Post_NoContentLength_EndStream(t *testing.T) {
- testServerRequest(t, func(st *serverTester) {
- st.writeHeaders(HeadersFrameParam{
- StreamID: 1, // clients send odd numbers
- BlockFragment: st.encodeHeader(":method", "POST"),
- EndStream: true,
- EndHeaders: true,
- })
- }, func(r *http.Request) {
- if r.Method != "POST" {
- t.Errorf("Method = %q; want POST", r.Method)
- }
- if r.ContentLength != 0 {
- t.Errorf("ContentLength = %v; want 0", r.ContentLength)
- }
- if n, err := r.Body.Read([]byte(" ")); err != io.EOF || n != 0 {
- t.Errorf("Read = %d, %v; want 0, EOF", n, err)
- }
- })
-}
-
-func TestServer_Request_Post_Body_ImmediateEOF(t *testing.T) {
- testBodyContents(t, -1, "", func(st *serverTester) {
- st.writeHeaders(HeadersFrameParam{
- StreamID: 1, // clients send odd numbers
- BlockFragment: st.encodeHeader(":method", "POST"),
- EndStream: false, // to say DATA frames are coming
- EndHeaders: true,
- })
- st.writeData(1, true, nil) // just kidding. empty body.
- })
-}
-
-func TestServer_Request_Post_Body_OneData(t *testing.T) {
- const content = "Some content"
- testBodyContents(t, -1, content, func(st *serverTester) {
- st.writeHeaders(HeadersFrameParam{
- StreamID: 1, // clients send odd numbers
- BlockFragment: st.encodeHeader(":method", "POST"),
- EndStream: false, // to say DATA frames are coming
- EndHeaders: true,
- })
- st.writeData(1, true, []byte(content))
- })
-}
-
-func TestServer_Request_Post_Body_TwoData(t *testing.T) {
- const content = "Some content"
- testBodyContents(t, -1, content, func(st *serverTester) {
- st.writeHeaders(HeadersFrameParam{
- StreamID: 1, // clients send odd numbers
- BlockFragment: st.encodeHeader(":method", "POST"),
- EndStream: false, // to say DATA frames are coming
- EndHeaders: true,
- })
- st.writeData(1, false, []byte(content[:5]))
- st.writeData(1, true, []byte(content[5:]))
- })
-}
-
-func TestServer_Request_Post_Body_ContentLength_Correct(t *testing.T) {
- const content = "Some content"
- testBodyContents(t, int64(len(content)), content, func(st *serverTester) {
- st.writeHeaders(HeadersFrameParam{
- StreamID: 1, // clients send odd numbers
- BlockFragment: st.encodeHeader(
- ":method", "POST",
- "content-length", strconv.Itoa(len(content)),
- ),
- EndStream: false, // to say DATA frames are coming
- EndHeaders: true,
- })
- st.writeData(1, true, []byte(content))
- })
-}
-
-func TestServer_Request_Post_Body_ContentLength_TooLarge(t *testing.T) {
- testBodyContentsFail(t, 3, "request declared a Content-Length of 3 but only wrote 2 bytes",
- func(st *serverTester) {
- st.writeHeaders(HeadersFrameParam{
- StreamID: 1, // clients send odd numbers
- BlockFragment: st.encodeHeader(
- ":method", "POST",
- "content-length", "3",
- ),
- EndStream: false, // to say DATA frames are coming
- EndHeaders: true,
- })
- st.writeData(1, true, []byte("12"))
- })
-}
-
-func TestServer_Request_Post_Body_ContentLength_TooSmall(t *testing.T) {
- testBodyContentsFail(t, 4, "sender tried to send more than declared Content-Length of 4 bytes",
- func(st *serverTester) {
- st.writeHeaders(HeadersFrameParam{
- StreamID: 1, // clients send odd numbers
- BlockFragment: st.encodeHeader(
- ":method", "POST",
- "content-length", "4",
- ),
- EndStream: false, // to say DATA frames are coming
- EndHeaders: true,
- })
- st.writeData(1, true, []byte("12345"))
- })
-}
-
-func testBodyContents(t *testing.T, wantContentLength int64, wantBody string, write func(st *serverTester)) {
- testServerRequest(t, write, func(r *http.Request) {
- if r.Method != "POST" {
- t.Errorf("Method = %q; want POST", r.Method)
- }
- if r.ContentLength != wantContentLength {
- t.Errorf("ContentLength = %v; want %d", r.ContentLength, wantContentLength)
- }
- all, err := ioutil.ReadAll(r.Body)
- if err != nil {
- t.Fatal(err)
- }
- if string(all) != wantBody {
- t.Errorf("Read = %q; want %q", all, wantBody)
- }
- if err := r.Body.Close(); err != nil {
- t.Fatalf("Close: %v", err)
- }
- })
-}
-
-func testBodyContentsFail(t *testing.T, wantContentLength int64, wantReadError string, write func(st *serverTester)) {
- testServerRequest(t, write, func(r *http.Request) {
- if r.Method != "POST" {
- t.Errorf("Method = %q; want POST", r.Method)
- }
- if r.ContentLength != wantContentLength {
- t.Errorf("ContentLength = %v; want %d", r.ContentLength, wantContentLength)
- }
- all, err := ioutil.ReadAll(r.Body)
- if err == nil {
- t.Fatalf("expected an error (%q) reading from the body. Successfully read %q instead.",
- wantReadError, all)
- }
- if !strings.Contains(err.Error(), wantReadError) {
- t.Fatalf("Body.Read = %v; want substring %q", err, wantReadError)
- }
- if err := r.Body.Close(); err != nil {
- t.Fatalf("Close: %v", err)
- }
- })
-}
-
-// Using a Host header, instead of :authority
-func TestServer_Request_Get_Host(t *testing.T) {
- const host = "example.com"
- testServerRequest(t, func(st *serverTester) {
- st.writeHeaders(HeadersFrameParam{
- StreamID: 1, // clients send odd numbers
- BlockFragment: st.encodeHeader("host", host),
- EndStream: true,
- EndHeaders: true,
- })
- }, func(r *http.Request) {
- if r.Host != host {
- t.Errorf("Host = %q; want %q", r.Host, host)
- }
- })
-}
-
-// Using an :authority pseudo-header, instead of Host
-func TestServer_Request_Get_Authority(t *testing.T) {
- const host = "example.com"
- testServerRequest(t, func(st *serverTester) {
- st.writeHeaders(HeadersFrameParam{
- StreamID: 1, // clients send odd numbers
- BlockFragment: st.encodeHeader(":authority", host),
- EndStream: true,
- EndHeaders: true,
- })
- }, func(r *http.Request) {
- if r.Host != host {
- t.Errorf("Host = %q; want %q", r.Host, host)
- }
- })
-}
-
-func TestServer_Request_WithContinuation(t *testing.T) {
- wantHeader := http.Header{
- "Foo-One": []string{"value-one"},
- "Foo-Two": []string{"value-two"},
- "Foo-Three": []string{"value-three"},
- }
- testServerRequest(t, func(st *serverTester) {
- fullHeaders := st.encodeHeader(
- "foo-one", "value-one",
- "foo-two", "value-two",
- "foo-three", "value-three",
- )
- remain := fullHeaders
- chunks := 0
- for len(remain) > 0 {
- const maxChunkSize = 5
- chunk := remain
- if len(chunk) > maxChunkSize {
- chunk = chunk[:maxChunkSize]
- }
- remain = remain[len(chunk):]
-
- if chunks == 0 {
- st.writeHeaders(HeadersFrameParam{
- StreamID: 1, // clients send odd numbers
- BlockFragment: chunk,
- EndStream: true, // no DATA frames
- EndHeaders: false, // we'll have continuation frames
- })
- } else {
- err := st.fr.WriteContinuation(1, len(remain) == 0, chunk)
- if err != nil {
- t.Fatal(err)
- }
- }
- chunks++
- }
- if chunks < 2 {
- t.Fatal("too few chunks")
- }
- }, func(r *http.Request) {
- if !reflect.DeepEqual(r.Header, wantHeader) {
- t.Errorf("Header = %#v; want %#v", r.Header, wantHeader)
- }
- })
-}
-
-// Concatenated cookie headers. ("8.1.2.5 Compressing the Cookie Header Field")
-func TestServer_Request_CookieConcat(t *testing.T) {
- const host = "example.com"
- testServerRequest(t, func(st *serverTester) {
- st.bodylessReq1(
- ":authority", host,
- "cookie", "a=b",
- "cookie", "c=d",
- "cookie", "e=f",
- )
- }, func(r *http.Request) {
- const want = "a=b; c=d; e=f"
- if got := r.Header.Get("Cookie"); got != want {
- t.Errorf("Cookie = %q; want %q", got, want)
- }
- })
-}
-
-func TestServer_Request_Reject_CapitalHeader(t *testing.T) {
- testRejectRequest(t, func(st *serverTester) { st.bodylessReq1("UPPER", "v") })
-}
-
-func TestServer_Request_Reject_HeaderFieldNameColon(t *testing.T) {
- testRejectRequest(t, func(st *serverTester) { st.bodylessReq1("has:colon", "v") })
-}
-
-func TestServer_Request_Reject_HeaderFieldNameNULL(t *testing.T) {
- testRejectRequest(t, func(st *serverTester) { st.bodylessReq1("has\x00null", "v") })
-}
-
-func TestServer_Request_Reject_HeaderFieldNameEmpty(t *testing.T) {
- testRejectRequest(t, func(st *serverTester) { st.bodylessReq1("", "v") })
-}
-
-func TestServer_Request_Reject_HeaderFieldValueNewline(t *testing.T) {
- testRejectRequest(t, func(st *serverTester) { st.bodylessReq1("foo", "has\nnewline") })
-}
-
-func TestServer_Request_Reject_HeaderFieldValueCR(t *testing.T) {
- testRejectRequest(t, func(st *serverTester) { st.bodylessReq1("foo", "has\rcarriage") })
-}
-
-func TestServer_Request_Reject_HeaderFieldValueDEL(t *testing.T) {
- testRejectRequest(t, func(st *serverTester) { st.bodylessReq1("foo", "has\x7fdel") })
-}
-
-func TestServer_Request_Reject_Pseudo_Missing_method(t *testing.T) {
- testRejectRequest(t, func(st *serverTester) { st.bodylessReq1(":method", "") })
-}
-
-func TestServer_Request_Reject_Pseudo_ExactlyOne(t *testing.T) {
- // 8.1.2.3 Request Pseudo-Header Fields
- // "All HTTP/2 requests MUST include exactly one valid value" ...
- testRejectRequest(t, func(st *serverTester) {
- st.addLogFilter("duplicate pseudo-header")
- st.bodylessReq1(":method", "GET", ":method", "POST")
- })
-}
-
-func TestServer_Request_Reject_Pseudo_AfterRegular(t *testing.T) {
- // 8.1.2.3 Request Pseudo-Header Fields
- // "All pseudo-header fields MUST appear in the header block
- // before regular header fields. Any request or response that
- // contains a pseudo-header field that appears in a header
- // block after a regular header field MUST be treated as
- // malformed (Section 8.1.2.6)."
- testRejectRequest(t, func(st *serverTester) {
- st.addLogFilter("pseudo-header after regular header")
- var buf bytes.Buffer
- enc := hpack.NewEncoder(&buf)
- enc.WriteField(hpack.HeaderField{Name: ":method", Value: "GET"})
- enc.WriteField(hpack.HeaderField{Name: "regular", Value: "foobar"})
- enc.WriteField(hpack.HeaderField{Name: ":path", Value: "/"})
- enc.WriteField(hpack.HeaderField{Name: ":scheme", Value: "https"})
- st.writeHeaders(HeadersFrameParam{
- StreamID: 1, // clients send odd numbers
- BlockFragment: buf.Bytes(),
- EndStream: true,
- EndHeaders: true,
- })
- })
-}
-
-func TestServer_Request_Reject_Pseudo_Missing_path(t *testing.T) {
- testRejectRequest(t, func(st *serverTester) { st.bodylessReq1(":path", "") })
-}
-
-func TestServer_Request_Reject_Pseudo_Missing_scheme(t *testing.T) {
- testRejectRequest(t, func(st *serverTester) { st.bodylessReq1(":scheme", "") })
-}
-
-func TestServer_Request_Reject_Pseudo_scheme_invalid(t *testing.T) {
- testRejectRequest(t, func(st *serverTester) { st.bodylessReq1(":scheme", "bogus") })
-}
-
-func TestServer_Request_Reject_Pseudo_Unknown(t *testing.T) {
- testRejectRequest(t, func(st *serverTester) {
- st.addLogFilter(`invalid pseudo-header ":unknown_thing"`)
- st.bodylessReq1(":unknown_thing", "")
- })
-}
-
-func testRejectRequest(t *testing.T, send func(*serverTester)) {
- st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
- t.Fatal("server request made it to handler; should've been rejected")
- })
- defer st.Close()
-
- st.greet()
- send(st)
- st.wantRSTStream(1, ErrCodeProtocol)
-}
-
-func TestServer_Request_Connect(t *testing.T) {
- testServerRequest(t, func(st *serverTester) {
- st.writeHeaders(HeadersFrameParam{
- StreamID: 1,
- BlockFragment: st.encodeHeaderRaw(
- ":method", "CONNECT",
- ":authority", "example.com:123",
- ),
- EndStream: true,
- EndHeaders: true,
- })
- }, func(r *http.Request) {
- if g, w := r.Method, "CONNECT"; g != w {
- t.Errorf("Method = %q; want %q", g, w)
- }
- if g, w := r.RequestURI, "example.com:123"; g != w {
- t.Errorf("RequestURI = %q; want %q", g, w)
- }
- if g, w := r.URL.Host, "example.com:123"; g != w {
- t.Errorf("URL.Host = %q; want %q", g, w)
- }
- })
-}
-
-func TestServer_Request_Connect_InvalidPath(t *testing.T) {
- testServerRejectsStream(t, ErrCodeProtocol, func(st *serverTester) {
- st.writeHeaders(HeadersFrameParam{
- StreamID: 1,
- BlockFragment: st.encodeHeaderRaw(
- ":method", "CONNECT",
- ":authority", "example.com:123",
- ":path", "/bogus",
- ),
- EndStream: true,
- EndHeaders: true,
- })
- })
-}
-
-func TestServer_Request_Connect_InvalidScheme(t *testing.T) {
- testServerRejectsStream(t, ErrCodeProtocol, func(st *serverTester) {
- st.writeHeaders(HeadersFrameParam{
- StreamID: 1,
- BlockFragment: st.encodeHeaderRaw(
- ":method", "CONNECT",
- ":authority", "example.com:123",
- ":scheme", "https",
- ),
- EndStream: true,
- EndHeaders: true,
- })
- })
-}
-
-func TestServer_Ping(t *testing.T) {
- st := newServerTester(t, nil)
- defer st.Close()
- st.greet()
-
- // Server should ignore this one, since it has ACK set.
- ackPingData := [8]byte{1, 2, 4, 8, 16, 32, 64, 128}
- if err := st.fr.WritePing(true, ackPingData); err != nil {
- t.Fatal(err)
- }
-
- // But the server should reply to this one, since ACK is false.
- pingData := [8]byte{1, 2, 3, 4, 5, 6, 7, 8}
- if err := st.fr.WritePing(false, pingData); err != nil {
- t.Fatal(err)
- }
-
- pf := st.wantPing()
- if !pf.Flags.Has(FlagPingAck) {
- t.Error("response ping doesn't have ACK set")
- }
- if pf.Data != pingData {
- t.Errorf("response ping has data %q; want %q", pf.Data, pingData)
- }
-}
-
-func TestServer_RejectsLargeFrames(t *testing.T) {
- if runtime.GOOS == "windows" {
- t.Skip("see golang.org/issue/13434")
- }
-
- st := newServerTester(t, nil)
- defer st.Close()
- st.greet()
-
-	// Write a frame that is too large (by one byte).
-	// We ignore the return value because it's expected that the server
-	// will only read the first 9 bytes (the header) and then disconnect.
- st.fr.WriteRawFrame(0xff, 0, 0, make([]byte, defaultMaxReadFrameSize+1))
-
- gf := st.wantGoAway()
- if gf.ErrCode != ErrCodeFrameSize {
- t.Errorf("GOAWAY err = %v; want %v", gf.ErrCode, ErrCodeFrameSize)
- }
- if st.logBuf.Len() != 0 {
- // Previously we spun here for a bit until the GOAWAY disconnect
- // timer fired, logging while we fired.
- t.Errorf("unexpected server output: %.500s\n", st.logBuf.Bytes())
- }
-}
-
-func TestServer_Handler_Sends_WindowUpdate(t *testing.T) {
- puppet := newHandlerPuppet()
- st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
- puppet.act(w, r)
- })
- defer st.Close()
- defer puppet.done()
-
- st.greet()
-
- st.writeHeaders(HeadersFrameParam{
- StreamID: 1, // clients send odd numbers
- BlockFragment: st.encodeHeader(":method", "POST"),
- EndStream: false, // data coming
- EndHeaders: true,
- })
- st.writeData(1, false, []byte("abcdef"))
- puppet.do(readBodyHandler(t, "abc"))
- st.wantWindowUpdate(0, 3)
- st.wantWindowUpdate(1, 3)
-
- puppet.do(readBodyHandler(t, "def"))
- st.wantWindowUpdate(0, 3)
- st.wantWindowUpdate(1, 3)
-
- st.writeData(1, true, []byte("ghijkl")) // END_STREAM here
- puppet.do(readBodyHandler(t, "ghi"))
- puppet.do(readBodyHandler(t, "jkl"))
- st.wantWindowUpdate(0, 3)
- st.wantWindowUpdate(0, 3) // no more stream-level, since END_STREAM
-}
-
-func TestServer_Send_GoAway_After_Bogus_WindowUpdate(t *testing.T) {
- st := newServerTester(t, nil)
- defer st.Close()
- st.greet()
- if err := st.fr.WriteWindowUpdate(0, 1<<31-1); err != nil {
- t.Fatal(err)
- }
- gf := st.wantGoAway()
- if gf.ErrCode != ErrCodeFlowControl {
- t.Errorf("GOAWAY err = %v; want %v", gf.ErrCode, ErrCodeFlowControl)
- }
- if gf.LastStreamID != 0 {
- t.Errorf("GOAWAY last stream ID = %v; want %v", gf.LastStreamID, 0)
- }
-}
-
-func TestServer_Send_RstStream_After_Bogus_WindowUpdate(t *testing.T) {
- inHandler := make(chan bool)
- blockHandler := make(chan bool)
- st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
- inHandler <- true
- <-blockHandler
- })
- defer st.Close()
- defer close(blockHandler)
- st.greet()
- st.writeHeaders(HeadersFrameParam{
- StreamID: 1,
- BlockFragment: st.encodeHeader(":method", "POST"),
- EndStream: false, // keep it open
- EndHeaders: true,
- })
- <-inHandler
- // Send a bogus window update:
- if err := st.fr.WriteWindowUpdate(1, 1<<31-1); err != nil {
- t.Fatal(err)
- }
- st.wantRSTStream(1, ErrCodeFlowControl)
-}
-
-// testServerPostUnblock sends a hanging POST with unsent data to the handler,
-// then, once the handler is running, runs fn and verifies that the error
-// returned from the handler is acceptable. It fails if it takes over 5 seconds
-// for the handler to exit.
-func testServerPostUnblock(t *testing.T,
- handler func(http.ResponseWriter, *http.Request) error,
- fn func(*serverTester),
- checkErr func(error),
- otherHeaders ...string) {
- inHandler := make(chan bool)
- errc := make(chan error, 1)
- st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
- inHandler <- true
- errc <- handler(w, r)
- })
- st.greet()
- st.writeHeaders(HeadersFrameParam{
- StreamID: 1,
- BlockFragment: st.encodeHeader(append([]string{":method", "POST"}, otherHeaders...)...),
- EndStream: false, // keep it open
- EndHeaders: true,
- })
- <-inHandler
- fn(st)
- select {
- case err := <-errc:
- if checkErr != nil {
- checkErr(err)
- }
- case <-time.After(5 * time.Second):
- t.Fatal("timeout waiting for Handler to return")
- }
- st.Close()
-}
-
-func TestServer_RSTStream_Unblocks_Read(t *testing.T) {
- testServerPostUnblock(t,
- func(w http.ResponseWriter, r *http.Request) (err error) {
- _, err = r.Body.Read(make([]byte, 1))
- return
- },
- func(st *serverTester) {
- if err := st.fr.WriteRSTStream(1, ErrCodeCancel); err != nil {
- t.Fatal(err)
- }
- },
- func(err error) {
- want := StreamError{StreamID: 0x1, Code: 0x8}
- if !reflect.DeepEqual(err, want) {
- t.Errorf("Read error = %v; want %v", err, want)
- }
- },
- )
-}
-
-func TestServer_RSTStream_Unblocks_Header_Write(t *testing.T) {
- // Run this test a bunch, because it doesn't always
- // deadlock. But with a bunch, it did.
- n := 50
- if testing.Short() {
- n = 5
- }
- for i := 0; i < n; i++ {
- testServer_RSTStream_Unblocks_Header_Write(t)
- }
-}
-
-func testServer_RSTStream_Unblocks_Header_Write(t *testing.T) {
- inHandler := make(chan bool, 1)
- unblockHandler := make(chan bool, 1)
- headerWritten := make(chan bool, 1)
- wroteRST := make(chan bool, 1)
-
- st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
- inHandler <- true
- <-wroteRST
- w.Header().Set("foo", "bar")
- w.WriteHeader(200)
- w.(http.Flusher).Flush()
- headerWritten <- true
- <-unblockHandler
- })
- defer st.Close()
-
- st.greet()
- st.writeHeaders(HeadersFrameParam{
- StreamID: 1,
- BlockFragment: st.encodeHeader(":method", "POST"),
- EndStream: false, // keep it open
- EndHeaders: true,
- })
- <-inHandler
- if err := st.fr.WriteRSTStream(1, ErrCodeCancel); err != nil {
- t.Fatal(err)
- }
- wroteRST <- true
- st.awaitIdle()
- select {
- case <-headerWritten:
- case <-time.After(2 * time.Second):
- t.Error("timeout waiting for header write")
- }
- unblockHandler <- true
-}
-
-func TestServer_DeadConn_Unblocks_Read(t *testing.T) {
- testServerPostUnblock(t,
- func(w http.ResponseWriter, r *http.Request) (err error) {
- _, err = r.Body.Read(make([]byte, 1))
- return
- },
- func(st *serverTester) { st.cc.Close() },
- func(err error) {
- if err == nil {
- t.Error("unexpected nil error from Request.Body.Read")
- }
- },
- )
-}
-
-var blockUntilClosed = func(w http.ResponseWriter, r *http.Request) error {
- <-w.(http.CloseNotifier).CloseNotify()
- return nil
-}
-
-func TestServer_CloseNotify_After_RSTStream(t *testing.T) {
- testServerPostUnblock(t, blockUntilClosed, func(st *serverTester) {
- if err := st.fr.WriteRSTStream(1, ErrCodeCancel); err != nil {
- t.Fatal(err)
- }
- }, nil)
-}
-
-func TestServer_CloseNotify_After_ConnClose(t *testing.T) {
- testServerPostUnblock(t, blockUntilClosed, func(st *serverTester) { st.cc.Close() }, nil)
-}
-
-// Test that CloseNotify unblocks after a stream error due to the client's
-// problem that's unrelated to them explicitly canceling it (which is
-// TestServer_CloseNotify_After_RSTStream above)
-func TestServer_CloseNotify_After_StreamError(t *testing.T) {
- testServerPostUnblock(t, blockUntilClosed, func(st *serverTester) {
- // data longer than declared Content-Length => stream error
- st.writeData(1, true, []byte("1234"))
- }, nil, "content-length", "3")
-}
-
-func TestServer_StateTransitions(t *testing.T) {
- var st *serverTester
- inHandler := make(chan bool)
- writeData := make(chan bool)
- leaveHandler := make(chan bool)
- st = newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
- inHandler <- true
- if st.stream(1) == nil {
- t.Errorf("nil stream 1 in handler")
- }
- if got, want := st.streamState(1), stateOpen; got != want {
- t.Errorf("in handler, state is %v; want %v", got, want)
- }
- writeData <- true
- if n, err := r.Body.Read(make([]byte, 1)); n != 0 || err != io.EOF {
- t.Errorf("body read = %d, %v; want 0, EOF", n, err)
- }
- if got, want := st.streamState(1), stateHalfClosedRemote; got != want {
- t.Errorf("in handler, state is %v; want %v", got, want)
- }
-
- <-leaveHandler
- })
- st.greet()
- if st.stream(1) != nil {
- t.Fatal("stream 1 should be empty")
- }
- if got := st.streamState(1); got != stateIdle {
- t.Fatalf("stream 1 should be idle; got %v", got)
- }
-
- st.writeHeaders(HeadersFrameParam{
- StreamID: 1,
- BlockFragment: st.encodeHeader(":method", "POST"),
- EndStream: false, // keep it open
- EndHeaders: true,
- })
- <-inHandler
- <-writeData
- st.writeData(1, true, nil)
-
- leaveHandler <- true
- hf := st.wantHeaders()
- if !hf.StreamEnded() {
- t.Fatal("expected END_STREAM flag")
- }
-
- if got, want := st.streamState(1), stateClosed; got != want {
- t.Errorf("at end, state is %v; want %v", got, want)
- }
- if st.stream(1) != nil {
- t.Fatal("at end, stream 1 should be gone")
- }
-}
-
-// test HEADERS w/o EndHeaders + another HEADERS (should get rejected)
-func TestServer_Rejects_HeadersNoEnd_Then_Headers(t *testing.T) {
- testServerRejectsConn(t, func(st *serverTester) {
- st.writeHeaders(HeadersFrameParam{
- StreamID: 1,
- BlockFragment: st.encodeHeader(),
- EndStream: true,
- EndHeaders: false,
- })
- st.writeHeaders(HeadersFrameParam{ // Not a continuation.
- StreamID: 3, // different stream.
- BlockFragment: st.encodeHeader(),
- EndStream: true,
- EndHeaders: true,
- })
- })
-}
-
-// test HEADERS w/o EndHeaders + PING (should get rejected)
-func TestServer_Rejects_HeadersNoEnd_Then_Ping(t *testing.T) {
- testServerRejectsConn(t, func(st *serverTester) {
- st.writeHeaders(HeadersFrameParam{
- StreamID: 1,
- BlockFragment: st.encodeHeader(),
- EndStream: true,
- EndHeaders: false,
- })
- if err := st.fr.WritePing(false, [8]byte{}); err != nil {
- t.Fatal(err)
- }
- })
-}
-
-// test HEADERS w/ EndHeaders + a continuation HEADERS (should get rejected)
-func TestServer_Rejects_HeadersEnd_Then_Continuation(t *testing.T) {
- testServerRejectsConn(t, func(st *serverTester) {
- st.writeHeaders(HeadersFrameParam{
- StreamID: 1,
- BlockFragment: st.encodeHeader(),
- EndStream: true,
- EndHeaders: true,
- })
- st.wantHeaders()
- if err := st.fr.WriteContinuation(1, true, encodeHeaderNoImplicit(t, "foo", "bar")); err != nil {
- t.Fatal(err)
- }
- })
-}
-
-// test HEADERS w/o EndHeaders + a continuation HEADERS on wrong stream ID
-func TestServer_Rejects_HeadersNoEnd_Then_ContinuationWrongStream(t *testing.T) {
- testServerRejectsConn(t, func(st *serverTester) {
- st.writeHeaders(HeadersFrameParam{
- StreamID: 1,
- BlockFragment: st.encodeHeader(),
- EndStream: true,
- EndHeaders: false,
- })
- if err := st.fr.WriteContinuation(3, true, encodeHeaderNoImplicit(t, "foo", "bar")); err != nil {
- t.Fatal(err)
- }
- })
-}
-
-// No HEADERS on stream 0.
-func TestServer_Rejects_Headers0(t *testing.T) {
- testServerRejectsConn(t, func(st *serverTester) {
- st.fr.AllowIllegalWrites = true
- st.writeHeaders(HeadersFrameParam{
- StreamID: 0,
- BlockFragment: st.encodeHeader(),
- EndStream: true,
- EndHeaders: true,
- })
- })
-}
-
-// No CONTINUATION on stream 0.
-func TestServer_Rejects_Continuation0(t *testing.T) {
- testServerRejectsConn(t, func(st *serverTester) {
- st.fr.AllowIllegalWrites = true
- if err := st.fr.WriteContinuation(0, true, st.encodeHeader()); err != nil {
- t.Fatal(err)
- }
- })
-}
-
-func TestServer_Rejects_PushPromise(t *testing.T) {
- testServerRejectsConn(t, func(st *serverTester) {
- pp := PushPromiseParam{
- StreamID: 1,
- PromiseID: 3,
- }
- if err := st.fr.WritePushPromise(pp); err != nil {
- t.Fatal(err)
- }
- })
-}
-
-// testServerRejectsConn tests that the server hangs up with a GOAWAY
-// frame and a server close after the client does something
-// deserving a CONNECTION_ERROR.
-func testServerRejectsConn(t *testing.T, writeReq func(*serverTester)) {
- st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {})
- st.addLogFilter("connection error: PROTOCOL_ERROR")
- defer st.Close()
- st.greet()
- writeReq(st)
-
- st.wantGoAway()
- errc := make(chan error, 1)
- go func() {
- fr, err := st.fr.ReadFrame()
- if err == nil {
- err = fmt.Errorf("got frame of type %T", fr)
- }
- errc <- err
- }()
- select {
- case err := <-errc:
- if err != io.EOF {
- t.Errorf("ReadFrame = %v; want io.EOF", err)
- }
- case <-time.After(2 * time.Second):
- t.Error("timeout waiting for disconnect")
- }
-}
-
-// testServerRejectsStream tests that the server sends a RST_STREAM with the provided
-// error code after a client sends a bogus request.
-func testServerRejectsStream(t *testing.T, code ErrCode, writeReq func(*serverTester)) {
- st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {})
- defer st.Close()
- st.greet()
- writeReq(st)
- st.wantRSTStream(1, code)
-}
-
-// testServerRequest sets up an idle HTTP/2 connection and lets you
-// write a single request with writeReq, and then verify that the
-// *http.Request is built correctly in checkReq.
-func testServerRequest(t *testing.T, writeReq func(*serverTester), checkReq func(*http.Request)) {
- gotReq := make(chan bool, 1)
- st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
- if r.Body == nil {
- t.Fatal("nil Body")
- }
- checkReq(r)
- gotReq <- true
- })
- defer st.Close()
-
- st.greet()
- writeReq(st)
-
- select {
- case <-gotReq:
- case <-time.After(2 * time.Second):
- t.Error("timeout waiting for request")
- }
-}
-
-func getSlash(st *serverTester) { st.bodylessReq1() }
-
-func TestServer_Response_NoData(t *testing.T) {
- testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
- // Nothing.
- return nil
- }, func(st *serverTester) {
- getSlash(st)
- hf := st.wantHeaders()
- if !hf.StreamEnded() {
- t.Fatal("want END_STREAM flag")
- }
- if !hf.HeadersEnded() {
- t.Fatal("want END_HEADERS flag")
- }
- })
-}
-
-func TestServer_Response_NoData_Header_FooBar(t *testing.T) {
- testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
- w.Header().Set("Foo-Bar", "some-value")
- return nil
- }, func(st *serverTester) {
- getSlash(st)
- hf := st.wantHeaders()
- if !hf.StreamEnded() {
- t.Fatal("want END_STREAM flag")
- }
- if !hf.HeadersEnded() {
- t.Fatal("want END_HEADERS flag")
- }
- goth := st.decodeHeader(hf.HeaderBlockFragment())
- wanth := [][2]string{
- {":status", "200"},
- {"foo-bar", "some-value"},
- {"content-type", "text/plain; charset=utf-8"},
- {"content-length", "0"},
- }
- if !reflect.DeepEqual(goth, wanth) {
- t.Errorf("Got headers %v; want %v", goth, wanth)
- }
- })
-}
-
-func TestServer_Response_Data_Sniff_DoesntOverride(t *testing.T) {
- const msg = "<html>this is HTML."
- testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
- w.Header().Set("Content-Type", "foo/bar")
- io.WriteString(w, msg)
- return nil
- }, func(st *serverTester) {
- getSlash(st)
- hf := st.wantHeaders()
- if hf.StreamEnded() {
- t.Fatal("don't want END_STREAM, expecting data")
- }
- if !hf.HeadersEnded() {
- t.Fatal("want END_HEADERS flag")
- }
- goth := st.decodeHeader(hf.HeaderBlockFragment())
- wanth := [][2]string{
- {":status", "200"},
- {"content-type", "foo/bar"},
- {"content-length", strconv.Itoa(len(msg))},
- }
- if !reflect.DeepEqual(goth, wanth) {
- t.Errorf("Got headers %v; want %v", goth, wanth)
- }
- df := st.wantData()
- if !df.StreamEnded() {
- t.Error("expected DATA to have END_STREAM flag")
- }
- if got := string(df.Data()); got != msg {
- t.Errorf("got DATA %q; want %q", got, msg)
- }
- })
-}
-
-func TestServer_Response_TransferEncoding_chunked(t *testing.T) {
- const msg = "hi"
- testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
- w.Header().Set("Transfer-Encoding", "chunked") // should be stripped
- io.WriteString(w, msg)
- return nil
- }, func(st *serverTester) {
- getSlash(st)
- hf := st.wantHeaders()
- goth := st.decodeHeader(hf.HeaderBlockFragment())
- wanth := [][2]string{
- {":status", "200"},
- {"content-type", "text/plain; charset=utf-8"},
- {"content-length", strconv.Itoa(len(msg))},
- }
- if !reflect.DeepEqual(goth, wanth) {
- t.Errorf("Got headers %v; want %v", goth, wanth)
- }
- })
-}
-
-// Header accessed only after the initial write.
-func TestServer_Response_Data_IgnoreHeaderAfterWrite_After(t *testing.T) {
- const msg = "<html>this is HTML."
- testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
- io.WriteString(w, msg)
- w.Header().Set("foo", "should be ignored")
- return nil
- }, func(st *serverTester) {
- getSlash(st)
- hf := st.wantHeaders()
- if hf.StreamEnded() {
- t.Fatal("unexpected END_STREAM")
- }
- if !hf.HeadersEnded() {
- t.Fatal("want END_HEADERS flag")
- }
- goth := st.decodeHeader(hf.HeaderBlockFragment())
- wanth := [][2]string{
- {":status", "200"},
- {"content-type", "text/html; charset=utf-8"},
- {"content-length", strconv.Itoa(len(msg))},
- }
- if !reflect.DeepEqual(goth, wanth) {
- t.Errorf("Got headers %v; want %v", goth, wanth)
- }
- })
-}
-
-// Header accessed before the initial write and later mutated.
-func TestServer_Response_Data_IgnoreHeaderAfterWrite_Overwrite(t *testing.T) {
- const msg = "<html>this is HTML."
- testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
- w.Header().Set("foo", "proper value")
- io.WriteString(w, msg)
- w.Header().Set("foo", "should be ignored")
- return nil
- }, func(st *serverTester) {
- getSlash(st)
- hf := st.wantHeaders()
- if hf.StreamEnded() {
- t.Fatal("unexpected END_STREAM")
- }
- if !hf.HeadersEnded() {
- t.Fatal("want END_HEADERS flag")
- }
- goth := st.decodeHeader(hf.HeaderBlockFragment())
- wanth := [][2]string{
- {":status", "200"},
- {"foo", "proper value"},
- {"content-type", "text/html; charset=utf-8"},
- {"content-length", strconv.Itoa(len(msg))},
- }
- if !reflect.DeepEqual(goth, wanth) {
- t.Errorf("Got headers %v; want %v", goth, wanth)
- }
- })
-}
-
-func TestServer_Response_Data_SniffLenType(t *testing.T) {
- const msg = "<html>this is HTML."
- testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
- io.WriteString(w, msg)
- return nil
- }, func(st *serverTester) {
- getSlash(st)
- hf := st.wantHeaders()
- if hf.StreamEnded() {
- t.Fatal("don't want END_STREAM, expecting data")
- }
- if !hf.HeadersEnded() {
- t.Fatal("want END_HEADERS flag")
- }
- goth := st.decodeHeader(hf.HeaderBlockFragment())
- wanth := [][2]string{
- {":status", "200"},
- {"content-type", "text/html; charset=utf-8"},
- {"content-length", strconv.Itoa(len(msg))},
- }
- if !reflect.DeepEqual(goth, wanth) {
- t.Errorf("Got headers %v; want %v", goth, wanth)
- }
- df := st.wantData()
- if !df.StreamEnded() {
- t.Error("expected DATA to have END_STREAM flag")
- }
- if got := string(df.Data()); got != msg {
- t.Errorf("got DATA %q; want %q", got, msg)
- }
- })
-}
-
-func TestServer_Response_Header_Flush_MidWrite(t *testing.T) {
- const msg = "<html>this is HTML"
- const msg2 = ", and this is the next chunk"
- testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
- io.WriteString(w, msg)
- w.(http.Flusher).Flush()
- io.WriteString(w, msg2)
- return nil
- }, func(st *serverTester) {
- getSlash(st)
- hf := st.wantHeaders()
- if hf.StreamEnded() {
- t.Fatal("unexpected END_STREAM flag")
- }
- if !hf.HeadersEnded() {
- t.Fatal("want END_HEADERS flag")
- }
- goth := st.decodeHeader(hf.HeaderBlockFragment())
- wanth := [][2]string{
- {":status", "200"},
- {"content-type", "text/html; charset=utf-8"}, // sniffed
- // and no content-length
- }
- if !reflect.DeepEqual(goth, wanth) {
- t.Errorf("Got headers %v; want %v", goth, wanth)
- }
- {
- df := st.wantData()
- if df.StreamEnded() {
- t.Error("unexpected END_STREAM flag")
- }
- if got := string(df.Data()); got != msg {
- t.Errorf("got DATA %q; want %q", got, msg)
- }
- }
- {
- df := st.wantData()
- if !df.StreamEnded() {
- t.Error("wanted END_STREAM flag on last data chunk")
- }
- if got := string(df.Data()); got != msg2 {
- t.Errorf("got DATA %q; want %q", got, msg2)
- }
- }
- })
-}
-
-func TestServer_Response_LargeWrite(t *testing.T) {
- const size = 1 << 20
- const maxFrameSize = 16 << 10
- testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
- n, err := w.Write(bytes.Repeat([]byte("a"), size))
- if err != nil {
- return fmt.Errorf("Write error: %v", err)
- }
- if n != size {
- return fmt.Errorf("wrong size %d from Write", n)
- }
- return nil
- }, func(st *serverTester) {
- if err := st.fr.WriteSettings(
- Setting{SettingInitialWindowSize, 0},
- Setting{SettingMaxFrameSize, maxFrameSize},
- ); err != nil {
- t.Fatal(err)
- }
- st.wantSettingsAck()
-
- getSlash(st) // make the single request
-
- // Give the handler quota to write:
- if err := st.fr.WriteWindowUpdate(1, size); err != nil {
- t.Fatal(err)
- }
- // Give the handler quota to write to connection-level
- // window as well
- if err := st.fr.WriteWindowUpdate(0, size); err != nil {
- t.Fatal(err)
- }
- hf := st.wantHeaders()
- if hf.StreamEnded() {
- t.Fatal("unexpected END_STREAM flag")
- }
- if !hf.HeadersEnded() {
- t.Fatal("want END_HEADERS flag")
- }
- goth := st.decodeHeader(hf.HeaderBlockFragment())
- wanth := [][2]string{
- {":status", "200"},
- {"content-type", "text/plain; charset=utf-8"}, // sniffed
- // and no content-length
- }
- if !reflect.DeepEqual(goth, wanth) {
- t.Errorf("Got headers %v; want %v", goth, wanth)
- }
- var bytes, frames int
- for {
- df := st.wantData()
- bytes += len(df.Data())
- frames++
- for _, b := range df.Data() {
- if b != 'a' {
- t.Fatal("non-'a' byte seen in DATA")
- }
- }
- if df.StreamEnded() {
- break
- }
- }
- if bytes != size {
- t.Errorf("Got %d bytes; want %d", bytes, size)
- }
- if want := int(size / maxFrameSize); frames < want || frames > want*2 {
- t.Errorf("Got %d frames; want %d", frames, want)
- }
- })
-}
-
-// Test that the handler can't write more than the client allows
-func TestServer_Response_LargeWrite_FlowControlled(t *testing.T) {
- const size = 1 << 20
- const maxFrameSize = 16 << 10
- testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
- w.(http.Flusher).Flush()
- n, err := w.Write(bytes.Repeat([]byte("a"), size))
- if err != nil {
- return fmt.Errorf("Write error: %v", err)
- }
- if n != size {
- return fmt.Errorf("wrong size %d from Write", n)
- }
- return nil
- }, func(st *serverTester) {
- // Set the window size to something explicit for this test.
- // It's also how much initial data we expect.
- const initWindowSize = 123
- if err := st.fr.WriteSettings(
- Setting{SettingInitialWindowSize, initWindowSize},
- Setting{SettingMaxFrameSize, maxFrameSize},
- ); err != nil {
- t.Fatal(err)
- }
- st.wantSettingsAck()
-
- getSlash(st) // make the single request
- defer func() { st.fr.WriteRSTStream(1, ErrCodeCancel) }()
-
- hf := st.wantHeaders()
- if hf.StreamEnded() {
- t.Fatal("unexpected END_STREAM flag")
- }
- if !hf.HeadersEnded() {
- t.Fatal("want END_HEADERS flag")
- }
-
- df := st.wantData()
- if got := len(df.Data()); got != initWindowSize {
- t.Fatalf("Initial window size = %d but got DATA with %d bytes", initWindowSize, got)
- }
-
- for _, quota := range []int{1, 13, 127} {
- if err := st.fr.WriteWindowUpdate(1, uint32(quota)); err != nil {
- t.Fatal(err)
- }
- df := st.wantData()
- if int(quota) != len(df.Data()) {
- t.Fatalf("read %d bytes after giving %d quota", len(df.Data()), quota)
- }
- }
-
- if err := st.fr.WriteRSTStream(1, ErrCodeCancel); err != nil {
- t.Fatal(err)
- }
- })
-}
-
-// Test that the handler blocked in a Write is unblocked if the server sends a RST_STREAM.
-func TestServer_Response_RST_Unblocks_LargeWrite(t *testing.T) {
- const size = 1 << 20
- const maxFrameSize = 16 << 10
- testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
- w.(http.Flusher).Flush()
- errc := make(chan error, 1)
- go func() {
- _, err := w.Write(bytes.Repeat([]byte("a"), size))
- errc <- err
- }()
- select {
- case err := <-errc:
- if err == nil {
- return errors.New("unexpected nil error from Write in handler")
- }
- return nil
- case <-time.After(2 * time.Second):
- return errors.New("timeout waiting for Write in handler")
- }
- }, func(st *serverTester) {
- if err := st.fr.WriteSettings(
- Setting{SettingInitialWindowSize, 0},
- Setting{SettingMaxFrameSize, maxFrameSize},
- ); err != nil {
- t.Fatal(err)
- }
- st.wantSettingsAck()
-
- getSlash(st) // make the single request
-
- hf := st.wantHeaders()
- if hf.StreamEnded() {
- t.Fatal("unexpected END_STREAM flag")
- }
- if !hf.HeadersEnded() {
- t.Fatal("want END_HEADERS flag")
- }
-
- if err := st.fr.WriteRSTStream(1, ErrCodeCancel); err != nil {
- t.Fatal(err)
- }
- })
-}
-
-func TestServer_Response_Empty_Data_Not_FlowControlled(t *testing.T) {
- testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
- w.(http.Flusher).Flush()
- // Nothing; send empty DATA
- return nil
- }, func(st *serverTester) {
- // Handler gets no data quota:
- if err := st.fr.WriteSettings(Setting{SettingInitialWindowSize, 0}); err != nil {
- t.Fatal(err)
- }
- st.wantSettingsAck()
-
- getSlash(st) // make the single request
-
- hf := st.wantHeaders()
- if hf.StreamEnded() {
- t.Fatal("unexpected END_STREAM flag")
- }
- if !hf.HeadersEnded() {
- t.Fatal("want END_HEADERS flag")
- }
-
- df := st.wantData()
- if got := len(df.Data()); got != 0 {
- t.Fatalf("unexpected %d DATA bytes; want 0", got)
- }
- if !df.StreamEnded() {
- t.Fatal("DATA didn't have END_STREAM")
- }
- })
-}
-
-func TestServer_Response_Automatic100Continue(t *testing.T) {
- const msg = "foo"
- const reply = "bar"
- testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
- if v := r.Header.Get("Expect"); v != "" {
- t.Errorf("Expect header = %q; want empty", v)
- }
- buf := make([]byte, len(msg))
- // This read should trigger the 100-continue being sent.
- if n, err := io.ReadFull(r.Body, buf); err != nil || n != len(msg) || string(buf) != msg {
- return fmt.Errorf("ReadFull = %q, %v; want %q, nil", buf[:n], err, msg)
- }
- _, err := io.WriteString(w, reply)
- return err
- }, func(st *serverTester) {
- st.writeHeaders(HeadersFrameParam{
- StreamID: 1, // clients send odd numbers
- BlockFragment: st.encodeHeader(":method", "POST", "expect", "100-continue"),
- EndStream: false,
- EndHeaders: true,
- })
- hf := st.wantHeaders()
- if hf.StreamEnded() {
- t.Fatal("unexpected END_STREAM flag")
- }
- if !hf.HeadersEnded() {
- t.Fatal("want END_HEADERS flag")
- }
- goth := st.decodeHeader(hf.HeaderBlockFragment())
- wanth := [][2]string{
- {":status", "100"},
- }
- if !reflect.DeepEqual(goth, wanth) {
- t.Fatalf("Got headers %v; want %v", goth, wanth)
- }
-
- // Okay, they sent status 100, so we can send our
- // gigantic and/or sensitive "foo" payload now.
- st.writeData(1, true, []byte(msg))
-
- st.wantWindowUpdate(0, uint32(len(msg)))
-
- hf = st.wantHeaders()
- if hf.StreamEnded() {
- t.Fatal("expected data to follow")
- }
- if !hf.HeadersEnded() {
- t.Fatal("want END_HEADERS flag")
- }
- goth = st.decodeHeader(hf.HeaderBlockFragment())
- wanth = [][2]string{
- {":status", "200"},
- {"content-type", "text/plain; charset=utf-8"},
- {"content-length", strconv.Itoa(len(reply))},
- }
- if !reflect.DeepEqual(goth, wanth) {
- t.Errorf("Got headers %v; want %v", goth, wanth)
- }
-
- df := st.wantData()
- if string(df.Data()) != reply {
- t.Errorf("Client read %q; want %q", df.Data(), reply)
- }
- if !df.StreamEnded() {
- t.Errorf("expect data stream end")
- }
- })
-}
-
-func TestServer_HandlerWriteErrorOnDisconnect(t *testing.T) {
- errc := make(chan error, 1)
- testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
- p := []byte("some data.\n")
- for {
- _, err := w.Write(p)
- if err != nil {
- errc <- err
- return nil
- }
- }
- }, func(st *serverTester) {
- st.writeHeaders(HeadersFrameParam{
- StreamID: 1,
- BlockFragment: st.encodeHeader(),
- EndStream: false,
- EndHeaders: true,
- })
- hf := st.wantHeaders()
- if hf.StreamEnded() {
- t.Fatal("unexpected END_STREAM flag")
- }
- if !hf.HeadersEnded() {
- t.Fatal("want END_HEADERS flag")
- }
- // Close the connection and wait for the handler to (hopefully) notice.
- st.cc.Close()
- select {
- case <-errc:
- case <-time.After(5 * time.Second):
- t.Error("timeout")
- }
- })
-}
-
-func TestServer_Rejects_Too_Many_Streams(t *testing.T) {
- const testPath = "/some/path"
-
- inHandler := make(chan uint32)
- leaveHandler := make(chan bool)
- st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
- id := w.(*responseWriter).rws.stream.id
- inHandler <- id
- if id == 1+(defaultMaxStreams+1)*2 && r.URL.Path != testPath {
- t.Errorf("decoded final path as %q; want %q", r.URL.Path, testPath)
- }
- <-leaveHandler
- })
- defer st.Close()
- st.greet()
- nextStreamID := uint32(1)
- streamID := func() uint32 {
- defer func() { nextStreamID += 2 }()
- return nextStreamID
- }
- sendReq := func(id uint32, headers ...string) {
- st.writeHeaders(HeadersFrameParam{
- StreamID: id,
- BlockFragment: st.encodeHeader(headers...),
- EndStream: true,
- EndHeaders: true,
- })
- }
- for i := 0; i < defaultMaxStreams; i++ {
- sendReq(streamID())
- <-inHandler
- }
- defer func() {
- for i := 0; i < defaultMaxStreams; i++ {
- leaveHandler <- true
- }
- }()
-
- // And this one should cross the limit:
- // (It's also sent as a CONTINUATION, to verify we still track the decoder context,
- // even if we're rejecting it)
- rejectID := streamID()
- headerBlock := st.encodeHeader(":path", testPath)
- frag1, frag2 := headerBlock[:3], headerBlock[3:]
- st.writeHeaders(HeadersFrameParam{
- StreamID: rejectID,
- BlockFragment: frag1,
- EndStream: true,
- EndHeaders: false, // CONTINUATION coming
- })
- if err := st.fr.WriteContinuation(rejectID, true, frag2); err != nil {
- t.Fatal(err)
- }
- st.wantRSTStream(rejectID, ErrCodeProtocol)
-
- // But let a handler finish:
- leaveHandler <- true
- st.wantHeaders()
-
- // And now another stream should be able to start:
- goodID := streamID()
- sendReq(goodID, ":path", testPath)
- select {
- case got := <-inHandler:
- if got != goodID {
- t.Errorf("Got stream %d; want %d", got, goodID)
- }
- case <-time.After(3 * time.Second):
- t.Error("timeout waiting for handler")
- }
-}
-
-// So many response headers that the server needs to use CONTINUATION frames:
-func TestServer_Response_ManyHeaders_With_Continuation(t *testing.T) {
- testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
- h := w.Header()
- for i := 0; i < 5000; i++ {
- h.Set(fmt.Sprintf("x-header-%d", i), fmt.Sprintf("x-value-%d", i))
- }
- return nil
- }, func(st *serverTester) {
- getSlash(st)
- hf := st.wantHeaders()
- if hf.HeadersEnded() {
- t.Fatal("got unwanted END_HEADERS flag")
- }
- n := 0
- for {
- n++
- cf := st.wantContinuation()
- if cf.HeadersEnded() {
- break
- }
- }
- if n < 5 {
- t.Errorf("Only got %d CONTINUATION frames; expected 5+ (currently 6)", n)
- }
- })
-}
-
-// This previously crashed (reported by Mathieu Lonjaret as observed
-// while using Camlistore) because we got a DATA frame from the client
-// after the handler exited and our logic at the time was wrong,
-// keeping a stream in the map in stateClosed, which tickled an
-// invariant check later when we tried to remove that stream (via
-// defer sc.closeAllStreamsOnConnClose) when the serverConn serve loop
-// ended.
-func TestServer_NoCrash_HandlerClose_Then_ClientClose(t *testing.T) {
- testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
- // nothing
- return nil
- }, func(st *serverTester) {
- st.writeHeaders(HeadersFrameParam{
- StreamID: 1,
- BlockFragment: st.encodeHeader(),
- EndStream: false, // DATA is coming
- EndHeaders: true,
- })
- hf := st.wantHeaders()
- if !hf.HeadersEnded() || !hf.StreamEnded() {
- t.Fatalf("want END_HEADERS+END_STREAM, got %v", hf)
- }
-
- // Sent when a Handler closes while a client has
- // indicated it's still sending DATA:
- st.wantRSTStream(1, ErrCodeCancel)
-
- // Now the handler has ended, so it's ended its
- // stream, but the client hasn't closed its side
- // (stateClosedLocal). So send more data and verify
- // it doesn't crash with an internal invariant panic, like
- // it did before.
- st.writeData(1, true, []byte("foo"))
-
- // Sent after a peer sends data anyway (admittedly the
- // previous RST_STREAM might've still been in-flight),
- // but they'll get the more friendly 'cancel' code
- // first.
- st.wantRSTStream(1, ErrCodeStreamClosed)
-
- // Set up a bunch of machinery to record the panic we saw
- // previously.
- var (
- panMu sync.Mutex
- panicVal interface{}
- )
-
- testHookOnPanicMu.Lock()
- testHookOnPanic = func(sc *serverConn, pv interface{}) bool {
- panMu.Lock()
- panicVal = pv
- panMu.Unlock()
- return true
- }
- testHookOnPanicMu.Unlock()
-
- // Now force the serve loop to end, via closing the connection.
- st.cc.Close()
- select {
- case <-st.sc.doneServing:
- // Loop has exited.
- panMu.Lock()
- got := panicVal
- panMu.Unlock()
- if got != nil {
- t.Errorf("Got panic: %v", got)
- }
- case <-time.After(5 * time.Second):
- t.Error("timeout")
- }
- })
-}
-
-func TestServer_Rejects_TLS10(t *testing.T) { testRejectTLS(t, tls.VersionTLS10) }
-func TestServer_Rejects_TLS11(t *testing.T) { testRejectTLS(t, tls.VersionTLS11) }
-
-func testRejectTLS(t *testing.T, max uint16) {
- st := newServerTester(t, nil, func(c *tls.Config) {
- c.MaxVersion = max
- })
- defer st.Close()
- gf := st.wantGoAway()
- if got, want := gf.ErrCode, ErrCodeInadequateSecurity; got != want {
- t.Errorf("Got error code %v; want %v", got, want)
- }
-}
-
-func TestServer_Rejects_TLSBadCipher(t *testing.T) {
- st := newServerTester(t, nil, func(c *tls.Config) {
- // Only list bad ones:
- c.CipherSuites = []uint16{
- tls.TLS_RSA_WITH_RC4_128_SHA,
- tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA,
- tls.TLS_RSA_WITH_AES_128_CBC_SHA,
- tls.TLS_RSA_WITH_AES_256_CBC_SHA,
- tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,
- tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
- tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
- tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA,
- tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
- tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
- tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
- }
- })
- defer st.Close()
- gf := st.wantGoAway()
- if got, want := gf.ErrCode, ErrCodeInadequateSecurity; got != want {
- t.Errorf("Got error code %v; want %v", got, want)
- }
-}
-
-func TestServer_Advertises_Common_Cipher(t *testing.T) {
- const requiredSuite = tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
- st := newServerTester(t, nil, func(c *tls.Config) {
- // Have the client only support the one required by the spec.
- c.CipherSuites = []uint16{requiredSuite}
- }, func(ts *httptest.Server) {
- var srv *http.Server = ts.Config
- // Have the server configured with no specific cipher suites.
- // This tests that Go's defaults include the required one.
- srv.TLSConfig = nil
- })
- defer st.Close()
- st.greet()
-}
-
-func (st *serverTester) onHeaderField(f hpack.HeaderField) {
- if f.Name == "date" {
- return
- }
- st.decodedHeaders = append(st.decodedHeaders, [2]string{f.Name, f.Value})
-}
-
-func (st *serverTester) decodeHeader(headerBlock []byte) (pairs [][2]string) {
- st.decodedHeaders = nil
- if _, err := st.hpackDec.Write(headerBlock); err != nil {
- st.t.Fatalf("hpack decoding error: %v", err)
- }
- if err := st.hpackDec.Close(); err != nil {
- st.t.Fatalf("hpack decoding error: %v", err)
- }
- return st.decodedHeaders
-}
-
-// testServerResponse sets up an idle HTTP/2 connection and lets you
-// write a single request with the client func, reply to it with the provided handler,
-// and then verify the output with the serverTester (the handler must return nil on success).
-func testServerResponse(t testing.TB,
- handler func(http.ResponseWriter, *http.Request) error,
- client func(*serverTester),
-) {
- errc := make(chan error, 1)
- st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
- if r.Body == nil {
- t.Fatal("nil Body")
- }
- errc <- handler(w, r)
- })
- defer st.Close()
-
- donec := make(chan bool)
- go func() {
- defer close(donec)
- st.greet()
- client(st)
- }()
-
- select {
- case <-donec:
- return
- case <-time.After(5 * time.Second):
- t.Fatal("timeout")
- }
-
- select {
- case err := <-errc:
- if err != nil {
- t.Fatalf("Error in handler: %v", err)
- }
- case <-time.After(2 * time.Second):
- t.Error("timeout waiting for handler to finish")
- }
-}
-
-// readBodyHandler returns an http Handler func that reads len(want)
-// bytes from r.Body and fails t if the contents read were not
-// the value of want.
-func readBodyHandler(t *testing.T, want string) func(w http.ResponseWriter, r *http.Request) {
- return func(w http.ResponseWriter, r *http.Request) {
- buf := make([]byte, len(want))
- _, err := io.ReadFull(r.Body, buf)
- if err != nil {
- t.Error(err)
- return
- }
- if string(buf) != want {
- t.Errorf("read %q; want %q", buf, want)
- }
- }
-}
-
-// TestServerWithCurl currently fails, hence the LenientCipherSuites test. See:
-// https://github.com/tatsuhiro-t/nghttp2/issues/140 &
-// http://sourceforge.net/p/curl/bugs/1472/
-func TestServerWithCurl(t *testing.T) { testServerWithCurl(t, false) }
-func TestServerWithCurl_LenientCipherSuites(t *testing.T) { testServerWithCurl(t, true) }
-
-func testServerWithCurl(t *testing.T, permitProhibitedCipherSuites bool) {
- if runtime.GOOS != "linux" {
- t.Skip("skipping Docker test when not on Linux; requires --net which won't work with boot2docker anyway")
- }
- if testing.Short() {
- t.Skip("skipping curl test in short mode")
- }
- requireCurl(t)
- var gotConn int32
- testHookOnConn = func() { atomic.StoreInt32(&gotConn, 1) }
-
- const msg = "Hello from curl!\n"
- ts := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- w.Header().Set("Foo", "Bar")
- w.Header().Set("Client-Proto", r.Proto)
- io.WriteString(w, msg)
- }))
- ConfigureServer(ts.Config, &Server{
- PermitProhibitedCipherSuites: permitProhibitedCipherSuites,
- })
- ts.TLS = ts.Config.TLSConfig // the httptest.Server has its own copy of this TLS config
- ts.StartTLS()
- defer ts.Close()
-
- t.Logf("Running test server for curl to hit at: %s", ts.URL)
- container := curl(t, "--silent", "--http2", "--insecure", "-v", ts.URL)
- defer kill(container)
- resc := make(chan interface{}, 1)
- go func() {
- res, err := dockerLogs(container)
- if err != nil {
- resc <- err
- } else {
- resc <- res
- }
- }()
- select {
- case res := <-resc:
- if err, ok := res.(error); ok {
- t.Fatal(err)
- }
- body := string(res.([]byte))
- // Search for both "key: value" and "key:value", since curl changed its output format.
- // Our Dockerfile contains the latest version (no space), but just in case people
- // didn't rebuild, check both.
- if !strings.Contains(body, "foo: Bar") && !strings.Contains(body, "foo:Bar") {
- t.Errorf("didn't see foo: Bar header")
- t.Logf("Got: %s", body)
- }
- if !strings.Contains(body, "client-proto: HTTP/2") && !strings.Contains(body, "client-proto:HTTP/2") {
- t.Errorf("didn't see client-proto: HTTP/2 header")
- t.Logf("Got: %s", res)
- }
- if !strings.Contains(string(res.([]byte)), msg) {
- t.Errorf("didn't see %q content", msg)
- t.Logf("Got: %s", res)
- }
- case <-time.After(3 * time.Second):
- t.Errorf("timeout waiting for curl")
- }
-
- if atomic.LoadInt32(&gotConn) == 0 {
- t.Error("never saw an http2 connection")
- }
-}
-
-var doh2load = flag.Bool("h2load", false, "Run h2load test")
-
-func TestServerWithH2Load(t *testing.T) {
- if !*doh2load {
- t.Skip("Skipping without --h2load flag.")
- }
- if runtime.GOOS != "linux" {
- t.Skip("skipping Docker test when not on Linux; requires --net which won't work with boot2docker anyway")
- }
- requireH2load(t)
-
- msg := strings.Repeat("Hello, h2load!\n", 5000)
- ts := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- io.WriteString(w, msg)
- w.(http.Flusher).Flush()
- io.WriteString(w, msg)
- }))
- ts.StartTLS()
- defer ts.Close()
-
- cmd := exec.Command("docker", "run", "--net=host", "--entrypoint=/usr/local/bin/h2load", "gohttp2/curl",
- "-n100000", "-c100", "-m100", ts.URL)
- cmd.Stdout = os.Stdout
- cmd.Stderr = os.Stderr
- if err := cmd.Run(); err != nil {
- t.Fatal(err)
- }
-}
-
-// Issue 12843
-func TestServerDoS_MaxHeaderListSize(t *testing.T) {
- st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {})
- defer st.Close()
-
- // shake hands
- st.writePreface()
- st.writeInitialSettings()
- frameSize := defaultMaxReadFrameSize
- var advHeaderListSize *uint32
- st.wantSettings().ForeachSetting(func(s Setting) error {
- switch s.ID {
- case SettingMaxFrameSize:
- if s.Val < minMaxFrameSize {
- frameSize = minMaxFrameSize
- } else if s.Val > maxFrameSize {
- frameSize = maxFrameSize
- } else {
- frameSize = int(s.Val)
- }
- case SettingMaxHeaderListSize:
- advHeaderListSize = &s.Val
- }
- return nil
- })
- st.writeSettingsAck()
- st.wantSettingsAck()
-
- if advHeaderListSize == nil {
- t.Errorf("server didn't advertise a max header list size")
- } else if *advHeaderListSize == 0 {
- t.Errorf("server advertised a max header list size of 0")
- }
-
- st.encodeHeaderField(":method", "GET")
- st.encodeHeaderField(":path", "/")
- st.encodeHeaderField(":scheme", "https")
- cookie := strings.Repeat("*", 4058)
- st.encodeHeaderField("cookie", cookie)
- st.writeHeaders(HeadersFrameParam{
- StreamID: 1,
- BlockFragment: st.headerBuf.Bytes(),
- EndStream: true,
- EndHeaders: false,
- })
-
- // Capture the short encoding of a duplicate ~4K cookie, now
- // that we've already sent it once.
- st.headerBuf.Reset()
- st.encodeHeaderField("cookie", cookie)
-
- // Now send 1MB of it.
- const size = 1 << 20
- b := bytes.Repeat(st.headerBuf.Bytes(), size/st.headerBuf.Len())
- for len(b) > 0 {
- chunk := b
- if len(chunk) > frameSize {
- chunk = chunk[:frameSize]
- }
- b = b[len(chunk):]
- st.fr.WriteContinuation(1, len(b) == 0, chunk)
- }
-
- h := st.wantHeaders()
- if !h.HeadersEnded() {
- t.Fatalf("Got HEADERS without END_HEADERS set: %v", h)
- }
- headers := st.decodeHeader(h.HeaderBlockFragment())
- want := [][2]string{
- {":status", "431"},
- {"content-type", "text/html; charset=utf-8"},
- {"content-length", "63"},
- }
- if !reflect.DeepEqual(headers, want) {
- t.Errorf("Headers mismatch.\n got: %q\nwant: %q\n", headers, want)
- }
-}
-
-func TestCompressionErrorOnWrite(t *testing.T) {
- const maxStrLen = 8 << 10
- var serverConfig *http.Server
- st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
- // No response body.
- }, func(ts *httptest.Server) {
- serverConfig = ts.Config
- serverConfig.MaxHeaderBytes = maxStrLen
- })
- st.addLogFilter("connection error: COMPRESSION_ERROR")
- defer st.Close()
- st.greet()
-
- maxAllowed := st.sc.framer.maxHeaderStringLen()
-
- // Crank this up now that we have a conn whose hpack.Decoder
- // max string length has been initialized from the earlier low
- // ~8K value. We want this higher so we don't hit the max header
- // list size; we only want to test hitting the max string size.
- serverConfig.MaxHeaderBytes = 1 << 20
-
- // First a request with a header that's exactly the max allowed size
- // for the hpack compression. It's still too long for the header list
- // size, so we'll get the 431 error, but that keeps the compression
- // context still valid.
- hbf := st.encodeHeader("foo", strings.Repeat("a", maxAllowed))
-
- st.writeHeaders(HeadersFrameParam{
- StreamID: 1,
- BlockFragment: hbf,
- EndStream: true,
- EndHeaders: true,
- })
- h := st.wantHeaders()
- if !h.HeadersEnded() {
- t.Fatalf("Got HEADERS without END_HEADERS set: %v", h)
- }
- headers := st.decodeHeader(h.HeaderBlockFragment())
- want := [][2]string{
- {":status", "431"},
- {"content-type", "text/html; charset=utf-8"},
- {"content-length", "63"},
- }
- if !reflect.DeepEqual(headers, want) {
- t.Errorf("Headers mismatch.\n got: %q\nwant: %q\n", headers, want)
- }
- df := st.wantData()
- if !strings.Contains(string(df.Data()), "HTTP Error 431") {
- t.Errorf("Unexpected data body: %q", df.Data())
- }
- if !df.StreamEnded() {
- t.Fatalf("expect data stream end")
- }
-
- // And now send one that's just one byte too big.
- hbf = st.encodeHeader("bar", strings.Repeat("b", maxAllowed+1))
- st.writeHeaders(HeadersFrameParam{
- StreamID: 3,
- BlockFragment: hbf,
- EndStream: true,
- EndHeaders: true,
- })
- ga := st.wantGoAway()
- if ga.ErrCode != ErrCodeCompression {
- t.Errorf("GOAWAY err = %v; want ErrCodeCompression", ga.ErrCode)
- }
-}
-
-func TestCompressionErrorOnClose(t *testing.T) {
- st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
- // No response body.
- })
- st.addLogFilter("connection error: COMPRESSION_ERROR")
- defer st.Close()
- st.greet()
-
- hbf := st.encodeHeader("foo", "bar")
- hbf = hbf[:len(hbf)-1] // truncate one byte from the end, so hpack.Decoder.Close fails.
- st.writeHeaders(HeadersFrameParam{
- StreamID: 1,
- BlockFragment: hbf,
- EndStream: true,
- EndHeaders: true,
- })
- ga := st.wantGoAway()
- if ga.ErrCode != ErrCodeCompression {
- t.Errorf("GOAWAY err = %v; want ErrCodeCompression", ga.ErrCode)
- }
-}
-
-// test that a server handler can read trailers from a client
-func TestServerReadsTrailers(t *testing.T) {
- const testBody = "some test body"
- writeReq := func(st *serverTester) {
- st.writeHeaders(HeadersFrameParam{
- StreamID: 1, // clients send odd numbers
- BlockFragment: st.encodeHeader("trailer", "Foo, Bar", "trailer", "Baz"),
- EndStream: false,
- EndHeaders: true,
- })
- st.writeData(1, false, []byte(testBody))
- st.writeHeaders(HeadersFrameParam{
- StreamID: 1, // clients send odd numbers
- BlockFragment: st.encodeHeaderRaw(
- "foo", "foov",
- "bar", "barv",
- "baz", "bazv",
- "surprise", "wasn't declared; shouldn't show up",
- ),
- EndStream: true,
- EndHeaders: true,
- })
- }
- checkReq := func(r *http.Request) {
- wantTrailer := http.Header{
- "Foo": nil,
- "Bar": nil,
- "Baz": nil,
- }
- if !reflect.DeepEqual(r.Trailer, wantTrailer) {
- t.Errorf("initial Trailer = %v; want %v", r.Trailer, wantTrailer)
- }
- slurp, err := ioutil.ReadAll(r.Body)
- if string(slurp) != testBody {
- t.Errorf("read body %q; want %q", slurp, testBody)
- }
- if err != nil {
- t.Fatalf("Body slurp: %v", err)
- }
- wantTrailerAfter := http.Header{
- "Foo": {"foov"},
- "Bar": {"barv"},
- "Baz": {"bazv"},
- }
- if !reflect.DeepEqual(r.Trailer, wantTrailerAfter) {
- t.Errorf("final Trailer = %v; want %v", r.Trailer, wantTrailerAfter)
- }
- }
- testServerRequest(t, writeReq, checkReq)
-}
-
-// test that a server handler can send trailers
-func TestServerWritesTrailers_WithFlush(t *testing.T) { testServerWritesTrailers(t, true) }
-func TestServerWritesTrailers_WithoutFlush(t *testing.T) { testServerWritesTrailers(t, false) }
-
-func testServerWritesTrailers(t *testing.T, withFlush bool) {
- // See https://httpwg.github.io/specs/rfc7540.html#rfc.section.8.1.3
- testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
- w.Header().Set("Trailer", "Server-Trailer-A, Server-Trailer-B")
- w.Header().Add("Trailer", "Server-Trailer-C")
-
- // TODO: decide if the server should filter these while
- // writing the Trailer header in the response. Currently it
- // appears net/http doesn't do this for http/1.1
- w.Header().Add("Trailer", "Transfer-Encoding, Content-Length, Trailer") // filtered
- w.Header().Set("Foo", "Bar")
- w.Header().Set("Content-Length", "5")
-
- io.WriteString(w, "Hello")
- if withFlush {
- w.(http.Flusher).Flush()
- }
- w.Header().Set("Server-Trailer-A", "valuea")
- w.Header().Set("Server-Trailer-C", "valuec") // skipping B
- // After a flush, random keys like Server-Surprise shouldn't show up:
- w.Header().Set("Server-Surprise", "surprise! this isn't predeclared!")
- // But we do permit promoting keys to trailers after a
- // flush if they start with the magic
- // otherwise-invalid "Trailer:" prefix:
- w.Header().Set("Trailer:Post-Header-Trailer", "hi1")
- w.Header().Set("Trailer:post-header-trailer2", "hi2")
- w.Header().Set("Transfer-Encoding", "should not be included; Forbidden by RFC 2616 14.40")
- w.Header().Set("Content-Length", "should not be included; Forbidden by RFC 2616 14.40")
- w.Header().Set("Trailer", "should not be included; Forbidden by RFC 2616 14.40")
- return nil
- }, func(st *serverTester) {
- getSlash(st)
- hf := st.wantHeaders()
- if hf.StreamEnded() {
- t.Fatal("response HEADERS had END_STREAM")
- }
- if !hf.HeadersEnded() {
- t.Fatal("response HEADERS didn't have END_HEADERS")
- }
- goth := st.decodeHeader(hf.HeaderBlockFragment())
- wanth := [][2]string{
- {":status", "200"},
- {"foo", "Bar"},
- {"trailer", "Server-Trailer-A, Server-Trailer-B"},
- {"trailer", "Server-Trailer-C"},
- {"trailer", "Transfer-Encoding, Content-Length, Trailer"},
- {"content-type", "text/plain; charset=utf-8"},
- {"content-length", "5"},
- }
- if !reflect.DeepEqual(goth, wanth) {
- t.Errorf("Header mismatch.\n got: %v\nwant: %v", goth, wanth)
- }
- df := st.wantData()
- if string(df.Data()) != "Hello" {
- t.Fatalf("Client read %q; want Hello", df.Data())
- }
- if df.StreamEnded() {
- t.Fatalf("data frame had STREAM_ENDED")
- }
- tf := st.wantHeaders() // for the trailers
- if !tf.StreamEnded() {
- t.Fatalf("trailers HEADERS lacked END_STREAM")
- }
- if !tf.HeadersEnded() {
- t.Fatalf("trailers HEADERS lacked END_HEADERS")
- }
- wanth = [][2]string{
- {"post-header-trailer", "hi1"},
- {"post-header-trailer2", "hi2"},
- {"server-trailer-a", "valuea"},
- {"server-trailer-c", "valuec"},
- }
- goth = st.decodeHeader(tf.HeaderBlockFragment())
- if !reflect.DeepEqual(goth, wanth) {
- t.Errorf("Header mismatch.\n got: %v\nwant: %v", goth, wanth)
- }
- })
-}
-
-// validate transmitted header field names & values
-// golang.org/issue/14048
-func TestServerDoesntWriteInvalidHeaders(t *testing.T) {
- testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
- w.Header().Add("OK1", "x")
- w.Header().Add("Bad:Colon", "x") // colon (non-token byte) in key
- w.Header().Add("Bad1\x00", "x") // null in key
- w.Header().Add("Bad2", "x\x00y") // null in value
- return nil
- }, func(st *serverTester) {
- getSlash(st)
- hf := st.wantHeaders()
- if !hf.StreamEnded() {
- t.Error("response HEADERS lacked END_STREAM")
- }
- if !hf.HeadersEnded() {
- t.Fatal("response HEADERS didn't have END_HEADERS")
- }
- goth := st.decodeHeader(hf.HeaderBlockFragment())
- wanth := [][2]string{
- {":status", "200"},
- {"ok1", "x"},
- {"content-type", "text/plain; charset=utf-8"},
- {"content-length", "0"},
- }
- if !reflect.DeepEqual(goth, wanth) {
- t.Errorf("Header mismatch.\n got: %v\nwant: %v", goth, wanth)
- }
- })
-}
-
-func BenchmarkServerGets(b *testing.B) {
- b.ReportAllocs()
-
- const msg = "Hello, world"
- st := newServerTester(b, func(w http.ResponseWriter, r *http.Request) {
- io.WriteString(w, msg)
- })
- defer st.Close()
- st.greet()
-
- // Give the server quota to reply. (plus it has the initial 64KB window)
- if err := st.fr.WriteWindowUpdate(0, uint32(b.N*len(msg))); err != nil {
- b.Fatal(err)
- }
-
- for i := 0; i < b.N; i++ {
- id := 1 + uint32(i)*2
- st.writeHeaders(HeadersFrameParam{
- StreamID: id,
- BlockFragment: st.encodeHeader(),
- EndStream: true,
- EndHeaders: true,
- })
- st.wantHeaders()
- df := st.wantData()
- if !df.StreamEnded() {
- b.Fatalf("DATA didn't have END_STREAM; got %v", df)
- }
- }
-}
-
-func BenchmarkServerPosts(b *testing.B) {
- b.ReportAllocs()
-
- const msg = "Hello, world"
- st := newServerTester(b, func(w http.ResponseWriter, r *http.Request) {
- io.WriteString(w, msg)
- })
- defer st.Close()
- st.greet()
-
- // Give the server quota to reply. (plus it has the initial 64KB window)
- if err := st.fr.WriteWindowUpdate(0, uint32(b.N*len(msg))); err != nil {
- b.Fatal(err)
- }
-
- for i := 0; i < b.N; i++ {
- id := 1 + uint32(i)*2
- st.writeHeaders(HeadersFrameParam{
- StreamID: id,
- BlockFragment: st.encodeHeader(":method", "POST"),
- EndStream: false,
- EndHeaders: true,
- })
- st.writeData(id, true, nil)
- st.wantHeaders()
- df := st.wantData()
- if !df.StreamEnded() {
- b.Fatalf("DATA didn't have END_STREAM; got %v", df)
- }
- }
-}
-
-// go-fuzz bug, originally reported at https://github.com/bradfitz/http2/issues/53
-// Verify we don't hang.
-func TestIssue53(t *testing.T) {
- const data = "PRI * HTTP/2.0\r\n\r\nSM" +
- "\r\n\r\n\x00\x00\x00\x01\ainfinfin\ad"
- s := &http.Server{
- ErrorLog: log.New(io.MultiWriter(stderrv(), twriter{t: t}), "", log.LstdFlags),
- Handler: http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
- w.Write([]byte("hello"))
- }),
- }
- s2 := &Server{
- MaxReadFrameSize: 1 << 16,
- PermitProhibitedCipherSuites: true,
- }
- c := &issue53Conn{[]byte(data), false, false}
- s2.ServeConn(c, &ServeConnOpts{BaseConfig: s})
- if !c.closed {
- t.Fatal("connection is not closed")
- }
-}
-
-type issue53Conn struct {
- data []byte
- closed bool
- written bool
-}
-
-func (c *issue53Conn) Read(b []byte) (n int, err error) {
- if len(c.data) == 0 {
- return 0, io.EOF
- }
- n = copy(b, c.data)
- c.data = c.data[n:]
- return
-}
-
-func (c *issue53Conn) Write(b []byte) (n int, err error) {
- c.written = true
- return len(b), nil
-}
-
-func (c *issue53Conn) Close() error {
- c.closed = true
- return nil
-}
-
-func (c *issue53Conn) LocalAddr() net.Addr { return &net.TCPAddr{net.IP{127, 0, 0, 1}, 49706, ""} }
-func (c *issue53Conn) RemoteAddr() net.Addr { return &net.TCPAddr{net.IP{127, 0, 0, 1}, 49706, ""} }
-func (c *issue53Conn) SetDeadline(t time.Time) error { return nil }
-func (c *issue53Conn) SetReadDeadline(t time.Time) error { return nil }
-func (c *issue53Conn) SetWriteDeadline(t time.Time) error { return nil }
-
-// golang.org/issue/12895
-func TestConfigureServer(t *testing.T) {
- tests := []struct {
- name string
- in http.Server
- wantErr string
- }{
- {
- name: "empty server",
- in: http.Server{},
- },
- {
- name: "just the required cipher suite",
- in: http.Server{
- TLSConfig: &tls.Config{
- CipherSuites: []uint16{tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256},
- },
- },
- },
- {
- name: "missing required cipher suite",
- in: http.Server{
- TLSConfig: &tls.Config{
- CipherSuites: []uint16{tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384},
- },
- },
- wantErr: "is missing HTTP/2-required TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
- },
- {
- name: "required after bad",
- in: http.Server{
- TLSConfig: &tls.Config{
- CipherSuites: []uint16{tls.TLS_RSA_WITH_RC4_128_SHA, tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256},
- },
- },
- wantErr: "contains an HTTP/2-approved cipher suite (0xc02f), but it comes after",
- },
- {
- name: "bad after required",
- in: http.Server{
- TLSConfig: &tls.Config{
- CipherSuites: []uint16{tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, tls.TLS_RSA_WITH_RC4_128_SHA},
- },
- },
- },
- }
- for _, tt := range tests {
- err := ConfigureServer(&tt.in, nil)
- if (err != nil) != (tt.wantErr != "") {
- if tt.wantErr != "" {
- t.Errorf("%s: success, but want error", tt.name)
- } else {
- t.Errorf("%s: unexpected error: %v", tt.name, err)
- }
- }
- if err != nil && tt.wantErr != "" && !strings.Contains(err.Error(), tt.wantErr) {
- t.Errorf("%s: err = %v; want substring %q", tt.name, err, tt.wantErr)
- }
- if err == nil && !tt.in.TLSConfig.PreferServerCipherSuites {
- t.Errorf("%s: PreferServerCipherSuites is false; want true", tt.name)
- }
- }
-}
-
-func TestServerRejectHeadWithBody(t *testing.T) {
- st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
- // No response body.
- })
- defer st.Close()
- st.greet()
- st.writeHeaders(HeadersFrameParam{
- StreamID: 1, // clients send odd numbers
- BlockFragment: st.encodeHeader(":method", "HEAD"),
- EndStream: false, // what we're testing, a bogus HEAD request with body
- EndHeaders: true,
- })
- st.wantRSTStream(1, ErrCodeProtocol)
-}
-
-func TestServerNoAutoContentLengthOnHead(t *testing.T) {
- st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
- // No response body. (or smaller than one frame)
- })
- defer st.Close()
- st.greet()
- st.writeHeaders(HeadersFrameParam{
- StreamID: 1, // clients send odd numbers
- BlockFragment: st.encodeHeader(":method", "HEAD"),
- EndStream: true,
- EndHeaders: true,
- })
- h := st.wantHeaders()
- headers := st.decodeHeader(h.HeaderBlockFragment())
- want := [][2]string{
- {":status", "200"},
- {"content-type", "text/plain; charset=utf-8"},
- }
- if !reflect.DeepEqual(headers, want) {
- t.Errorf("Headers mismatch.\n got: %q\nwant: %q\n", headers, want)
- }
-}
-
-// golang.org/issue/13495
-func TestServerNoDuplicateContentType(t *testing.T) {
- st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
- w.Header()["Content-Type"] = []string{""}
- fmt.Fprintf(w, "<html><head></head><body>hi</body></html>") // 41 bytes, matching the expected content-length below
- })
- defer st.Close()
- st.greet()
- st.writeHeaders(HeadersFrameParam{
- StreamID: 1,
- BlockFragment: st.encodeHeader(),
- EndStream: true,
- EndHeaders: true,
- })
- h := st.wantHeaders()
- headers := st.decodeHeader(h.HeaderBlockFragment())
- want := [][2]string{
- {":status", "200"},
- {"content-type", ""},
- {"content-length", "41"},
- }
- if !reflect.DeepEqual(headers, want) {
- t.Errorf("Headers mismatch.\n got: %q\nwant: %q\n", headers, want)
- }
-}
-
-type connStateConn struct {
- net.Conn
- cs tls.ConnectionState
-}
-
-func (c connStateConn) ConnectionState() tls.ConnectionState { return c.cs }
-
-// golang.org/issue/12737 -- handle any net.Conn, not just
-// *tls.Conn.
-func TestServerHandleCustomConn(t *testing.T) {
- var s Server
- c1, c2 := net.Pipe()
- clientDone := make(chan struct{})
- handlerDone := make(chan struct{})
- var req *http.Request
- go func() {
- defer close(clientDone)
- defer c2.Close()
- fr := NewFramer(c2, c2)
- io.WriteString(c2, ClientPreface)
- fr.WriteSettings()
- fr.WriteSettingsAck()
- f, err := fr.ReadFrame()
- if err != nil {
- t.Error(err)
- return
- }
- if sf, ok := f.(*SettingsFrame); !ok || sf.IsAck() {
- t.Errorf("Got %v; want non-ACK SettingsFrame", summarizeFrame(f))
- return
- }
- f, err = fr.ReadFrame()
- if err != nil {
- t.Error(err)
- return
- }
- if sf, ok := f.(*SettingsFrame); !ok || !sf.IsAck() {
- t.Errorf("Got %v; want ACK SettingsFrame", summarizeFrame(f))
- return
- }
- var henc hpackEncoder
- fr.WriteHeaders(HeadersFrameParam{
- StreamID: 1,
- BlockFragment: henc.encodeHeaderRaw(t, ":method", "GET", ":path", "/", ":scheme", "https", ":authority", "foo.com"),
- EndStream: true,
- EndHeaders: true,
- })
- go io.Copy(ioutil.Discard, c2)
- <-handlerDone
- }()
- const testString = "my custom ConnectionState"
- fakeConnState := tls.ConnectionState{
- ServerName: testString,
- Version: tls.VersionTLS12,
- }
- go s.ServeConn(connStateConn{c1, fakeConnState}, &ServeConnOpts{
- BaseConfig: &http.Server{
- Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- defer close(handlerDone)
- req = r
- }),
- }})
- select {
- case <-clientDone:
- case <-time.After(5 * time.Second):
- t.Fatal("timeout waiting for handler")
- }
- if req.TLS == nil {
- t.Fatalf("Request.TLS is nil. Got: %#v", req)
- }
- if req.TLS.ServerName != testString {
- t.Fatalf("Request.TLS = %+v; want ServerName of %q", req.TLS, testString)
- }
-}
-
-type hpackEncoder struct {
- enc *hpack.Encoder
- buf bytes.Buffer
-}
-
-func (he *hpackEncoder) encodeHeaderRaw(t *testing.T, headers ...string) []byte {
- if len(headers)%2 == 1 {
- panic("odd number of kv args")
- }
- he.buf.Reset()
- if he.enc == nil {
- he.enc = hpack.NewEncoder(&he.buf)
- }
- for len(headers) > 0 {
- k, v := headers[0], headers[1]
- err := he.enc.WriteField(hpack.HeaderField{Name: k, Value: v})
- if err != nil {
- t.Fatalf("HPACK encoding error for %q/%q: %v", k, v, err)
- }
- headers = headers[2:]
- }
- return he.buf.Bytes()
-}
diff --git a/vendor/golang.org/x/net/http2/testdata/draft-ietf-httpbis-http2.xml b/vendor/golang.org/x/net/http2/testdata/draft-ietf-httpbis-http2.xml
deleted file mode 100644
index 31a84bed..00000000
--- a/vendor/golang.org/x/net/http2/testdata/draft-ietf-httpbis-http2.xml
+++ /dev/null
@@ -1,5021 +0,0 @@
-
- Hypertext Transfer Protocol version 2
-
- [Front matter: authors at Twist (mbelshe@chromium.org), Google, Inc
- (fenix@google.com), and Mozilla (331 E Evelyn Street, Mountain View,
- CA 94041, US; martin.thomson@gmail.com); area: Applications;
- workgroup: HTTPbis; keywords: HTTP, SPDY, Web.]
-
- This specification describes an optimized expression of the semantics of the Hypertext
- Transfer Protocol (HTTP). HTTP/2 enables a more efficient use of network resources and a
- reduced perception of latency by introducing header field compression and allowing multiple
- concurrent messages on the same connection. It also introduces unsolicited push of
- representations from servers to clients.
-
-
- This specification is an alternative to, but does not obsolete, the HTTP/1.1 message syntax.
- HTTP's existing semantics remain unchanged.
-
-
-
-
-
- Discussion of this draft takes place on the HTTPBIS working group mailing list
- (ietf-http-wg@w3.org), which is archived at .
-
-
- Working Group information can be found at ; that specific to HTTP/2 are at .
-
-
- The changes in this draft are summarized in .
-
-
-
-
-
-
-
-
-
- The Hypertext Transfer Protocol (HTTP) is a wildly successful protocol. However, the
- HTTP/1.1 message format () has
- several characteristics that have a negative overall effect on application performance
- today.
-
-
- In particular, HTTP/1.0 allowed only one request to be outstanding at a time on a given
- TCP connection. HTTP/1.1 added request pipelining, but this only partially addressed
- request concurrency and still suffers from head-of-line blocking. Therefore, HTTP/1.1
- clients that need to make many requests typically use multiple connections to a server in
- order to achieve concurrency and thereby reduce latency.
-
-
- Furthermore, HTTP header fields are often repetitive and verbose, causing unnecessary
- network traffic, as well as causing the initial TCP congestion
- window to quickly fill. This can result in excessive latency when multiple requests are
- made on a new TCP connection.
-
-
- HTTP/2 addresses these issues by defining an optimized mapping of HTTP's semantics to an
- underlying connection. Specifically, it allows interleaving of request and response
- messages on the same connection and uses an efficient coding for HTTP header fields. It
- also allows prioritization of requests, letting more important requests complete more
- quickly, further improving performance.
-
-
- The resulting protocol is more friendly to the network, because fewer TCP connections can
- be used in comparison to HTTP/1.x. This means less competition with other flows, and
- longer-lived connections, which in turn leads to better utilization of available network
- capacity.
-
-
- Finally, HTTP/2 also enables more efficient processing of messages through use of binary
- message framing.
-
-
-
-
-
- HTTP/2 provides an optimized transport for HTTP semantics. HTTP/2 supports all of the core
- features of HTTP/1.1, but aims to be more efficient in several ways.
-
-
- The basic protocol unit in HTTP/2 is a frame. Each frame
- type serves a different purpose. For example, HEADERS and
- DATA frames form the basis of HTTP requests and
- responses; other frame types like SETTINGS,
- WINDOW_UPDATE, and PUSH_PROMISE are used in support of other
- HTTP/2 features.
-
-
- Multiplexing of requests is achieved by having each HTTP request-response exchange
- associated with its own stream. Streams are largely
- independent of each other, so a blocked or stalled request or response does not prevent
- progress on other streams.
-
-
- Flow control and prioritization ensure that it is possible to efficiently use multiplexed
- streams. Flow control helps to ensure that only data that
- can be used by a receiver is transmitted. Prioritization ensures that limited resources can be directed
- to the most important streams first.
-
-
- HTTP/2 adds a new interaction mode, whereby a server can push
- responses to a client. Server push allows a server to speculatively send a client
- data that the server anticipates the client will need, trading off some network usage
- against a potential latency gain. The server does this by synthesizing a request, which it
- sends as a PUSH_PROMISE frame. The server is then able to send a response to
- the synthetic request on a separate stream.
-
-
- Frames that contain HTTP header fields are compressed.
- HTTP requests can be highly redundant, so compression can reduce the size of requests and
- responses significantly.
-
-
-
-
- The HTTP/2 specification is split into four parts:
-
-
- Starting HTTP/2 covers how an HTTP/2 connection is
- initiated.
-
-
- The framing and streams layers describe the way HTTP/2 frames are
- structured and formed into multiplexed streams.
-
-
- Frame and error
- definitions include details of the frame and error types used in HTTP/2.
-
-
- HTTP mappings and additional
- requirements describe how HTTP semantics are expressed using frames and
- streams.
-
-
-
-
- While some of the frame and stream layer concepts are isolated from HTTP, this
- specification does not define a completely generic framing layer. The framing and streams
- layers are tailored to the needs of the HTTP protocol and server push.
-
-
-
-
-
- The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT", "SHOULD", "SHOULD
- NOT", "RECOMMENDED", "MAY", and "OPTIONAL" in this document are to be interpreted as
- described in RFC 2119.
-
-
- All numeric values are in network byte order. Values are unsigned unless otherwise
- indicated. Literal values are provided in decimal or hexadecimal as appropriate.
- Hexadecimal literals are prefixed with 0x to distinguish them
- from decimal literals.
-
-
- The following terms are used:
-
-
- The endpoint initiating the HTTP/2 connection.
-
-
- A transport-layer connection between two endpoints.
-
-
- An error that affects the entire HTTP/2 connection.
-
-
- Either the client or server of the connection.
-
-
- The smallest unit of communication within an HTTP/2 connection, consisting of a header
- and a variable-length sequence of octets structured according to the frame type.
-
-
- An endpoint. When discussing a particular endpoint, "peer" refers to the endpoint
- that is remote to the primary subject of discussion.
-
-
- An endpoint that is receiving frames.
-
-
- An endpoint that is transmitting frames.
-
-
- The endpoint which did not initiate the HTTP/2 connection.
-
-
- A bi-directional flow of frames across a virtual channel within the HTTP/2 connection.
-
-
- An error on the individual HTTP/2 stream.
-
-
-
-
- Finally, the terms "gateway", "intermediary", "proxy", and "tunnel" are defined
- in .
-
-
-
-
-
-
- An HTTP/2 connection is an application layer protocol running on top of a TCP connection
- (). The client is the TCP connection initiator.
-
-
- HTTP/2 uses the same "http" and "https" URI schemes used by HTTP/1.1. HTTP/2 shares the same
- default port numbers: 80 for "http" URIs and 443 for "https" URIs. As a result,
- implementations processing requests for target resource URIs like http://example.org/foo or https://example.com/bar are required to first discover whether the
- upstream server (the immediate peer to which the client wishes to establish a connection)
- supports HTTP/2.
-
-
-
- The means by which support for HTTP/2 is determined is different for "http" and "https"
- URIs. Discovery for "http" URIs is described in . Discovery
- for "https" URIs is described in .
-
-
-
-
- The protocol defined in this document has two identifiers.
-
-
-
- The string "h2" identifies the protocol where HTTP/2 uses TLS. This identifier is used in the TLS application layer protocol negotiation extension (ALPN)
- field and any place that HTTP/2 over TLS is identified.
-
-
- The "h2" string is serialized into an ALPN protocol identifier as the two octet
- sequence: 0x68, 0x32.
-
-
-
-
- The string "h2c" identifies the protocol where HTTP/2 is run over cleartext TCP.
- This identifier is used in the HTTP/1.1 Upgrade header field and any place that
- HTTP/2 over TCP is identified.
-
-
-
-
-
- Negotiating "h2" or "h2c" implies the use of the transport, security, framing and message
- semantics described in this document.
-
-
- RFC Editor's Note: please remove the remainder of this section prior to the
- publication of a final version of this document.
-
-
- Only implementations of the final, published RFC can identify themselves as "h2" or "h2c".
- Until such an RFC exists, implementations MUST NOT identify themselves using these
- strings.
-
-
- Examples and text throughout the rest of this document use "h2" as a matter of
- editorial convenience only. Implementations of draft versions MUST NOT identify using
- this string.
-
-
- Implementations of draft versions of the protocol MUST add the string "-" and the
- corresponding draft number to the identifier. For example, draft-ietf-httpbis-http2-11
- over TLS is identified using the string "h2-11".
-
-
- Non-compatible experiments that are based on these draft versions MUST append the string
- "-" and an experiment name to the identifier. For example, an experimental implementation
- of packet mood-based encoding based on draft-ietf-httpbis-http2-09 might identify itself
- as "h2-09-emo". Note that any label MUST conform to the "token" syntax defined in
- . Experimenters are
- encouraged to coordinate their experiments on the ietf-http-wg@w3.org mailing list.
-
-
-
-
-
- A client that makes a request for an "http" URI without prior knowledge about support for
- HTTP/2 uses the HTTP Upgrade mechanism (). The client makes an HTTP/1.1 request that includes an Upgrade
- header field identifying HTTP/2 with the "h2c" token. The HTTP/1.1 request MUST include
- exactly one HTTP2-Settings header field.
-
-
-
- Requests that contain an entity body MUST be sent in their entirety before the client can
- send HTTP/2 frames. This means that a large request entity can block the use of the
- connection until it is completely sent.
-
-
- If concurrency of an initial request with subsequent requests is important, an OPTIONS
- request can be used to perform the upgrade to HTTP/2, at the cost of an additional
- round-trip.
-
-
- A server that does not support HTTP/2 can respond to the request as though the Upgrade
- header field were absent:
-
-
-
- A server MUST ignore an "h2" token in an Upgrade header field. Presence of a token with
- "h2" implies HTTP/2 over TLS, which is instead negotiated as described in .
-
-
- A server that supports HTTP/2 can accept the upgrade with a 101 (Switching Protocols)
- response. After the empty line that terminates the 101 response, the server can begin
- sending HTTP/2 frames. These frames MUST include a response to the request that initiated
- the Upgrade.
-
-
-
-
- The first HTTP/2 frame sent by the server is a SETTINGS frame () as the server connection preface (). Upon receiving the 101 response, the client sends a connection preface, which includes a
- SETTINGS frame.
-
-
- The HTTP/1.1 request that is sent prior to upgrade is assigned stream identifier 1 and is
- assigned default priority values. Stream 1 is
- implicitly half closed from the client toward the server, since the request is completed
- as an HTTP/1.1 request. After commencing the HTTP/2 connection, stream 1 is used for the
- response.
-
-
-
-
- A request that upgrades from HTTP/1.1 to HTTP/2 MUST include exactly one HTTP2-Settings header field. The HTTP2-Settings header field is a connection-specific header field
- that includes parameters that govern the HTTP/2 connection, provided in anticipation of
- the server accepting the request to upgrade.
-
-
-
- A server MUST NOT upgrade the connection to HTTP/2 if this header field is not present,
- or if more than one is present. A server MUST NOT send this header field.
-
-
-
- The content of the HTTP2-Settings header field is the
- payload of a SETTINGS frame (), encoded as a
- base64url string (that is, the URL- and filename-safe Base64 encoding described in , with any trailing '=' characters omitted). The
- ABNF production for token68 is
- defined in .
-
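As an illustration of the encoding described above (added here for context; this Go sketch is not part of the deleted file, and the function name is invented), the snippet serializes SETTINGS parameters as 16-bit identifier / 32-bit value pairs and base64url-encodes them with the trailing '=' omitted. A client performing the upgrade would place the result in the HTTP2-Settings header field and list HTTP2-Settings as a connection option in the Connection header field, as the surrounding text requires.

```go
package main

import (
	"encoding/base64"
	"encoding/binary"
	"fmt"
)

// buildHTTP2Settings serializes SETTINGS parameters (unsigned 16-bit
// identifier followed by unsigned 32-bit value, network byte order) and
// returns the base64url encoding with trailing '=' characters omitted,
// suitable for an HTTP2-Settings header field value.
func buildHTTP2Settings(params map[uint16]uint32) string {
	payload := make([]byte, 0, 6*len(params))
	for id, v := range params {
		var entry [6]byte
		binary.BigEndian.PutUint16(entry[0:2], id)
		binary.BigEndian.PutUint32(entry[2:6], v)
		payload = append(payload, entry[:]...)
	}
	return base64.RawURLEncoding.EncodeToString(payload)
}

func main() {
	// 0x3 is SETTINGS_MAX_CONCURRENT_STREAMS (defined later in this document).
	fmt.Println(buildHTTP2Settings(map[uint16]uint32{0x3: 100})) // AAMAAABk
}
```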
-
- Since the upgrade is only intended to apply to the immediate connection, a client
- sending HTTP2-Settings MUST also send HTTP2-Settings as a connection option in the Connection header field to prevent it from being forwarded
- downstream.
-
-
- A server decodes and interprets these values as it would any other
- SETTINGS frame. Acknowledgement of the
- SETTINGS parameters is not necessary, since a 101 response serves as implicit
- acknowledgment. Providing these values in the Upgrade request gives a client an
- opportunity to provide parameters prior to receiving any frames from the server.
-
-
-
-
-
-
- A client that makes a request to an "https" URI uses TLS
- with the application layer protocol negotiation extension.
-
-
- HTTP/2 over TLS uses the "h2" application token. The "h2c" token MUST NOT be sent by a
- client or selected by a server.
-
-
- Once TLS negotiation is complete, both the client and the server send a connection preface.
-
-
-
-
-
- A client can learn that a particular server supports HTTP/2 by other means. For example,
- describes a mechanism for advertising this capability.
-
-
- A client MAY immediately send HTTP/2 frames to a server that is known to support HTTP/2,
- after the connection preface; a server can
- identify such a connection by the presence of the connection preface. This only affects
- the establishment of HTTP/2 connections over cleartext TCP; implementations that support
- HTTP/2 over TLS MUST use protocol negotiation in TLS.
-
-
- Without additional information, prior support for HTTP/2 is not a strong signal that a
- given server will support HTTP/2 for future connections. For example, it is possible for
- server configurations to change, for configurations to differ between instances in
- clustered servers, or for network conditions to change.
-
-
-
-
-
- Upon establishment of a TCP connection and determination that HTTP/2 will be used by both
- peers, each endpoint MUST send a connection preface as a final confirmation and to
- establish the initial SETTINGS parameters for the HTTP/2 connection. The client and
- server each send a different connection preface.
-
-
- The client connection preface starts with a sequence of 24 octets, which in hex notation
- are:
-
-
-
- (the string PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n). This sequence
- is followed by a SETTINGS frame (). The
- SETTINGS frame MAY be empty. The client sends the client connection
- preface immediately upon receipt of a 101 Switching Protocols response (indicating a
- successful upgrade), or as the first application data octets of a TLS connection. If
- starting an HTTP/2 connection with prior knowledge of server support for the protocol, the
- client connection preface is sent upon connection establishment.
-
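A minimal Go sketch of what a client emits at this point (added for illustration; it is not part of the deleted file): the fixed 24-octet preface string followed by an empty SETTINGS frame.

```go
package main

import (
	"bytes"
	"fmt"
	"io"
)

// clientPreface is the 24-octet sequence described above.
const clientPreface = "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"

// writeClientPreface sends the preface followed by an empty SETTINGS frame
// (length 0, type 0x4, flags 0, stream identifier 0).
func writeClientPreface(w io.Writer) error {
	if _, err := io.WriteString(w, clientPreface); err != nil {
		return err
	}
	_, err := w.Write([]byte{0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00})
	return err
}

func main() {
	var buf bytes.Buffer
	if err := writeClientPreface(&buf); err != nil {
		panic(err)
	}
	fmt.Printf("%d octets: % x\n", buf.Len(), buf.Bytes())
}
```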
-
-
-
- The client connection preface is selected so that a large proportion of HTTP/1.1 or
- HTTP/1.0 servers and intermediaries do not attempt to process further frames. Note
- that this does not address the concerns raised in .
-
-
-
-
- The server connection preface consists of a potentially empty SETTINGS
- frame () that MUST be the first frame the server sends in the
- HTTP/2 connection.
-
-
- The SETTINGS frames received from a peer as part of the connection preface
- MUST be acknowledged (see ) after sending the connection
- preface.
-
-
- To avoid unnecessary latency, clients are permitted to send additional frames to the
- server immediately after sending the client connection preface, without waiting to receive
- the server connection preface. It is important to note, however, that the server
- connection preface SETTINGS frame might include parameters that necessarily
- alter how a client is expected to communicate with the server. Upon receiving the
- SETTINGS frame, the client is expected to honor any parameters established.
- In some configurations, it is possible for the server to transmit SETTINGS
- before the client sends additional frames, providing an opportunity to avoid this issue.
-
-
- Clients and servers MUST treat an invalid connection preface as a connection error of type
- PROTOCOL_ERROR. A GOAWAY frame ()
- MAY be omitted in this case, since an invalid preface indicates that the peer is not using
- HTTP/2.
-
-
-
-
-
-
- Once the HTTP/2 connection is established, endpoints can begin exchanging frames.
-
-
-
-
- All frames begin with a fixed 9-octet header followed by a variable-length payload.
-
-
-
- The fields of the frame header are defined as:
-
-
-
- The length of the frame payload expressed as an unsigned 24-bit integer. Values
- greater than 2^14 (16,384) MUST NOT be sent unless the receiver has
- set a larger value for SETTINGS_MAX_FRAME_SIZE.
-
-
- The 9 octets of the frame header are not included in this value.
-
-
-
-
- The 8-bit type of the frame. The frame type determines the format and semantics of
- the frame. Implementations MUST ignore and discard any frame that has a type that
- is unknown.
-
-
-
-
- An 8-bit field reserved for frame-type specific boolean flags.
-
-
- Flags are assigned semantics specific to the indicated frame type. Flags that have
- no defined semantics for a particular frame type MUST be ignored, and MUST be left
- unset (0) when sending.
-
-
-
-
- A reserved 1-bit field. The semantics of this bit are undefined and the bit MUST
- remain unset (0) when sending and MUST be ignored when receiving.
-
-
-
-
- A 31-bit stream identifier (see ). The value 0 is
- reserved for frames that are associated with the connection as a whole as opposed to
- an individual stream.
-
-
-
-
-
- The structure and content of the frame payload is dependent entirely on the frame type.
-
-
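To make the field layout above concrete, here is a small Go sketch (added for illustration only; the type and function names are invented and not part of the deleted file) that decodes the fixed 9-octet frame header:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// frameHeader mirrors the fixed 9-octet header described above.
type frameHeader struct {
	Length   uint32 // 24-bit payload length; the 9 header octets are not counted
	Type     uint8
	Flags    uint8
	StreamID uint32 // 31 bits; the reserved high bit is masked off
}

// parseFrameHeader decodes the header: 3 octets of length, 1 of type,
// 1 of flags, then the reserved bit and 31-bit stream identifier.
func parseFrameHeader(b [9]byte) frameHeader {
	return frameHeader{
		Length:   uint32(b[0])<<16 | uint32(b[1])<<8 | uint32(b[2]),
		Type:     b[3],
		Flags:    b[4],
		StreamID: binary.BigEndian.Uint32(b[5:9]) & 0x7fffffff,
	}
}

func main() {
	// The header of an empty SETTINGS frame: length 0, type 0x4, stream 0.
	fmt.Printf("%+v\n", parseFrameHeader([9]byte{0, 0, 0, 0x4, 0, 0, 0, 0, 0}))
}
```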
-
-
-
- The size of a frame payload is limited by the maximum size that a receiver advertises in
- the SETTINGS_MAX_FRAME_SIZE setting. This setting can have any value
- between 2^14 (16,384) and 2^24-1 (16,777,215) octets,
- inclusive.
-
-
- All implementations MUST be capable of receiving and minimally processing frames up to
- 2^14 octets in length, plus the 9-octet frame
- header. The size of the frame header is not included when describing frame sizes.
-
-
- Certain frame types, such as PING, impose additional limits
- on the amount of payload data allowed.
-
-
-
-
- If a frame size exceeds any defined limit, or is too small to contain mandatory frame
- data, the endpoint MUST send a FRAME_SIZE_ERROR error. A frame size error
- in a frame that could alter the state of the entire connection MUST be treated as a connection error; this includes any frame carrying
- a header block (that is, HEADERS,
- PUSH_PROMISE, and CONTINUATION), SETTINGS,
- and any WINDOW_UPDATE frame with a stream identifier of 0.
-
-
- Endpoints are not obligated to use all available space in a frame. Responsiveness can be
- improved by using frames that are smaller than the permitted maximum size. Sending large
- frames can result in delays in sending time-sensitive frames (such as
- RST_STREAM, WINDOW_UPDATE, or PRIORITY),
- which, if blocked by the transmission of a large frame, could affect performance.
-
-
-
-
-
- Just as in HTTP/1, a header field in HTTP/2 is a name with one or more associated values.
- They are used within HTTP request and response messages as well as server push operations
- (see ).
-
-
- Header lists are collections of zero or more header fields. When transmitted over a
- connection, a header list is serialized into a header block using HTTP Header Compression. The serialized header block is then
- divided into one or more octet sequences, called header block fragments, and transmitted
- within the payload of HEADERS, PUSH_PROMISE or CONTINUATION frames.
-
-
- The Cookie header field is treated specially by the HTTP
- mapping (see ).
-
-
- A receiving endpoint reassembles the header block by concatenating its fragments, then
- decompresses the block to reconstruct the header list.
-
-
- A complete header block consists of either:
-
-
- a single HEADERS or PUSH_PROMISE frame,
- with the END_HEADERS flag set, or
-
-
- a HEADERS or PUSH_PROMISE frame with the END_HEADERS
- flag cleared and one or more CONTINUATION frames,
- where the last CONTINUATION frame has the END_HEADERS flag set.
-
-
-
-
- Header compression is stateful. One compression context and one decompression context is
- used for the entire connection. Each header block is processed as a discrete unit.
- Header blocks MUST be transmitted as a contiguous sequence of frames, with no interleaved
- frames of any other type or from any other stream. The last frame in a sequence of
- HEADERS or CONTINUATION frames MUST have the END_HEADERS
- flag set. The last frame in a sequence of PUSH_PROMISE or
- CONTINUATION frames MUST have the END_HEADERS flag set. This allows a
- header block to be logically equivalent to a single frame.
-
-
- Header block fragments can only be sent as the payload of HEADERS,
- PUSH_PROMISE or CONTINUATION frames, because these frames
- carry data that can modify the compression context maintained by a receiver. An endpoint
- receiving HEADERS, PUSH_PROMISE or
- CONTINUATION frames MUST reassemble header blocks and perform decompression
- even if the frames are to be discarded. A receiver MUST terminate the connection with a
- connection error of type
- COMPRESSION_ERROR if it does not decompress a header block.
-
-
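A short Go sketch of the fragmentation rule described above (added for illustration; the function name is invented and this is not part of the deleted file): a serialized header block is cut into fragments no larger than the peer's maximum frame size, the first fragment travels in HEADERS (or PUSH_PROMISE) and the rest in CONTINUATION frames, with END_HEADERS set only on the last.

```go
package main

import "fmt"

// splitHeaderBlock divides a serialized header block into fragments no
// larger than the peer's SETTINGS_MAX_FRAME_SIZE. The caller sends the
// fragments back-to-back, with no frames from other streams interleaved.
func splitHeaderBlock(block []byte, maxFrameSize int) [][]byte {
	var frags [][]byte
	for len(block) > maxFrameSize {
		frags = append(frags, block[:maxFrameSize])
		block = block[maxFrameSize:]
	}
	return append(frags, block)
}

func main() {
	block := make([]byte, 40000) // a header block larger than one default-size frame
	frags := splitHeaderBlock(block, 16384)
	fmt.Println(len(frags), "fragments") // 3 fragments
}
```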
-
-
-
-
- A "stream" is an independent, bi-directional sequence of frames exchanged between the client
- and server within an HTTP/2 connection. Streams have several important characteristics:
-
-
- A single HTTP/2 connection can contain multiple concurrently open streams, with either
- endpoint interleaving frames from multiple streams.
-
-
- Streams can be established and used unilaterally or shared by either the client or
- server.
-
-
- Streams can be closed by either endpoint.
-
-
- The order in which frames are sent on a stream is significant. Recipients process frames
- in the order they are received. In particular, the order of HEADERS,
- and DATA frames is semantically significant.
-
-
- Streams are identified by an integer. Stream identifiers are assigned to streams by the
- endpoint initiating the stream.
-
-
-
-
-
-
- The lifecycle of a stream is shown in .
-
-
-
-
-
- Note that this diagram shows stream state transitions and the frames and flags that affect
- those transitions only. In this regard, CONTINUATION frames do not result
- in state transitions; they are effectively part of the HEADERS or
- PUSH_PROMISE that they follow. For this purpose, the END_STREAM flag is
- processed as a separate event to the frame that bears it; a HEADERS frame
- with the END_STREAM flag set can cause two state transitions.
-
-
- Both endpoints have a subjective view of the state of a stream that could be different
- when frames are in transit. Endpoints do not coordinate the creation of streams; they are
- created unilaterally by either endpoint. The negative consequences of a mismatch in
- states are limited to the "closed" state after sending RST_STREAM, where
- frames might be received for some time after closing.
-
-
- Streams have the following states:
-
-
-
-
-
- All streams start in the "idle" state. In this state, no frames have been
- exchanged.
-
-
- The following transitions are valid from this state:
-
-
- Sending or receiving a HEADERS frame causes the stream to become
- "open". The stream identifier is selected as described in . The same HEADERS frame can also
- cause a stream to immediately become "half closed".
-
-
- Sending a PUSH_PROMISE frame marks the associated stream for
- later use. The stream state for the reserved stream transitions to "reserved
- (local)".
-
-
- Receiving a PUSH_PROMISE frame marks the associated stream as
- reserved by the remote peer. The state of the stream becomes "reserved
- (remote)".
-
-
-
-
- Receiving any frames other than HEADERS or
- PUSH_PROMISE on a stream in this state MUST be treated as a connection error of type
- PROTOCOL_ERROR.
-
-
-
-
-
-
- A stream in the "reserved (local)" state is one that has been promised by sending a
- PUSH_PROMISE frame. A PUSH_PROMISE frame reserves an
- idle stream by associating the stream with an open stream that was initiated by the
- remote peer (see ).
-
-
- In this state, only the following transitions are possible:
-
-
- The endpoint can send a HEADERS frame. This causes the stream to
- open in a "half closed (remote)" state.
-
-
- Either endpoint can send a RST_STREAM frame to cause the stream
- to become "closed". This releases the stream reservation.
-
-
-
-
- An endpoint MUST NOT send any type of frame other than HEADERS or
- RST_STREAM in this state.
-
-
- A PRIORITY frame MAY be received in this state. Receiving any type
- of frame other than RST_STREAM or PRIORITY on a stream
- in this state MUST be treated as a connection
- error of type PROTOCOL_ERROR.
-
-
-
-
-
-
- A stream in the "reserved (remote)" state has been reserved by a remote peer.
-
-
- In this state, only the following transitions are possible:
-
-
- Receiving a HEADERS frame causes the stream to transition to
- "half closed (local)".
-
-
- Either endpoint can send a RST_STREAM frame to cause the stream
- to become "closed". This releases the stream reservation.
-
-
-
-
- An endpoint MAY send a PRIORITY frame in this state to reprioritize
- the reserved stream. An endpoint MUST NOT send any type of frame other than
- RST_STREAM, WINDOW_UPDATE, or PRIORITY
- in this state.
-
-
- Receiving any type of frame other than HEADERS or
- RST_STREAM on a stream in this state MUST be treated as a connection error of type
- PROTOCOL_ERROR.
-
-
-
-
-
-
- A stream in the "open" state may be used by both peers to send frames of any type.
- In this state, sending peers observe advertised stream
- level flow control limits.
-
-
- From this state either endpoint can send a frame with an END_STREAM flag set, which
- causes the stream to transition into one of the "half closed" states: an endpoint
- sending an END_STREAM flag causes the stream state to become "half closed (local)";
- an endpoint receiving an END_STREAM flag causes the stream state to become "half
- closed (remote)".
-
-
- Either endpoint can send a RST_STREAM frame from this state, causing
- it to transition immediately to "closed".
-
-
-
-
-
-
- A stream that is in the "half closed (local)" state cannot be used for sending
- frames. Only WINDOW_UPDATE, PRIORITY and
- RST_STREAM frames can be sent in this state.
-
-
- A stream transitions from this state to "closed" when a frame that contains an
- END_STREAM flag is received, or when either peer sends a RST_STREAM
- frame.
-
-
- A receiver can ignore WINDOW_UPDATE frames in this state, which might
- arrive for a short period after a frame bearing the END_STREAM flag is sent.
-
-
- PRIORITY frames received in this state are used to reprioritize
- streams that depend on the current stream.
-
-
-
-
-
-
- A stream that is "half closed (remote)" is no longer being used by the peer to send
- frames. In this state, an endpoint is no longer obligated to maintain a receiver
- flow control window if it performs flow control.
-
-
- If an endpoint receives additional frames for a stream that is in this state, other
- than WINDOW_UPDATE, PRIORITY or
- RST_STREAM, it MUST respond with a stream error of type
- STREAM_CLOSED.
-
-
- A stream that is "half closed (remote)" can be used by the endpoint to send frames
- of any type. In this state, the endpoint continues to observe advertised stream level flow control limits.
-
-
- A stream can transition from this state to "closed" by sending a frame that contains
- an END_STREAM flag, or when either peer sends a RST_STREAM frame.
-
-
-
-
-
-
- The "closed" state is the terminal state.
-
-
- An endpoint MUST NOT send frames other than PRIORITY on a closed
- stream. An endpoint that receives any frame other than PRIORITY
- after receiving a RST_STREAM MUST treat that as a stream error of type
- STREAM_CLOSED. Similarly, an endpoint that receives any frames after
- receiving a frame with the END_STREAM flag set MUST treat that as a connection error of type
- STREAM_CLOSED, unless the frame is permitted as described below.
-
-
- WINDOW_UPDATE or RST_STREAM frames can be received in
- this state for a short period after a DATA or HEADERS
- frame containing an END_STREAM flag is sent. Until the remote peer receives and
- processes RST_STREAM or the frame bearing the END_STREAM flag, it
- might send frames of these types. Endpoints MUST ignore
- WINDOW_UPDATE or RST_STREAM frames received in this
- state, though endpoints MAY choose to treat frames that arrive a significant time
- after sending END_STREAM as a connection
- error of type PROTOCOL_ERROR.
-
-
- PRIORITY frames can be sent on closed streams to prioritize streams
- that are dependent on the closed stream. Endpoints SHOULD process
- PRIORITY frames, though they can be ignored if the stream has been
- removed from the dependency tree (see ).
-
-
- If this state is reached as a result of sending a RST_STREAM frame,
- the peer that receives the RST_STREAM might have already sent - or
- enqueued for sending - frames on the stream that cannot be withdrawn. An endpoint
- MUST ignore frames that it receives on closed streams after it has sent a
- RST_STREAM frame. An endpoint MAY choose to limit the period over
- which it ignores frames and treat frames that arrive after this time as being in
- error.
-
-
- Flow controlled frames (i.e., DATA) received after sending
- RST_STREAM are counted toward the connection flow control window.
- Even though these frames might be ignored, because they are sent before the sender
- receives the RST_STREAM, the sender will consider the frames to count
- against the flow control window.
-
-
- An endpoint might receive a PUSH_PROMISE frame after it sends
- RST_STREAM. PUSH_PROMISE causes a stream to become
- "reserved" even if the associated stream has been reset. Therefore, a
- RST_STREAM is needed to close an unwanted promised stream.
-
-
-
-
-
- In the absence of more specific guidance elsewhere in this document, implementations
- SHOULD treat the receipt of a frame that is not expressly permitted in the description of
- a state as a connection error of type
- PROTOCOL_ERROR. Frames of unknown types are ignored.
-
-
- An example of the state transitions for an HTTP request/response exchange can be found in
- . An example of the state transitions for server push can be
- found in and .
-
-
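As a rough illustration of the state descriptions above (an editor-added Go sketch, not part of the deleted file; it deliberately omits the "reserved" states used by server push), the following models the transitions driven by END_STREAM and RST_STREAM for a stream opened with HEADERS:

```go
package main

import "fmt"

// streamState sketches a subset of the stream state machine described above.
type streamState int

const (
	stateIdle streamState = iota
	stateOpen
	stateHalfClosedLocal
	stateHalfClosedRemote
	stateClosed
)

func (s streamState) onEndStreamSent() streamState {
	switch s {
	case stateOpen:
		return stateHalfClosedLocal
	case stateHalfClosedRemote:
		return stateClosed
	}
	return s
}

func (s streamState) onEndStreamReceived() streamState {
	switch s {
	case stateOpen:
		return stateHalfClosedRemote
	case stateHalfClosedLocal:
		return stateClosed
	}
	return s
}

// Sending or receiving RST_STREAM moves any non-idle stream straight to "closed".
func (s streamState) onRSTStream() streamState { return stateClosed }

func main() {
	s := stateOpen // HEADERS sent and received
	s = s.onEndStreamSent()     // half closed (local)
	s = s.onEndStreamReceived() // closed
	fmt.Println(s == stateClosed)
}
```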
-
-
- Streams are identified with an unsigned 31-bit integer. Streams initiated by a client
- MUST use odd-numbered stream identifiers; those initiated by the server MUST use
- even-numbered stream identifiers. A stream identifier of zero (0x0) is used for
- connection control messages; the stream identifier zero cannot be used to establish a
- new stream.
-
-
- HTTP/1.1 requests that are upgraded to HTTP/2 (see ) are
- responded to with a stream identifier of one (0x1). After the upgrade
- completes, stream 0x1 is "half closed (local)" to the client. Therefore, stream 0x1
- cannot be selected as a new stream identifier by a client that upgrades from HTTP/1.1.
-
-
- The identifier of a newly established stream MUST be numerically greater than all
- streams that the initiating endpoint has opened or reserved. This governs streams that
- are opened using a HEADERS frame and streams that are reserved using
- PUSH_PROMISE. An endpoint that receives an unexpected stream identifier
- MUST respond with a connection error of
- type PROTOCOL_ERROR.
-
-
- The first use of a new stream identifier implicitly closes all streams in the "idle"
- state that might have been initiated by that peer with a lower-valued stream identifier.
- For example, if a client sends a HEADERS frame on stream 7 without ever
- sending a frame on stream 5, then stream 5 transitions to the "closed" state when the
- first frame for stream 7 is sent or received.
-
-
- Stream identifiers cannot be reused. Long-lived connections can result in an endpoint
- exhausting the available range of stream identifiers. A client that is unable to
- establish a new stream identifier can establish a new connection for new streams. A
- server that is unable to establish a new stream identifier can send a
- GOAWAY frame so that the client is forced to open a new connection for
- new streams.
-
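A small Go sketch of client-side identifier allocation following the rules above (added for illustration; the names are invented and not part of the deleted file). Note that a client which upgraded from HTTP/1.1 would start at 3, since stream 1 is taken by the upgraded request.

```go
package main

import "fmt"

// streamIDAllocator sketches client-side stream identifier allocation:
// client-initiated streams use odd identifiers, identifiers strictly
// increase, and exhausting the 31-bit space forces a new connection.
type streamIDAllocator struct {
	last uint32
}

func (a *streamIDAllocator) next() (uint32, bool) {
	var id uint32 = 1 // first client-initiated stream on a fresh connection
	if a.last != 0 {
		id = a.last + 2
	}
	if id > 1<<31-1 {
		return 0, false // identifiers exhausted; open a new connection
	}
	a.last = id
	return id, true
}

func main() {
	var a streamIDAllocator
	for i := 0; i < 3; i++ {
		id, ok := a.next()
		fmt.Println(id, ok) // 1, 3, 5
	}
}
```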
-
-
-
-
- A peer can limit the number of concurrently active streams using the
- SETTINGS_MAX_CONCURRENT_STREAMS parameter (see ) within a SETTINGS frame. The maximum concurrent
- streams setting is specific to each endpoint and applies only to the peer that receives
- the setting. That is, clients specify the maximum number of concurrent streams the
- server can initiate, and servers specify the maximum number of concurrent streams the
- client can initiate.
-
-
- Streams that are in the "open" state, or either of the "half closed" states count toward
- the maximum number of streams that an endpoint is permitted to open. Streams in any of
- these three states count toward the limit advertised in the
- SETTINGS_MAX_CONCURRENT_STREAMS setting. Streams in either of the
- "reserved" states do not count toward the stream limit.
-
-
- Endpoints MUST NOT exceed the limit set by their peer. An endpoint that receives a
- HEADERS frame that causes its advertised concurrent stream limit to be
- exceeded MUST treat this as a stream error. An
- endpoint that wishes to reduce the value of
- SETTINGS_MAX_CONCURRENT_STREAMS to a value that is below the current
- number of open streams can either close streams that exceed the new value or allow
- streams to complete.
-
-
-
-
-
-
- Using streams for multiplexing introduces contention over use of the TCP connection,
- resulting in blocked streams. A flow control scheme ensures that streams on the same
- connection do not destructively interfere with each other. Flow control is used for both
- individual streams and for the connection as a whole.
-
-
- HTTP/2 provides for flow control through use of the WINDOW_UPDATE frame.
-
-
-
-
- HTTP/2 stream flow control aims to allow a variety of flow control algorithms to be
- used without requiring protocol changes. Flow control in HTTP/2 has the following
- characteristics:
-
-
- Flow control is specific to a connection; i.e., it is "hop-by-hop", not
- "end-to-end".
-
-
- Flow control is based on window update frames. Receivers advertise how many octets
- they are prepared to receive on a stream and for the entire connection. This is a
- credit-based scheme.
-
-
- Flow control is directional with overall control provided by the receiver. A
- receiver MAY choose to set any window size that it desires for each stream and for
- the entire connection. A sender MUST respect flow control limits imposed by a
- receiver. Clients, servers and intermediaries all independently advertise their
- flow control window as a receiver and abide by the flow control limits set by
- their peer when sending.
-
-
- The initial value for the flow control window is 65,535 octets for both new streams
- and the overall connection.
-
-
- The frame type determines whether flow control applies to a frame. Of the frames
- specified in this document, only DATA frames are subject to flow
- control; all other frame types do not consume space in the advertised flow control
- window. This ensures that important control frames are not blocked by flow control.
-
-
- Flow control cannot be disabled.
-
-
- HTTP/2 defines only the format and semantics of the WINDOW_UPDATE
- frame (). This document does not stipulate how a
- receiver decides when to send this frame or the value that it sends, nor does it
- specify how a sender chooses to send packets. Implementations are able to select
- any algorithm that suits their needs.
-
-
-
-
- Implementations are also responsible for managing how requests and responses are sent
- based on priority; choosing how to avoid head of line blocking for requests; and
- managing the creation of new streams. Algorithm choices for these could interact with
- any flow control algorithm.
-
-
-
-
-
- Flow control is defined to protect endpoints that are operating under resource
- constraints. For example, a proxy needs to share memory between many connections, and
- also might have a slow upstream connection and a fast downstream one. Flow control
- addresses cases where the receiver is unable to process data on one stream, yet wants to
- continue to process other streams in the same connection.
-
-
- Deployments that do not require this capability can advertise a flow control window of
- the maximum size, incrementing the available space when new data is received. This
- effectively disables flow control for that receiver. Conversely, a sender is always
- subject to the flow control window advertised by the receiver.
-
-
- Deployments with constrained resources (for example, memory) can employ flow control to
- limit the amount of memory a peer can consume. Note, however, that this can lead to
- suboptimal use of available network resources if flow control is enabled without
- knowledge of the bandwidth-delay product (see ).
-
-
- Even with full awareness of the current bandwidth-delay product, implementation of flow
- control can be difficult. When using flow control, the receiver MUST read from the TCP
- receive buffer in a timely fashion. Failure to do so could lead to a deadlock when
- critical frames, such as WINDOW_UPDATE, are not read and acted upon.
-
-
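To illustrate the credit-based scheme described in this section, here is an editor-added Go sketch (not part of the deleted file; names are invented) of sender-side window accounting. A real sender keeps one such window per stream plus one for the connection and charges every DATA payload, including padding, against both.

```go
package main

import "fmt"

// sendWindow sketches one flow control window, starting from the default
// 65,535-octet initial size.
type sendWindow struct {
	avail int32 // may become negative after the peer shrinks the initial size
}

// consume reserves up to n octets for a DATA payload and reports how many
// may actually be sent right now.
func (w *sendWindow) consume(n int32) int32 {
	if n > w.avail {
		n = w.avail
	}
	if n < 0 {
		n = 0
	}
	w.avail -= n
	return n
}

// release applies a WINDOW_UPDATE increment received from the peer.
func (w *sendWindow) release(increment int32) { w.avail += increment }

func main() {
	w := &sendWindow{avail: 65535}
	fmt.Println(w.consume(70000)) // 65535; the remainder waits for WINDOW_UPDATE
	w.release(16384)
	fmt.Println(w.consume(70000)) // 16384
}
```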
-
-
-
-
- A client can assign a priority for a new stream by including prioritization information in
- the HEADERS frame that opens the stream. For an existing
- stream, the PRIORITY frame can be used to change the
- priority.
-
-
- The purpose of prioritization is to allow an endpoint to express how it would prefer its
- peer allocate resources when managing concurrent streams. Most importantly, priority can
- be used to select streams for transmitting frames when there is limited capacity for
- sending.
-
-
- Streams can be prioritized by marking them as dependent on the completion of other streams
- (). Each dependency is assigned a relative weight, a number
- that is used to determine the relative proportion of available resources that are assigned
- to streams dependent on the same stream.
-
-
-
- Explicitly setting the priority for a stream is input to a prioritization process. It
- does not guarantee any particular processing or transmission order for the stream relative
- to any other stream. An endpoint cannot force a peer to process concurrent streams in a
- particular order using priority. Expressing priority is therefore only ever a suggestion.
-
-
- Providing prioritization information is optional, so default values are used if no
- explicit indicator is provided ().
-
-
-
-
- Each stream can be given an explicit dependency on another stream. Including a
- dependency expresses a preference to allocate resources to the identified stream rather
- than to the dependent stream.
-
-
- A stream that is not dependent on any other stream is given a stream dependency of 0x0.
- In other words, the non-existent stream 0 forms the root of the tree.
-
-
- A stream that depends on another stream is a dependent stream. The stream upon which a
- stream is dependent is a parent stream. A dependency on a stream that is not currently
- in the tree - such as a stream in the "idle" state - results in that stream being given
- a default priority.
-
-
- When assigning a dependency on another stream, the stream is added as a new dependency
- of the parent stream. Dependent streams that share the same parent are not ordered with
- respect to each other. For example, if streams B and C are dependent on stream A, and
- if stream D is created with a dependency on stream A, this results in a dependency order
- of A followed by B, C, and D in any order.
-
-
-
- An exclusive flag allows for the insertion of a new level of dependencies. The
- exclusive flag causes the stream to become the sole dependency of its parent stream,
- causing other dependencies to become dependent on the exclusive stream. In the
- previous example, if stream D is created with an exclusive dependency on stream A, this
- results in D becoming the dependency parent of B and C.
-
-
-
- Inside the dependency tree, a dependent stream SHOULD only be allocated resources if all
- of the streams that it depends on (the chain of parent streams up to 0x0) are either
- closed, or it is not possible to make progress on them.
-
-
- A stream cannot depend on itself. An endpoint MUST treat this as a stream error of type PROTOCOL_ERROR.
-
-
-
-
-
- All dependent streams are allocated an integer weight between 1 and 256 (inclusive).
-
-
- Streams with the same parent SHOULD be allocated resources proportionally based on their
- weight. Thus, if stream B depends on stream A with weight 4, and C depends on stream A
- with weight 12, and if no progress can be made on A, stream B ideally receives one third
- of the resources allocated to stream C.
-
-
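A tiny Go sketch of the proportional allocation just described (added for illustration; not part of the deleted file). With weights 4 and 12 on a blocked parent, the lighter stream receives one third of what the heavier one receives.

```go
package main

import "fmt"

// shares splits available capacity among sibling streams in proportion to
// their weights (1..256).
func shares(capacity float64, weights map[uint32]int) map[uint32]float64 {
	total := 0
	for _, w := range weights {
		total += w
	}
	out := make(map[uint32]float64, len(weights))
	for id, w := range weights {
		out[id] = capacity * float64(w) / float64(total)
	}
	return out
}

func main() {
	// Streams 3 (weight 4) and 5 (weight 12) share a parent that cannot progress.
	fmt.Println(shares(16, map[uint32]int{3: 4, 5: 12})) // map[3:4 5:12]
}
```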
-
-
-
- Stream priorities are changed using the PRIORITY frame. Setting a
- dependency causes a stream to become dependent on the identified parent stream.
-
-
- Dependent streams move with their parent stream if the parent is reprioritized. Setting
- a dependency with the exclusive flag for a reprioritized stream moves all the
- dependencies of the new parent stream to become dependent on the reprioritized stream.
-
-
- If a stream is made dependent on one of its own dependencies, the formerly dependent
- stream is first moved to be dependent on the reprioritized stream's previous parent.
- The moved dependency retains its weight.
-
-
-
-
-
-
- When a stream is removed from the dependency tree, its dependencies can be moved to
- become dependent on the parent of the closed stream. The weights of new dependencies
- are recalculated by distributing the weight of the dependency of the closed stream
- proportionally based on the weights of its dependencies.
-
-
- Streams that are removed from the dependency tree cause some prioritization information
- to be lost. Resources are shared between streams with the same parent stream, which
- means that if a stream in that set closes or becomes blocked, any spare capacity
- allocated to a stream is distributed to the immediate neighbors of the stream. However,
- if the common dependency is removed from the tree, those streams share resources with
- streams at the next highest level.
-
-
- For example, assume streams A and B share a parent, and streams C and D both depend on
- stream A. Prior to the removal of stream A, if streams A and D are unable to proceed,
- then stream C receives all the resources dedicated to stream A. If stream A is removed
- from the tree, the weight of stream A is divided between streams C and D. If stream D
- is still unable to proceed, this results in stream C receiving a reduced proportion of
- resources. For equal starting weights, C receives one third, rather than one half, of
- available resources.
-
-
- It is possible for a stream to become closed while prioritization information that
- creates a dependency on that stream is in transit. If a stream identified in a
- dependency has no associated priority information, then the dependent stream is instead
- assigned a default priority. This potentially creates
- suboptimal prioritization, since the stream could be given a priority that is different
- to what is intended.
-
-
- To avoid these problems, an endpoint SHOULD retain stream prioritization state for a
- period after streams become closed. The longer state is retained, the lower the chance
- that streams are assigned incorrect or default priority values.
-
-
- This could create a large state burden for an endpoint, so this state MAY be limited.
- An endpoint MAY apply a fixed upper limit on the number of closed streams for which
- prioritization state is tracked to limit state exposure. The amount of additional state
- an endpoint maintains could be dependent on load; under high load, prioritization state
- can be discarded to limit resource commitments. In extreme cases, an endpoint could
- even discard prioritization state for active or reserved streams. If a fixed limit is
- applied, endpoints SHOULD maintain state for at least as many streams as allowed by
- their setting for SETTINGS_MAX_CONCURRENT_STREAMS.
-
-
- An endpoint receiving a PRIORITY frame that changes the priority of a
- closed stream SHOULD alter the dependencies of the streams that depend on it, if it has
- retained enough state to do so.
-
-
-
-
-
- Providing priority information is optional. Streams are assigned a non-exclusive
- dependency on stream 0x0 by default. Pushed streams
- initially depend on their associated stream. In both cases, streams are assigned a
- default weight of 16.
-
-
-
-
-
-
- HTTP/2 framing permits two classes of error:
-
-
- An error condition that renders the entire connection unusable is a connection error.
-
-
- An error in an individual stream is a stream error.
-
-
-
-
- A list of error codes is included in .
-
-
-
-
- A connection error is any error which prevents further processing of the framing layer,
- or which corrupts any connection state.
-
-
- An endpoint that encounters a connection error SHOULD first send a GOAWAY
- frame () with the stream identifier of the last stream that it
- successfully received from its peer. The GOAWAY frame includes an error
- code that indicates why the connection is terminating. After sending the
- GOAWAY frame, the endpoint MUST close the TCP connection.
-
-
- It is possible that the GOAWAY will not be reliably received by the
- receiving endpoint (see ). In the event of a connection error,
- GOAWAY only provides a best effort attempt to communicate with the peer
- about why the connection is being terminated.
-
-
- An endpoint can end a connection at any time. In particular, an endpoint MAY choose to
- treat a stream error as a connection error. Endpoints SHOULD send a
- GOAWAY frame when ending a connection, providing that circumstances
- permit it.
-
-
-
-
-
- A stream error is an error related to a specific stream that does not affect processing
- of other streams.
-
-
- An endpoint that detects a stream error sends a RST_STREAM frame () that contains the stream identifier of the stream where the error
- occurred. The RST_STREAM frame includes an error code that indicates the
- type of error.
-
-
- A RST_STREAM is the last frame that an endpoint can send on a stream.
- The peer that sends the RST_STREAM frame MUST be prepared to receive any
- frames that were sent or enqueued for sending by the remote peer. These frames can be
- ignored, except where they modify connection state (such as the state maintained for
- header compression, or flow control).
-
-
- Normally, an endpoint SHOULD NOT send more than one RST_STREAM frame for
- any stream. However, an endpoint MAY send additional RST_STREAM frames if
- it receives frames on a closed stream after more than a round-trip time. This behavior
- is permitted to deal with misbehaving implementations.
-
-
- An endpoint MUST NOT send a RST_STREAM in response to an
- RST_STREAM frame, to avoid looping.
-
-
-
-
-
- If the TCP connection is closed or reset while streams remain in open or half closed
- states, then the endpoint MUST assume that those streams were abnormally interrupted and
- could be incomplete.
-
-
-
-
-
-
- HTTP/2 permits extension of the protocol. Protocol extensions can be used to provide
- additional services or alter any aspect of the protocol, within the limitations described
- in this section. Extensions are effective only within the scope of a single HTTP/2
- connection.
-
-
- Extensions are permitted to use new frame types, new
- settings, or new error
- codes. Registries are established for managing these extension points: frame types, settings and
- error codes.
-
-
- Implementations MUST ignore unknown or unsupported values in all extensible protocol
- elements. Implementations MUST discard frames that have unknown or unsupported types.
- This means that any of these extension points can be safely used by extensions without
- prior arrangement or negotiation. However, extension frames that appear in the middle of
- a header block are not permitted; these MUST be treated
- as a connection error of type
- PROTOCOL_ERROR.
-
-
- However, extensions that could change the semantics of existing protocol components MUST
- be negotiated before being used. For example, an extension that changes the layout of the
- HEADERS frame cannot be used until the peer has given a positive signal
- that this is acceptable. In this case, it could also be necessary to coordinate when the
- revised layout comes into effect. Note that treating any frame other than
- DATA frames as flow controlled is such a change in semantics, and can only
- be done through negotiation.
-
-
- This document doesn't mandate a specific method for negotiating the use of an extension,
- but notes that a setting could be used for that
- purpose. If both peers set a value that indicates willingness to use the extension, then
- the extension can be used. If a setting is used for extension negotiation, the initial
- value MUST be defined so that the extension is initially disabled.
-
-
-
-
-
-
- This specification defines a number of frame types, each identified by a unique 8-bit type
- code. Each frame type serves a distinct purpose either in the establishment and management
- of the connection as a whole, or of individual streams.
-
-
- The transmission of specific frame types can alter the state of a connection. If endpoints
- fail to maintain a synchronized view of the connection state, successful communication
- within the connection will no longer be possible. Therefore, it is important that endpoints
- have a shared comprehension of how the state is affected by the use of any given frame.
-
-
-
-
- DATA frames (type=0x0) convey arbitrary, variable-length sequences of octets associated
- with a stream. One or more DATA frames are used, for instance, to carry HTTP request or
- response payloads.
-
-
- DATA frames MAY also contain arbitrary padding. Padding can be added to DATA frames to
- obscure the size of messages.
-
-
-
- The DATA frame contains the following fields:
-
-
- An 8-bit field containing the length of the frame padding in units of octets. This
- field is optional and is only present if the PADDED flag is set.
-
-
- Application data. The amount of data is the remainder of the frame payload after
- subtracting the length of the other fields that are present.
-
-
- Padding octets that contain no application semantic value. Padding octets MUST be set
- to zero when sending and ignored when receiving.
-
-
-
-
-
- The DATA frame defines the following flags:
-
-
- Bit 1 being set indicates that this frame is the last that the endpoint will send for
- the identified stream. Setting this flag causes the stream to enter one of the "half closed" states or the "closed" state.
-
-
- Bit 4 being set indicates that the Pad Length field and any padding that it describes
- is present.
-
-
-
-
- DATA frames MUST be associated with a stream. If a DATA frame is received whose stream
- identifier field is 0x0, the recipient MUST respond with a connection error of type
- PROTOCOL_ERROR.
-
-
- DATA frames are subject to flow control and can only be sent when a stream is in the
- "open" or "half closed (remote)" states. The entire DATA frame payload is included in flow
- control, including Pad Length and Padding fields if present. If a DATA frame is received
- whose stream is not in "open" or "half closed (local)" state, the recipient MUST respond
- with a stream error of type
- STREAM_CLOSED.
-
-
- The total number of padding octets is determined by the value of the Pad Length field. If
- the length of the padding is greater than the length of the frame payload, the recipient
- MUST treat this as a connection error of
- type PROTOCOL_ERROR.
-
-
- A frame can be increased in size by one octet by including a Pad Length field with a
- value of zero.
-
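An editor-added Go sketch of the padding rules above (not part of the deleted file; the function name is invented), assuming the PADDED flag is set so that the first payload octet is Pad Length:

```go
package main

import (
	"errors"
	"fmt"
)

// stripPadding recovers application data from a padded DATA frame payload:
// the first octet is Pad Length and that many octets at the end are padding.
func stripPadding(payload []byte) ([]byte, error) {
	if len(payload) < 1 {
		return nil, errors.New("FRAME_SIZE_ERROR: missing Pad Length field")
	}
	padLen := int(payload[0])
	if padLen >= len(payload) {
		// Padding longer than the rest of the payload is a connection error.
		return nil, errors.New("PROTOCOL_ERROR: padding exceeds frame payload")
	}
	return payload[1 : len(payload)-padLen], nil
}

func main() {
	data, err := stripPadding([]byte{2, 'h', 'i', 0, 0}) // "hi" plus two padding octets
	fmt.Println(string(data), err)
}
```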
-
-
-
- Padding is a security feature; see .
-
-
-
-
-
- The HEADERS frame (type=0x1) is used to open a stream,
- and additionally carries a header block fragment. HEADERS frames can be sent on a stream
- in the "open" or "half closed (remote)" states.
-
-
-
- The HEADERS frame payload has the following fields:
-
-
- An 8-bit field containing the length of the frame padding in units of octets. This
- field is only present if the PADDED flag is set.
-
-
- A single bit flag indicates that the stream dependency is exclusive, see . This field is only present if the PRIORITY flag is set.
-
-
- A 31-bit stream identifier for the stream that this stream depends on, see . This field is only present if the PRIORITY flag is set.
-
-
- An 8-bit weight for the stream, see . Add one to the
- value to obtain a weight between 1 and 256. This field is only present if the
- PRIORITY flag is set.
-
-
- A header block fragment.
-
-
- Padding octets that contain no application semantic value. Padding octets MUST be set
- to zero when sending and ignored when receiving.
-
-
-
-
-
- The HEADERS frame defines the following flags:
-
-
-
- Bit 1 being set indicates that the header block is
- the last that the endpoint will send for the identified stream. Setting this flag
- causes the stream to enter one of "half closed"
- states.
-
-
- A HEADERS frame carries the END_STREAM flag that signals the end of a stream.
- However, a HEADERS frame with the END_STREAM flag set can be followed by
- CONTINUATION frames on the same stream. Logically, the
- CONTINUATION frames are part of the HEADERS frame.
-
-
-
-
- Bit 3 being set indicates that this frame contains an entire header block and is not followed by any
- CONTINUATION frames.
-
-
- A HEADERS frame without the END_HEADERS flag set MUST be followed by a
- CONTINUATION frame for the same stream. A receiver MUST treat the
- receipt of any other type of frame or a frame on a different stream as a connection error of type
- PROTOCOL_ERROR.
-
-
-
-
- Bit 4 being set indicates that the Pad Length field and any padding that it
- describes is present.
-
-
-
-
- Bit 6 being set indicates that the Exclusive Flag (E), Stream Dependency, and Weight
- fields are present; see .
-
-
-
-
-
-
- The payload of a HEADERS frame contains a header block
- fragment. A header block that does not fit within a HEADERS frame is continued in
- a CONTINUATION frame.
-
-
-
- HEADERS frames MUST be associated with a stream. If a HEADERS frame is received whose
- stream identifier field is 0x0, the recipient MUST respond with a connection error of type
- PROTOCOL_ERROR.
-
-
-
- The HEADERS frame changes the connection state as described in .
-
-
-
- The HEADERS frame includes optional padding. Padding fields and flags are identical to
- those defined for DATA frames.
-
-
- Prioritization information in a HEADERS frame is logically equivalent to a separate
- PRIORITY frame, but inclusion in HEADERS avoids the potential for churn in
- stream prioritization when new streams are created. Prioritization fields in HEADERS frames
- subsequent to the first on a stream reprioritize the
- stream.
-
-
-
-
-
- The PRIORITY frame (type=0x2) specifies the sender-advised
- priority of a stream. It can be sent at any time for an existing stream, including
- closed streams. This enables reprioritization of existing streams.
-
-
-
- The payload of a PRIORITY frame contains the following fields:
-
-
- A single bit flag indicates that the stream dependency is exclusive, see .
-
-
- A 31-bit stream identifier for the stream that this stream depends on, see .
-
-
- An 8-bit weight for the identified stream dependency, see . Add one to the value to obtain a weight between 1 and 256.
-
-
-
-
-
- The PRIORITY frame does not define any flags.
-
-
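An editor-added Go sketch of decoding the 5-octet PRIORITY payload described above (not part of the deleted file; names are invented): a 1-bit exclusive flag, a 31-bit stream dependency, and an 8-bit weight to which one is added to obtain the effective 1..256 weight. The same three fields appear in HEADERS frames when the PRIORITY flag is set.

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// priority holds the decoded PRIORITY payload fields.
type priority struct {
	Exclusive bool
	DependsOn uint32 // 31-bit parent stream identifier
	Weight    int    // effective weight, 1..256
}

// parsePriority decodes the fixed 5-octet PRIORITY payload.
func parsePriority(p [5]byte) priority {
	dep := binary.BigEndian.Uint32(p[0:4])
	return priority{
		Exclusive: dep&0x80000000 != 0,
		DependsOn: dep & 0x7fffffff,
		Weight:    int(p[4]) + 1,
	}
}

func main() {
	// Exclusive dependency on stream 7 with stored weight 15 (effective 16).
	fmt.Printf("%+v\n", parsePriority([5]byte{0x80, 0, 0, 7, 15}))
}
```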
-
- The PRIORITY frame is associated with an existing stream. If a PRIORITY frame is received
- with a stream identifier of 0x0, the recipient MUST respond with a connection error of type
- PROTOCOL_ERROR.
-
-
- The PRIORITY frame can be sent on a stream in any of the "reserved (remote)", "open",
- "half closed (local)", "half closed (remote)", or "closed" states, though it cannot be
- sent between consecutive frames that comprise a single header
- block. Note that this frame could arrive after processing or frame sending has
- completed, which would cause it to have no effect on the current stream. For a stream
- that is in the "half closed (remote)" or "closed" state, this frame can only affect
- processing of the current stream and not frame transmission.
-
-
- The PRIORITY frame is the only frame that can be sent for a stream in the "closed" state.
- This allows for the reprioritization of a group of dependent streams by altering the
- priority of a parent stream, which might be closed. However, a PRIORITY frame sent on a
- closed stream risks being ignored due to the peer having discarded priority state
- information for that stream.
-
-
-
-
-
- The RST_STREAM frame (type=0x3) allows for abnormal termination of a stream. When sent by
- the initiator of a stream, it indicates that they wish to cancel the stream or that an
- error condition has occurred. When sent by the receiver of a stream, it indicates that
- either the receiver is rejecting the stream, requesting that the stream be cancelled, or
- that an error condition has occurred.
-
-
-
-
- The RST_STREAM frame contains a single unsigned, 32-bit integer identifying the error code. The error code indicates why the stream is being
- terminated.
-
-
-
- The RST_STREAM frame does not define any flags.
-
-
-
- The RST_STREAM frame fully terminates the referenced stream and causes it to enter the
- closed state. After receiving a RST_STREAM on a stream, the receiver MUST NOT send
- additional frames for that stream, with the exception of PRIORITY. However,
- after sending the RST_STREAM, the sending endpoint MUST be prepared to receive and process
- additional frames sent on the stream that might have been sent by the peer prior to the
- arrival of the RST_STREAM.
-
-
-
- RST_STREAM frames MUST be associated with a stream. If a RST_STREAM frame is received
- with a stream identifier of 0x0, the recipient MUST treat this as a connection error of type
- PROTOCOL_ERROR.
-
-
-
- RST_STREAM frames MUST NOT be sent for a stream in the "idle" state. If a RST_STREAM
- frame identifying an idle stream is received, the recipient MUST treat this as a connection error of type
- PROTOCOL_ERROR.
-
-
-
-
-
-
- The SETTINGS frame (type=0x4) conveys configuration parameters that affect how endpoints
- communicate, such as preferences and constraints on peer behavior. The SETTINGS frame is
- also used to acknowledge the receipt of those parameters. Individually, a SETTINGS
- parameter can also be referred to as a "setting".
-
-
- SETTINGS parameters are not negotiated; they describe characteristics of the sending peer,
- which are used by the receiving peer. Different values for the same parameter can be
- advertised by each peer. For example, a client might set a high initial flow control
- window, whereas a server might set a lower value to conserve resources.
-
-
-
- A SETTINGS frame MUST be sent by both endpoints at the start of a connection, and MAY be
- sent at any other time by either endpoint over the lifetime of the connection.
- Implementations MUST support all of the parameters defined by this specification.
-
-
-
- Each parameter in a SETTINGS frame replaces any existing value for that parameter.
- Parameters are processed in the order in which they appear, and a receiver of a SETTINGS
- frame does not need to maintain any state other than the current value of its
- parameters. Therefore, the value of a SETTINGS parameter is the last value that is seen by
- a receiver.
-
-
- SETTINGS parameters are acknowledged by the receiving peer. To enable this, the SETTINGS
- frame defines the following flag:
-
-
- Bit 1 being set indicates that this frame acknowledges receipt and application of the
- peer's SETTINGS frame. When this bit is set, the payload of the SETTINGS frame MUST
- be empty. Receipt of a SETTINGS frame with the ACK flag set and a length field value
- other than 0 MUST be treated as a connection
- error of type FRAME_SIZE_ERROR. For more information, see Settings Synchronization.
-
-
-
-
- SETTINGS frames always apply to a connection, never a single stream. The stream
- identifier for a SETTINGS frame MUST be zero (0x0). If an endpoint receives a SETTINGS
- frame whose stream identifier field is anything other than 0x0, the endpoint MUST respond
- with a connection error of type
- PROTOCOL_ERROR.
-
-
- The SETTINGS frame affects connection state. A badly formed or incomplete SETTINGS frame
- MUST be treated as a connection error of type
- PROTOCOL_ERROR.
-
-
-
-
- The payload of a SETTINGS frame consists of zero or more parameters, each consisting of
- an unsigned 16-bit setting identifier and an unsigned 32-bit value.
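-
- As a rough sketch of that layout in Go (hypothetical helpers, not any library's real API),
- each parameter occupies 6 octets, so a payload whose length is not a multiple of 6 cannot
- be parsed:
-
-   package main
-
-   import (
-       "encoding/binary"
-       "errors"
-       "fmt"
-   )
-
-   // setting is one SETTINGS parameter: a 16-bit identifier and a 32-bit value.
-   type setting struct {
-       ID  uint16
-       Val uint32
-   }
-
-   // encodeSettings serializes parameters, in order, into a SETTINGS payload.
-   func encodeSettings(params []setting) []byte {
-       buf := make([]byte, 0, 6*len(params))
-       for _, s := range params {
-           var b [6]byte
-           binary.BigEndian.PutUint16(b[0:2], s.ID)
-           binary.BigEndian.PutUint32(b[2:6], s.Val)
-           buf = append(buf, b[:]...)
-       }
-       return buf
-   }
-
-   // decodeSettings parses a SETTINGS payload; a length that is not a multiple
-   // of 6 octets cannot hold whole parameters and is rejected.
-   func decodeSettings(p []byte) ([]setting, error) {
-       if len(p)%6 != 0 {
-           return nil, errors.New("SETTINGS payload length must be a multiple of 6")
-       }
-       out := make([]setting, 0, len(p)/6)
-       for i := 0; i < len(p); i += 6 {
-           out = append(out, setting{
-               ID:  binary.BigEndian.Uint16(p[i : i+2]),
-               Val: binary.BigEndian.Uint32(p[i+2 : i+6]),
-           })
-       }
-       return out, nil
-   }
-
-   func main() {
-       p := encodeSettings([]setting{{ID: 0x4, Val: 1 << 20}}) // 0x4: initial window size
-       fmt.Println(decodeSettings(p))                          // [{4 1048576}] <nil>
-   }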
-
-
-
-
-
-
-
- The following parameters are defined:
-
-
-
- Allows the sender to inform the remote endpoint of the maximum size of the header
- compression table used to decode header blocks, in octets. The encoder can select
- any size equal to or less than this value by using signaling specific to the
- header compression format inside a header block. The initial value is 4,096
- octets.
-
-
-
-
- This setting can be used to disable server
- push. An endpoint MUST NOT send a PUSH_PROMISE frame if it
- receives this parameter set to a value of 0. An endpoint that has both set this
- parameter to 0 and had it acknowledged MUST treat the receipt of a
- PUSH_PROMISE frame as a connection error of type
- PROTOCOL_ERROR.
-
-
- The initial value is 1, which indicates that server push is permitted. Any value
- other than 0 or 1 MUST be treated as a connection error of type
- PROTOCOL_ERROR.
-
-
-
-
- Indicates the maximum number of concurrent streams that the sender will allow.
- This limit is directional: it applies to the number of streams that the sender
- permits the receiver to create. Initially there is no limit to this value. It is
- recommended that this value be no smaller than 100, so as to not unnecessarily
- limit parallelism.
-
-
- A value of 0 for SETTINGS_MAX_CONCURRENT_STREAMS SHOULD NOT be treated as special
- by endpoints. A zero value does prevent the creation of new streams; however, this
- can also happen for any limit that is exhausted with active streams. Servers
- SHOULD only set a zero value for short durations; if a server does not wish to
- accept requests, closing the connection could be preferable.
-
-
-
-
- Indicates the sender's initial window size (in octets) for stream level flow
- control. The initial value is 2^16-1 (65,535) octets.
-
-
- This setting affects the window size of all streams, including existing streams,
- see .
-
-
- Values above the maximum flow control window size of 2^31-1 MUST
- be treated as a connection error of
- type FLOW_CONTROL_ERROR.
-
-
-
-
- Indicates the size of the largest frame payload that the sender is willing to
- receive, in octets.
-
-
- The initial value is 2^14 (16,384) octets. The value advertised by
- an endpoint MUST be between this initial value and the maximum allowed frame size
- (2^24-1 or 16,777,215 octets), inclusive. Values outside this range
- MUST be treated as a connection error
- of type PROTOCOL_ERROR.
-
-
-
-
- This advisory setting informs a peer of the maximum size of header list that the
- sender is prepared to accept, in octets. The value is based on the uncompressed
- size of header fields, including the length of the name and value in octets plus
- an overhead of 32 octets for each header field.
-
-
- For any given request, a lower limit than what is advertised MAY be enforced. The
- initial value of this setting is unlimited.
-
-
-
-
-
- An endpoint that receives a SETTINGS frame with any unknown or unsupported identifier
- MUST ignore that setting.
-
-
-
-
-
- Most values in SETTINGS benefit from or require an understanding of when the peer has
- received and applied the changed parameter values. In order to provide
- such synchronization timepoints, the recipient of a SETTINGS frame in which the ACK flag
- is not set MUST apply the updated parameters as soon as possible upon receipt.
-
-
- The values in the SETTINGS frame MUST be processed in the order they appear, with no
- other frame processing between values. Unsupported parameters MUST be ignored. Once
- all values have been processed, the recipient MUST immediately emit a SETTINGS frame
- with the ACK flag set. Upon receiving a SETTINGS frame with the ACK flag set, the sender
- of the altered parameters can rely on the setting having been applied.
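-
- A hypothetical receiver-side sketch of that ordering in Go (the names here are illustrative
- assumptions, not a real API): parameters are applied one at a time in the order received,
- unknown identifiers are skipped, and the ACK is emitted only after the last parameter has
- been applied.
-
-   package main
-
-   import "fmt"
-
-   // param is one received SETTINGS parameter (identifier, value).
-   type param struct {
-       ID  uint16
-       Val uint32
-   }
-
-   // current holds this endpoint's view of its peer's settings; the value of a
-   // parameter is simply the last value seen.
-   var current = map[uint16]uint32{}
-
-   // processSettings applies parameters in order, ignores unknown identifiers,
-   // and reports that an empty SETTINGS frame with the ACK flag should now be sent.
-   func processSettings(params []param) (ackNow bool) {
-       for _, p := range params {
-           switch p.ID {
-           case 0x1, 0x2, 0x3, 0x4, 0x5, 0x6: // identifiers defined by this document
-               current[p.ID] = p.Val // later values replace earlier ones
-           default:
-               // Unknown or unsupported identifiers MUST be ignored.
-           }
-       }
-       return true
-   }
-
-   func main() {
-       processSettings([]param{{ID: 0x3, Val: 100}, {ID: 0x3, Val: 250}})
-       fmt.Println(current[0x3]) // 250: the last value seen wins
-   }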
-
-
- If the sender of a SETTINGS frame does not receive an acknowledgement within a
- reasonable amount of time, it MAY issue a connection error of type
- SETTINGS_TIMEOUT.
-
-
-
-
-
-
- The PUSH_PROMISE frame (type=0x5) is used to notify the peer endpoint in advance of
- streams the sender intends to initiate. The PUSH_PROMISE frame includes the unsigned
- 31-bit identifier of the stream the endpoint plans to create along with a set of headers
- that provide additional context for the stream. contains a
- thorough description of the use of PUSH_PROMISE frames.
-
-
-
-
- The PUSH_PROMISE frame payload has the following fields:
-
-
- An 8-bit field containing the length of the frame padding in units of octets. This
- field is only present if the PADDED flag is set.
-
-
- A single reserved bit.
-
-
- An unsigned 31-bit integer that identifies the stream that is reserved by the
- PUSH_PROMISE. The promised stream identifier MUST be a valid choice for the next
- stream sent by the sender (see new stream
- identifier).
-
-
- A header block fragment containing request header
- fields.
-
-
- Padding octets.
-
-
-
-
-
- The PUSH_PROMISE frame defines the following flags:
-
-
-
- Bit 3 being set indicates that this frame contains an entire header block and is not followed by any
- CONTINUATION frames.
-
-
- A PUSH_PROMISE frame without the END_HEADERS flag set MUST be followed by a
- CONTINUATION frame for the same stream. A receiver MUST treat the receipt of any
- other type of frame or a frame on a different stream as a connection error of type
- PROTOCOL_ERROR.
-
-
-
-
- Bit 4 being set indicates that the Pad Length field and any padding that it
- describes are present.
-
-
-
-
-
-
- PUSH_PROMISE frames MUST be associated with an existing, peer-initiated stream. The stream
- identifier of a PUSH_PROMISE frame indicates the stream it is associated with. If the
- stream identifier field specifies the value 0x0, a recipient MUST respond with a connection error of type
- PROTOCOL_ERROR.
-
-
-
- Promised streams are not required to be used in the order they are promised. The
- PUSH_PROMISE only reserves stream identifiers for later use.
-
-
-
- PUSH_PROMISE MUST NOT be sent if the SETTINGS_ENABLE_PUSH setting of the
- peer endpoint is set to 0. An endpoint that has set this setting and has received
- acknowledgement MUST treat the receipt of a PUSH_PROMISE frame as a connection error of type
- PROTOCOL_ERROR.
-
-
- Recipients of PUSH_PROMISE frames can choose to reject promised streams by returning a
- RST_STREAM referencing the promised stream identifier back to the sender of
- the PUSH_PROMISE.
-
-
-
- A PUSH_PROMISE frame modifies the connection state in two ways. The inclusion of a header block potentially modifies the state maintained for
- header compression. PUSH_PROMISE also reserves a stream for later use, causing the
- promised stream to enter the "reserved" state. A sender MUST NOT send a PUSH_PROMISE on a
- stream unless that stream is either "open" or "half closed (remote)"; the sender MUST
- ensure that the promised stream is a valid choice for a new stream identifier (that is, the promised stream MUST
- be in the "idle" state).
-
-
- Since PUSH_PROMISE reserves a stream, ignoring a PUSH_PROMISE frame causes the stream
- state to become indeterminate. A receiver MUST treat the receipt of a PUSH_PROMISE on a
- stream that is neither "open" nor "half closed (local)" as a connection error of type
- PROTOCOL_ERROR. However, an endpoint that has sent
- RST_STREAM on the associated stream MUST handle PUSH_PROMISE frames that
- might have been created before the RST_STREAM frame is received and
- processed.
-
-
- A receiver MUST treat the receipt of a PUSH_PROMISE that promises an illegal stream identifier (that is, an identifier for a
- stream that is not currently in the "idle" state) as a connection error of type
- PROTOCOL_ERROR.
-
-
-
- The PUSH_PROMISE frame includes optional padding. Padding fields and flags are identical
- to those defined for DATA frames.
-
-
-
-
-
- The PING frame (type=0x6) is a mechanism for measuring a minimal round trip time from the
- sender, as well as determining whether an idle connection is still functional. PING
- frames can be sent from any endpoint.
-
-
-
-
- In addition to the frame header, PING frames MUST contain 8 octets of data in the payload.
- A sender can include any value it chooses and use those bytes in any fashion.
-
-
- Receivers of a PING frame that does not include an ACK flag MUST send a PING frame with
- the ACK flag set in response, with an identical payload. PING responses SHOULD be given
- higher priority than any other frame.
-
-
-
- The PING frame defines the following flags:
-
-
- Bit 1 being set indicates that this PING frame is a PING response. An endpoint MUST
- set this flag in PING responses. An endpoint MUST NOT respond to PING frames
- containing this flag.
-
-
-
-
- PING frames are not associated with any individual stream. If a PING frame is received
- with a stream identifier field value other than 0x0, the recipient MUST respond with a
- connection error of type
- PROTOCOL_ERROR.
-
-
- Receipt of a PING frame with a length field value other than 8 MUST be treated as a connection error of type
- FRAME_SIZE_ERROR.
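-
- A hypothetical Go sketch of the receiver behaviour described above (illustrative names,
- not a real library API): a PING without the ACK flag is answered with a PING that sets ACK
- and echoes the same 8 octets, while a PING that already carries ACK is not answered.
-
-   package main
-
-   import (
-       "errors"
-       "fmt"
-   )
-
-   const flagPingAck = 0x1 // the ACK flag bit for PING frames
-
-   // handlePing decides how to respond to a received PING frame; a nil payload
-   // in the result means no response is required.
-   func handlePing(flags uint8, payload []byte) (respFlags uint8, respPayload []byte, err error) {
-       if len(payload) != 8 {
-           // Any other length is a connection error of type FRAME_SIZE_ERROR.
-           return 0, nil, errors.New("PING payload must be exactly 8 octets")
-       }
-       if flags&flagPingAck != 0 {
-           return 0, nil, nil // this is itself a PING response; do not reply
-       }
-       echo := make([]byte, 8)
-       copy(echo, payload) // the response carries an identical payload
-       return flagPingAck, echo, nil
-   }
-
-   func main() {
-       f, p, _ := handlePing(0, []byte{1, 2, 3, 4, 5, 6, 7, 8})
-       fmt.Printf("flags=%#x payload=%v\n", f, p)
-   }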
-
-
-
-
-
-
- The GOAWAY frame (type=0x7) informs the remote peer to stop creating streams on this
- connection. GOAWAY can be sent by either the client or the server. Once sent, the sender
- will ignore frames sent on any new streams with identifiers higher than the included last
- stream identifier. Receivers of a GOAWAY frame MUST NOT open additional streams on the
- connection, although a new connection can be established for new streams.
-
-
- The purpose of this frame is to allow an endpoint to gracefully stop accepting new
- streams, while still finishing processing of previously established streams. This enables
- administrative actions, like server maintenance.
-
-
- There is an inherent race condition between an endpoint starting new streams and the
- remote sending a GOAWAY frame. To deal with this case, the GOAWAY contains the stream
- identifier of the last peer-initiated stream which was or might be processed on the
- sending endpoint in this connection. For instance, if the server sends a GOAWAY frame,
- the identified stream is the highest numbered stream initiated by the client.
-
-
- If the receiver of the GOAWAY has sent data on streams with a higher stream identifier
- than what is indicated in the GOAWAY frame, those streams are not or will not be
- processed. The receiver of the GOAWAY frame can treat the streams as though they had
- never been created at all, thereby allowing those streams to be retried later on a new
- connection.
-
-
- Endpoints SHOULD always send a GOAWAY frame before closing a connection so that the remote
- can know whether a stream has been partially processed or not. For example, if an HTTP
- client sends a POST at the same time that a server closes a connection, the client cannot
- know if the server started to process that POST request if the server does not send a
- GOAWAY frame to indicate what streams it might have acted on.
-
-
- An endpoint might choose to close a connection without sending GOAWAY for misbehaving
- peers.
-
-
-
-
- The GOAWAY frame does not define any flags.
-
-
- The GOAWAY frame applies to the connection, not a specific stream. An endpoint MUST treat
- a GOAWAY frame with a stream identifier other than 0x0 as a connection error of type
- PROTOCOL_ERROR.
-
-
- The last stream identifier in the GOAWAY frame contains the highest numbered stream
- identifier for which the sender of the GOAWAY frame might have taken some action on, or
- might yet take action on. All streams up to and including the identified stream might
- have been processed in some way. The last stream identifier can be set to 0 if no streams
- were processed.
-
-
- In this context, "processed" means that some data from the stream was passed to some
- higher layer of software that might have taken some action as a result.
-
-
- If a connection terminates without a GOAWAY frame, the last stream identifier is
- effectively the highest possible stream identifier.
-
-
- On streams with lower or equal numbered identifiers that were not closed completely prior
- to the connection being closed, re-attempting requests, transactions, or any protocol
- activity is not possible, with the exception of idempotent actions like HTTP GET, PUT, or
- DELETE. Any protocol activity that uses higher numbered streams can be safely retried
- using a new connection.
-
-
- Activity on streams numbered lower or equal to the last stream identifier might still
- complete successfully. The sender of a GOAWAY frame might gracefully shut down a
- connection by sending a GOAWAY frame, maintaining the connection in an open state until
- all in-progress streams complete.
-
-
- An endpoint MAY send multiple GOAWAY frames if circumstances change. For instance, an
- endpoint that sends GOAWAY with NO_ERROR during graceful shutdown could
- subsequently encounter a condition that requires immediate termination of the connection.
- The last stream identifier from the last GOAWAY frame received indicates which streams
- could have been acted upon. Endpoints MUST NOT increase the value they send in the last
- stream identifier, since the peers might already have retried unprocessed requests on
- another connection.
-
-
- A client that is unable to retry requests loses all requests that are in flight when the
- server closes the connection. This is especially true for intermediaries that might
- not be serving clients using HTTP/2. A server that is attempting to gracefully shut down
- a connection SHOULD send an initial GOAWAY frame with the last stream identifier set to
- 2^31-1 and a NO_ERROR code. This signals to the client that
- a shutdown is imminent and that no further requests can be initiated. After waiting at
- least one round trip time, the server can send another GOAWAY frame with an updated last
- stream identifier. This ensures that a connection can be cleanly shut down without losing
- requests.
-
-
-
- After sending a GOAWAY frame, the sender can discard frames for streams with identifiers
- higher than the identified last stream. However, any frames that alter connection state
- cannot be completely ignored. For instance, HEADERS,
- PUSH_PROMISE and CONTINUATION frames MUST be minimally
- processed to ensure the state maintained for header compression is consistent (see ); similarly DATA frames MUST be counted toward the connection flow
- control window. Failure to process these frames can cause flow control or header
- compression state to become unsynchronized.
-
-
-
- The GOAWAY frame also contains a 32-bit error code that
- contains the reason for closing the connection.
-
-
- Endpoints MAY append opaque data to the payload of any GOAWAY frame. Additional debug
- data is intended for diagnostic purposes only and carries no semantic value. Debug
- information could contain security- or privacy-sensitive data. Logged or otherwise
- persistently stored debug data MUST have adequate safeguards to prevent unauthorized
- access.
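-
- Putting the pieces of the GOAWAY payload together, a minimal Go sketch (hypothetical
- helper): a reserved bit plus the 31-bit last stream identifier, the 32-bit error code, and
- any opaque debug data appended verbatim.
-
-   package main
-
-   import (
-       "encoding/binary"
-       "fmt"
-   )
-
-   // encodeGoAway builds a GOAWAY payload: a 31-bit last stream identifier (the
-   // high bit is reserved and left zero), a 32-bit error code, and optional
-   // debug data.
-   func encodeGoAway(lastStreamID, errCode uint32, debug []byte) []byte {
-       p := make([]byte, 8, 8+len(debug))
-       binary.BigEndian.PutUint32(p[0:4], lastStreamID&0x7fffffff)
-       binary.BigEndian.PutUint32(p[4:8], errCode)
-       return append(p, debug...)
-   }
-
-   func main() {
-       // A graceful shutdown might first advertise the maximum stream identifier
-       // (2^31-1) with NO_ERROR (0x0), then later send an updated GOAWAY.
-       p := encodeGoAway(1<<31-1, 0x0, []byte("shutting down"))
-       fmt.Printf("% x\n", p)
-   }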
-
-
-
-
-
- The WINDOW_UPDATE frame (type=0x8) is used to implement flow control; see for an overview.
-
-
- Flow control operates at two levels: on each individual stream and on the entire
- connection.
-
-
- Both types of flow control are hop-by-hop; that is, only between the two endpoints.
- Intermediaries do not forward WINDOW_UPDATE frames between dependent connections.
- However, throttling of data transfer by any receiver can indirectly cause the propagation
- of flow control information toward the original sender.
-
-
- Flow control only applies to frames that are identified as being subject to flow control.
- Of the frame types defined in this document, this includes only DATA frames.
- Frames that are exempt from flow control MUST be accepted and processed, unless the
- receiver is unable to assign resources to handling the frame. A receiver MAY respond with
- a stream error or connection error of type
- FLOW_CONTROL_ERROR if it is unable to accept a frame.
-
-
-
- The payload of a WINDOW_UPDATE frame is one reserved bit, plus an unsigned 31-bit integer
- indicating the number of octets that the sender can transmit in addition to the existing
- flow control window. The legal range for the increment to the flow control window is 1 to
- 2^31-1 (0x7fffffff) octets.
-
-
- The WINDOW_UPDATE frame does not define any flags.
-
-
- The WINDOW_UPDATE frame can be specific to a stream or to the entire connection. In the
- former case, the frame's stream identifier indicates the affected stream; in the latter,
- the value "0" indicates that the entire connection is the subject of the frame.
-
-
- A receiver MUST treat the receipt of a WINDOW_UPDATE frame with a flow control window
- increment of 0 as a stream error of type
- PROTOCOL_ERROR; errors on the connection flow control window MUST be
- treated as a connection error.
-
-
- WINDOW_UPDATE can be sent by a peer that has sent a frame bearing the END_STREAM flag.
- This means that a receiver could receive a WINDOW_UPDATE frame on a "half closed (remote)"
- or "closed" stream. A receiver MUST NOT treat this as an error, see .
-
-
- A receiver that receives a flow controlled frame MUST always account for its contribution
- against the connection flow control window, unless the receiver treats this as a connection error. This is necessary even if the
- frame is in error. Since the sender counts the frame toward the flow control window, if
- the receiver does not, the flow control window at sender and receiver can become
- different.
-
-
-
-
- Flow control in HTTP/2 is implemented using a window kept by each sender on every
- stream. The flow control window is a simple integer value that indicates how many octets
- of data the sender is permitted to transmit; as such, its size is a measure of the
- buffering capacity of the receiver.
-
-
- Two flow control windows are applicable: the stream flow control window and the
- connection flow control window. The sender MUST NOT send a flow controlled frame with a
- length that exceeds the space available in either of the flow control windows advertised
- by the receiver. Frames with zero length with the END_STREAM flag set (that is, an
- empty DATA frame) MAY be sent if there is no available space in either
- flow control window.
-
-
- For flow control calculations, the 9 octet frame header is not counted.
-
-
- After sending a flow controlled frame, the sender reduces the space available in both
- windows by the length of the transmitted frame.
-
-
- The receiver of a frame sends a WINDOW_UPDATE frame as it consumes data and frees up
- space in flow control windows. Separate WINDOW_UPDATE frames are sent for the stream
- and connection level flow control windows.
-
-
- A sender that receives a WINDOW_UPDATE frame updates the corresponding window by the
- amount specified in the frame.
-
-
- A sender MUST NOT allow a flow control window to exceed 2^31-1 octets.
- If a sender receives a WINDOW_UPDATE that causes a flow control window to exceed this
- maximum, it MUST terminate either the stream or the connection, as appropriate. For
- streams, the sender sends a RST_STREAM with an error code of
- FLOW_CONTROL_ERROR; for the connection, a GOAWAY
- frame with a FLOW_CONTROL_ERROR code.
-
-
- Flow controlled frames from the sender and WINDOW_UPDATE frames from the receiver are
- completely asynchronous with respect to each other. This property allows a receiver to
- aggressively update the window size kept by the sender to prevent streams from stalling.
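-
- A minimal sender-side sketch of this bookkeeping in Go (hypothetical, with made-up names):
- both windows shrink by the payload length of each flow-controlled frame (the 9-octet
- header is not counted), a WINDOW_UPDATE grows the corresponding window, and no window may
- exceed 2^31-1.
-
-   package main
-
-   import (
-       "errors"
-       "fmt"
-   )
-
-   const maxWindow = 1<<31 - 1 // the largest permitted flow control window
-
-   // sender tracks the connection window and a single stream window.
-   type sender struct {
-       connWindow   int64
-       streamWindow int64
-   }
-
-   // consume accounts for sending a flow-controlled frame with n payload octets;
-   // the 9-octet frame header is not counted against either window.
-   func (s *sender) consume(n int64) error {
-       if n > s.connWindow || n > s.streamWindow {
-           return errors.New("frame exceeds the available flow control window")
-       }
-       s.connWindow -= n
-       s.streamWindow -= n
-       return nil
-   }
-
-   // windowUpdate applies a WINDOW_UPDATE increment; overflowing 2^31-1 requires
-   // terminating the stream or connection with FLOW_CONTROL_ERROR.
-   func windowUpdate(window *int64, increment int64) error {
-       if *window+increment > maxWindow {
-           return errors.New("flow control window would exceed 2^31-1")
-       }
-       *window += increment
-       return nil
-   }
-
-   func main() {
-       s := &sender{connWindow: 65535, streamWindow: 65535}
-       _ = s.consume(16384)
-       _ = windowUpdate(&s.streamWindow, 16384)
-       fmt.Println(s.connWindow, s.streamWindow) // 49151 65535
-   }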
-
-
-
-
-
- When an HTTP/2 connection is first established, new streams are created with an initial
- flow control window size of 65,535 octets. The connection flow control window is 65,535
- octets. Both endpoints can adjust the initial window size for new streams by including
- a value for SETTINGS_INITIAL_WINDOW_SIZE in the SETTINGS
- frame that forms part of the connection preface. The connection flow control window can
- only be changed using WINDOW_UPDATE frames.
-
-
- Prior to receiving a SETTINGS frame that sets a value for
- SETTINGS_INITIAL_WINDOW_SIZE, an endpoint can only use the default
- initial window size when sending flow controlled frames. Similarly, the connection flow
- control window is set to the default initial window size until a WINDOW_UPDATE frame is
- received.
-
-
- A SETTINGS frame can alter the initial flow control window size for all
- current streams. When the value of SETTINGS_INITIAL_WINDOW_SIZE changes,
- a receiver MUST adjust the size of all stream flow control windows that it maintains by
- the difference between the new value and the old value.
-
-
- A change to SETTINGS_INITIAL_WINDOW_SIZE can cause the available space in
- a flow control window to become negative. A sender MUST track the negative flow control
- window, and MUST NOT send new flow controlled frames until it receives WINDOW_UPDATE
- frames that cause the flow control window to become positive.
-
-
- For example, if the client sends 60KB immediately on connection establishment, and the
- server sets the initial window size to be 16KB, the client will recalculate the
- available flow control window to be -44KB on receipt of the SETTINGS
- frame. The client retains a negative flow control window until WINDOW_UPDATE frames
- restore the window to being positive, after which the client can resume sending.
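-
- In code, the adjustment is simply the signed difference between the new and old initial
- window sizes applied to every stream window the endpoint maintains; the hypothetical Go
- sketch below reproduces the -44KB figure from the example above.
-
-   package main
-
-   import "fmt"
-
-   // adjustStreamWindows applies a change in SETTINGS_INITIAL_WINDOW_SIZE by
-   // adding the difference between the new and old values to every stream
-   // window; the result may legitimately be negative.
-   func adjustStreamWindows(windows []int64, oldInitial, newInitial int64) {
-       delta := newInitial - oldInitial
-       for i := range windows {
-           windows[i] += delta
-       }
-   }
-
-   func main() {
-       // The client started with the default 65,535-octet window and has already
-       // sent 60KB (61,440 octets), leaving 4,095 octets available.
-       windows := []int64{65535 - 61440}
-       // The server's SETTINGS frame lowers the initial window size to 16KB.
-       adjustStreamWindows(windows, 65535, 16384)
-       fmt.Println(windows[0]) // -45056 octets, i.e. -44KB: sending must pause
-   }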
-
-
- A SETTINGS frame cannot alter the connection flow control window.
-
-
- An endpoint MUST treat a change to SETTINGS_INITIAL_WINDOW_SIZE that
- causes any flow control window to exceed the maximum size as a connection error of type
- FLOW_CONTROL_ERROR.
-
-
-
-
-
- A receiver that wishes to use a smaller flow control window than the current size can
- send a new SETTINGS frame. However, the receiver MUST be prepared to
- receive data that exceeds this window size, since the sender might send data that
- exceeds the lower limit prior to processing the SETTINGS frame.
-
-
- After sending a SETTINGS frame that reduces the initial flow control window size, a
- receiver has two options for handling streams that exceed flow control limits:
-
-
- The receiver can immediately send RST_STREAM with
- FLOW_CONTROL_ERROR error code for the affected streams.
-
-
- The receiver can accept the streams and tolerate the resulting head of line
- blocking, sending WINDOW_UPDATE frames as it consumes data.
-
-
-
-
-
-
-
-
- The CONTINUATION frame (type=0x9) is used to continue a sequence of header block fragments. Any number of CONTINUATION frames can
- be sent on an existing stream, as long as the preceding frame is on the same stream and is
- a HEADERS, PUSH_PROMISE or CONTINUATION frame without the
- END_HEADERS flag set.
-
-
-
-
- The CONTINUATION frame payload contains a header block
- fragment.
-
-
-
- The CONTINUATION frame defines the following flag:
-
-
-
- Bit 3 being set indicates that this frame ends a header
- block.
-
-
- If the END_HEADERS bit is not set, this frame MUST be followed by another
- CONTINUATION frame. A receiver MUST treat the receipt of any other type of frame or
- a frame on a different stream as a connection
- error of type PROTOCOL_ERROR.
-
-
-
-
-
-
- The CONTINUATION frame changes the connection state as defined in .
-
-
-
- CONTINUATION frames MUST be associated with a stream. If a CONTINUATION frame is received
- whose stream identifier field is 0x0, the recipient MUST respond with a connection error of type PROTOCOL_ERROR.
-
-
-
- A CONTINUATION frame MUST be preceded by a HEADERS,
- PUSH_PROMISE or CONTINUATION frame without the END_HEADERS flag set. A
- recipient that observes violation of this rule MUST respond with a connection error of type
- PROTOCOL_ERROR.
-
-
-
-
-
-
- Error codes are 32-bit fields that are used in RST_STREAM and
- GOAWAY frames to convey the reasons for the stream or connection error.
-
-
-
- Error codes share a common code space. Some error codes apply only to either streams or the
- entire connection and have no defined semantics in the other context.
-
-
-
- The following error codes are defined:
-
-
- The associated condition is not a result of an error. For example, a
- GOAWAY might include this code to indicate graceful shutdown of a
- connection.
-
-
- The endpoint detected an unspecific protocol error. This error is for use when a more
- specific error code is not available.
-
-
- The endpoint encountered an unexpected internal error.
-
-
- The endpoint detected that its peer violated the flow control protocol.
-
-
- The endpoint sent a SETTINGS frame, but did not receive a response in a
- timely manner. See Settings Synchronization.
-
-
- The endpoint received a frame after a stream was half closed.
-
-
- The endpoint received a frame with an invalid size.
-
-
- The endpoint refuses the stream prior to performing any application processing, see
- for details.
-
-
- Used by the endpoint to indicate that the stream is no longer needed.
-
-
- The endpoint is unable to maintain the header compression context for the connection.
-
-
- The connection established in response to a CONNECT
- request was reset or abnormally closed.
-
-
- The endpoint detected that its peer is exhibiting a behavior that might be generating
- excessive load.
-
-
- The underlying transport has properties that do not meet minimum security
- requirements (see ).
-
-
-
-
- Unknown or unsupported error codes MUST NOT trigger any special behavior. These MAY be
- treated by an implementation as being equivalent to INTERNAL_ERROR.
-
-
-
-
-
- HTTP/2 is intended to be as compatible as possible with current uses of HTTP. This means
- that, from the application perspective, the features of the protocol are largely
- unchanged. To achieve this, all request and response semantics are preserved, although the
- syntax of conveying those semantics has changed.
-
-
- Thus, the specification and requirements of HTTP/1.1 Semantics and Content , Conditional Requests , Range Requests , Caching and Authentication are applicable to HTTP/2. Selected portions of HTTP/1.1 Message Syntax
- and Routing , such as the HTTP and HTTPS URI schemes, are also
- applicable in HTTP/2, but the expression of those semantics for this protocol is defined
- in the sections below.
-
-
-
-
- A client sends an HTTP request on a new stream, using a previously unused stream identifier. A server sends an HTTP response on
- the same stream as the request.
-
-
- An HTTP message (request or response) consists of:
-
-
- for a response only, zero or more HEADERS frames (each followed by zero
- or more CONTINUATION frames) containing the message headers of
- informational (1xx) HTTP responses (see and ),
- and
-
-
- one HEADERS frame (followed by zero or more CONTINUATION
- frames) containing the message headers (see ), and
-
-
- zero or more DATA frames containing the message payload (see ), and
-
-
- optionally, one HEADERS frame, followed by zero or more
- CONTINUATION frames containing the trailer-part, if present (see ).
-
-
- The last frame in the sequence bears an END_STREAM flag, noting that a
- HEADERS frame bearing the END_STREAM flag can be followed by
- CONTINUATION frames that carry any remaining portions of the header block.
-
-
- Other frames (from any stream) MUST NOT occur between either HEADERS frame
- and any CONTINUATION frames that might follow.
-
-
-
- Trailing header fields are carried in a header block that also terminates the stream.
- That is, a sequence starting with a HEADERS frame, followed by zero or more
- CONTINUATION frames, where the HEADERS frame bears an
- END_STREAM flag. Header blocks after the first that do not terminate the stream are not
- part of an HTTP request or response.
-
-
- A HEADERS frame (and associated CONTINUATION frames) can
- only appear at the start or end of a stream. An endpoint that receives a
- HEADERS frame without the END_STREAM flag set after receiving a final
- (non-informational) status code MUST treat the corresponding request or response as malformed.
-
-
-
- An HTTP request/response exchange fully consumes a single stream. A request starts with
- the HEADERS frame that puts the stream into an "open" state. The request
- ends with a frame bearing END_STREAM, which causes the stream to become "half closed
- (local)" for the client and "half closed (remote)" for the server. A response starts with
- a HEADERS frame and ends with a frame bearing END_STREAM, which places the
- stream in the "closed" state.
-
-
-
-
-
- HTTP/2 removes support for the 101 (Switching Protocols) informational status code
- ().
-
-
- The semantics of 101 (Switching Protocols) aren't applicable to a multiplexed protocol.
- Alternative protocols are able to use the same mechanisms that HTTP/2 uses to negotiate
- their use (see ).
-
-
-
-
-
- HTTP header fields carry information as a series of key-value pairs. For a listing of
- registered HTTP headers, see the Message Header Field Registry maintained at .
-
-
-
-
- While HTTP/1.x used the message start-line (see ) to convey the target URI and method of the request, and the
- status code for the response, HTTP/2 uses special pseudo-header fields beginning with
- ':' character (ASCII 0x3a) for this purpose.
-
-
- Pseudo-header fields are not HTTP header fields. Endpoints MUST NOT generate
- pseudo-header fields other than those defined in this document.
-
-
- Pseudo-header fields are only valid in the context in which they are defined.
- Pseudo-header fields defined for requests MUST NOT appear in responses; pseudo-header
- fields defined for responses MUST NOT appear in requests. Pseudo-header fields MUST
- NOT appear in trailers. Endpoints MUST treat a request or response that contains
- undefined or invalid pseudo-header fields as malformed.
-
-
- Just as in HTTP/1.x, header field names are strings of ASCII characters that are
- compared in a case-insensitive fashion. However, header field names MUST be converted
- to lowercase prior to their encoding in HTTP/2. A request or response containing
- uppercase header field names MUST be treated as malformed.
-
-
- All pseudo-header fields MUST appear in the header block before regular header fields.
- Any request or response that contains a pseudo-header field that appears in a header
- block after a regular header field MUST be treated as malformed.
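-
- Taken together, these rules suggest a validation pass along the lines of the following
- hypothetical Go sketch (not any particular implementation): every field name must already
- be lowercase, and once a regular field has been seen no further pseudo-header fields may
- appear.
-
-   package main
-
-   import (
-       "errors"
-       "fmt"
-       "strings"
-   )
-
-   type headerField struct{ Name, Value string }
-
-   // checkHeaderBlock enforces two of the rules above: lowercase field names,
-   // and all pseudo-header fields (":"-prefixed) before regular fields.
-   func checkHeaderBlock(fields []headerField) error {
-       sawRegular := false
-       for _, f := range fields {
-           if f.Name != strings.ToLower(f.Name) {
-               return errors.New("malformed: uppercase header field name " + f.Name)
-           }
-           if strings.HasPrefix(f.Name, ":") {
-               if sawRegular {
-                   return errors.New("malformed: pseudo-header after regular field")
-               }
-           } else {
-               sawRegular = true
-           }
-       }
-       return nil
-   }
-
-   func main() {
-       ok := []headerField{{":method", "GET"}, {":path", "/"}, {"accept", "*/*"}}
-       bad := []headerField{{"accept", "*/*"}, {":method", "GET"}}
-       fmt.Println(checkHeaderBlock(ok), checkHeaderBlock(bad))
-   }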
-
-
-
-
-
- HTTP/2 does not use the Connection header field to
- indicate connection-specific header fields; in this protocol, connection-specific
- metadata is conveyed by other means. An endpoint MUST NOT generate an HTTP/2 message
- containing connection-specific header fields; any message containing
- connection-specific header fields MUST be treated as malformed.
-
-
- This means that an intermediary transforming an HTTP/1.x message to HTTP/2 will need
- to remove any header fields nominated by the Connection header field, along with the
- Connection header field itself. Such intermediaries SHOULD also remove other
- connection-specific header fields, such as Keep-Alive, Proxy-Connection,
- Transfer-Encoding and Upgrade, even if they are not nominated by Connection.
-
-
- One exception to this is the TE header field, which MAY be present in an HTTP/2
- request, but when it is, it MUST NOT contain any value other than "trailers".
-
-
-
-
- HTTP/2 purposefully does not support upgrade to another protocol. The handshake
- methods described in are believed sufficient to
- negotiate the use of alternative protocols.
-
-
-
-
-
-
-
- The following pseudo-header fields are defined for HTTP/2 requests:
-
-
-
- The :method pseudo-header field includes the HTTP
- method ().
-
-
-
-
- The :scheme pseudo-header field includes the scheme
- portion of the target URI ().
-
-
- :scheme is not restricted to http and https schemed URIs. A
- proxy or gateway can translate requests for non-HTTP schemes, enabling the use
- of HTTP to interact with non-HTTP services.
-
-
-
-
- The :authority pseudo-header field includes the
- authority portion of the target URI (). The authority MUST NOT include the deprecated userinfo subcomponent for http
- or https schemed URIs.
-
-
- To ensure that the HTTP/1.1 request line can be reproduced accurately, this
- pseudo-header field MUST be omitted when translating from an HTTP/1.1 request
- that has a request target in origin or asterisk form (see ). Clients that generate
- HTTP/2 requests directly SHOULD use the :authority pseudo-header
- field instead of the Host header field. An
- intermediary that converts an HTTP/2 request to HTTP/1.1 MUST create a Host header field if one is not present in a request by
- copying the value of the :authority pseudo-header
- field.
-
-
-
-
- The :path pseudo-header field includes the path and
- query parts of the target URI (the path-absolute
- production from and optionally a '?' character
- followed by the query production, see and ). A request in asterisk form includes the value '*' for the
- :path pseudo-header field.
-
-
- This pseudo-header field MUST NOT be empty for http
- or https URIs; http or
- https URIs that do not contain a path component
- MUST include a value of '/'. The exception to this rule is an OPTIONS request
- for an http or https
- URI that does not include a path component; these MUST include a :path pseudo-header field with a value of '*' (see ).
-
-
-
-
-
- All HTTP/2 requests MUST include exactly one valid value for the :method, :scheme, and :path pseudo-header fields, unless it is a CONNECT request. An HTTP request that omits mandatory
- pseudo-header fields is malformed.
-
-
- HTTP/2 does not define a way to carry the version identifier that is included in the
- HTTP/1.1 request line.
-
-
-
-
-
- For HTTP/2 responses, a single :status pseudo-header
- field is defined that carries the HTTP status code field (see ). This pseudo-header field MUST be included in all
- responses, otherwise the response is malformed.
-
-
- HTTP/2 does not define a way to carry the version or reason phrase that is included in
- an HTTP/1.1 status line.
-
-
-
-
-
- The Cookie header field can carry a significant amount of
- redundant data.
-
-
- The Cookie header field uses a semi-colon (";") to delimit cookie-pairs (or "crumbs").
- This header field doesn't follow the list construction rules in HTTP (see ), which prevents cookie-pairs from
- being separated into different name-value pairs. This can significantly reduce
- compression efficiency as individual cookie-pairs are updated.
-
-
- To allow for better compression efficiency, the Cookie header field MAY be split into
- separate header fields, each with one or more cookie-pairs. If there are multiple
- Cookie header fields after decompression, these MUST be concatenated into a single
- octet string using the two octet delimiter of 0x3B, 0x20 (the ASCII string "; ")
- before being passed into a non-HTTP/2 context, such as an HTTP/1.1 connection, or a
- generic HTTP server application.
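-
- A small Go sketch of that recombination step (hypothetical helper): the individual
- cookie-pairs are joined with the two-octet "; " delimiter before leaving the HTTP/2
- context.
-
-   package main
-
-   import (
-       "fmt"
-       "strings"
-   )
-
-   // joinCookieCrumbs concatenates multiple Cookie header field values into the
-   // single field expected outside HTTP/2, using the 0x3B 0x20 ("; ") delimiter.
-   func joinCookieCrumbs(crumbs []string) string {
-       return strings.Join(crumbs, "; ")
-   }
-
-   func main() {
-       // Over HTTP/2, each cookie-pair may have travelled as its own header field.
-       fmt.Println(joinCookieCrumbs([]string{"a=b", "c=d", "e=f"})) // a=b; c=d; e=f
-   }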
-
-
-
-
-
-
- A malformed request or response is one that is an otherwise valid sequence of HTTP/2
- frames but is invalid due to the presence of extraneous frames, prohibited
- header fields, the absence of mandatory header fields, or the inclusion of uppercase
- header field names.
-
-
- A request or response that includes an entity body can include a content-length header field. A request or response is also
- malformed if the value of a content-length header field
- does not equal the sum of the DATA frame payload lengths that form the
- body. A response that is defined to have no payload, as described in , can have a non-zero
- content-length header field, even though no content is
- included in DATA frames.
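-
- A hypothetical Go sketch of that check (illustration only): the receiver sums the DATA
- frame payload lengths observed on the stream and compares the total against the declared
- content-length.
-
-   package main
-
-   import (
-       "fmt"
-       "strconv"
-   )
-
-   // bodyLengthMatches reports whether a declared content-length value equals
-   // the sum of the DATA frame payload lengths that formed the body.
-   func bodyLengthMatches(contentLength string, dataPayloadLens []int) (bool, error) {
-       declared, err := strconv.ParseInt(contentLength, 10, 64)
-       if err != nil || declared < 0 {
-           return false, fmt.Errorf("invalid content-length %q", contentLength)
-       }
-       var total int64
-       for _, n := range dataPayloadLens {
-           total += int64(n)
-       }
-       // A mismatch makes the message malformed (a stream error of type PROTOCOL_ERROR).
-       return total == declared, nil
-   }
-
-   func main() {
-       fmt.Println(bodyLengthMatches("10", []int{4, 6})) // true <nil>
-       fmt.Println(bodyLengthMatches("10", []int{4, 4})) // false <nil>
-   }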
-
-
- Intermediaries that process HTTP requests or responses (i.e., any intermediary not
- acting as a tunnel) MUST NOT forward a malformed request or response. Malformed
- requests or responses that are detected MUST be treated as a stream error of type PROTOCOL_ERROR.
-
-
- For malformed requests, a server MAY send an HTTP response prior to closing or
- resetting the stream. Clients MUST NOT accept a malformed response. Note that these
- requirements are intended to protect against several types of common attacks against
- HTTP; they are deliberately strict, because being permissive can expose
- implementations to these vulnerabilities.
-
-
-
-
-
-
- This section shows HTTP/1.1 requests and responses, with illustrations of equivalent
- HTTP/2 requests and responses.
-
-
- An HTTP GET request includes request header fields and no body and is therefore
- transmitted as a single HEADERS frame, followed by zero or more
- CONTINUATION frames containing the serialized block of request header
- fields. The HEADERS frame in the following has both the END_HEADERS and
- END_STREAM flags set; no CONTINUATION frames are sent:
-
-
-
-
-
- Similarly, a response that includes only response header fields is transmitted as a
- HEADERS frame (again, followed by zero or more
- CONTINUATION frames) containing the serialized block of response header
- fields.
-
-
-
-
-
- An HTTP POST request that includes request header fields and payload data is transmitted
- as one HEADERS frame, followed by zero or more
- CONTINUATION frames containing the request header fields, followed by one
- or more DATA frames, with the last CONTINUATION (or
- HEADERS) frame having the END_HEADERS flag set and the final
- DATA frame having the END_STREAM flag set:
-
-
-
-
-
- A response that includes header fields and payload data is transmitted as a
- HEADERS frame, followed by zero or more CONTINUATION
- frames, followed by one or more DATA frames, with the last
- DATA frame in the sequence having the END_STREAM flag set:
-
-
-
-
-
- Trailing header fields are sent as a header block after both the request or response
- header block and all the DATA frames have been sent. The
- HEADERS frame starting the trailers header block has the END_STREAM flag
- set.
-
-
-
-
-
-
-
-
-
-
- In HTTP/1.1, an HTTP client is unable to retry a non-idempotent request when an error
- occurs, because there is no means to determine the nature of the error. It is possible
- that some server processing occurred prior to the error, which could result in
- undesirable effects if the request were reattempted.
-
-
- HTTP/2 provides two mechanisms for providing a guarantee to a client that a request has
- not been processed:
-
-
- The GOAWAY frame indicates the highest stream number that might have
- been processed. Requests on streams with higher numbers are therefore guaranteed to
- be safe to retry.
-
-
- The REFUSED_STREAM error code can be included in a
- RST_STREAM frame to indicate that the stream is being closed prior to
- any processing having occurred. Any request that was sent on the reset stream can
- be safely retried.
-
-
-
-
- Requests that have not been processed have not failed; clients MAY automatically retry
- them, even those with non-idempotent methods.
-
-
- A server MUST NOT indicate that a stream has not been processed unless it can guarantee
- that fact. If frames that are on a stream are passed to the application layer for any
- stream, then REFUSED_STREAM MUST NOT be used for that stream, and a
- GOAWAY frame MUST include a stream identifier that is greater than or
- equal to the given stream identifier.
-
-
- In addition to these mechanisms, the PING frame provides a way for a
- client to easily test a connection. Connections that remain idle can become broken as
- some middleboxes (for instance, network address translators, or load balancers) silently
- discard connection bindings. The PING frame allows a client to safely
- test whether a connection is still active without sending a request.
-
-
-
-
-
-
- HTTP/2 allows a server to pre-emptively send (or "push") responses (along with
- corresponding "promised" requests) to a client in association with a previous
- client-initiated request. This can be useful when the server knows the client will need
- to have those responses available in order to fully process the response to the original
- request.
-
-
-
- Pushing additional message exchanges in this fashion is optional, and is negotiated
- between individual endpoints. The SETTINGS_ENABLE_PUSH setting can be set
- to 0 to indicate that server push is disabled.
-
-
- Promised requests MUST be cacheable (see ), MUST be safe (see ) and MUST NOT include a request body. Clients that receive a
- promised request that is not cacheable, is unsafe, or that includes a request body MUST
- reset the stream with a stream error of type
- PROTOCOL_ERROR.
-
-
- Pushed responses that are cacheable (see ) can be stored by the client, if it implements an HTTP
- cache. Pushed responses are considered successfully validated on the origin server (e.g.,
- if the "no-cache" cache response directive is present) while the stream identified by the
- promised stream ID is still open.
-
-
- Pushed responses that are not cacheable MUST NOT be stored by any HTTP cache. They MAY
- be made available to the application separately.
-
-
- An intermediary can receive pushes from the server and choose not to forward them on to
- the client. In other words, how to make use of the pushed information is up to that
- intermediary. Equally, the intermediary might choose to make additional pushes to the
- client, without any action taken by the server.
-
-
- A client cannot push. Thus, servers MUST treat the receipt of a
- PUSH_PROMISE frame as a connection
- error of type PROTOCOL_ERROR. Clients MUST reject any attempt to
- change the SETTINGS_ENABLE_PUSH setting to a value other than 0 by treating
- the message as a connection error of type
- PROTOCOL_ERROR.
-
-
-
-
- Server push is semantically equivalent to a server responding to a request; however, in
- this case that request is also sent by the server, as a PUSH_PROMISE
- frame.
-
-
- The PUSH_PROMISE frame includes a header block that contains a complete
- set of request header fields that the server attributes to the request. It is not
- possible to push a response to a request that includes a request body.
-
-
-
- Pushed responses are always associated with an explicit request from the client. The
- PUSH_PROMISE frames sent by the server are sent on that explicit
- request's stream. The PUSH_PROMISE frame also includes a promised stream
- identifier, chosen from the stream identifiers available to the server (see ).
-
-
-
- The header fields in PUSH_PROMISE and any subsequent
- CONTINUATION frames MUST be a valid and complete set of request header fields. The server MUST include a method in
- the :method header field that is safe and cacheable. If a
- client receives a PUSH_PROMISE that does not include a complete and valid
- set of header fields, or the :method header field identifies
- a method that is not safe, it MUST respond with a stream error of type PROTOCOL_ERROR.
-
-
-
- The server SHOULD send PUSH_PROMISE ()
- frames prior to sending any frames that reference the promised responses. This avoids a
- race where clients issue requests prior to receiving any PUSH_PROMISE
- frames.
-
-
- For example, if the server receives a request for a document containing embedded links
- to multiple image files, and the server chooses to push those additional images to the
- client, sending push promises before the DATA frames that contain the
- image links ensures that the client is able to see the promises before discovering
- embedded links. Similarly, if the server pushes responses referenced by the header block
- (for instance, in Link header fields), sending the push promises before sending the
- header block ensures that clients do not request them.
-
-
-
- PUSH_PROMISE frames MUST NOT be sent by the client.
-
-
- PUSH_PROMISE frames can be sent by the server in response to any
- client-initiated stream, but the stream MUST be in either the "open" or "half closed
- (remote)" state with respect to the server. PUSH_PROMISE frames are
- interspersed with the frames that comprise a response, though they cannot be
- interspersed with HEADERS and CONTINUATION frames that
- comprise a single header block.
-
-
- Sending a PUSH_PROMISE frame creates a new stream and puts the stream
- into the "reserved (local)" state for the server and the "reserved (remote)" state for
- the client.
-
-
-
-
-
- After sending the PUSH_PROMISE frame, the server can begin delivering the
- pushed response as a response on a server-initiated
- stream that uses the promised stream identifier. The server uses this stream to
- transmit an HTTP response, using the same sequence of frames as defined in . This stream becomes "half closed"
- to the client after the initial HEADERS frame is sent.
-
-
-
- Once a client receives a PUSH_PROMISE frame and chooses to accept the
- pushed response, the client SHOULD NOT issue any requests for the promised response
- until after the promised stream has closed.
-
-
-
- If the client determines, for any reason, that it does not wish to receive the pushed
- response from the server, or if the server takes too long to begin sending the promised
- response, the client can send an RST_STREAM frame, using either the
- CANCEL or REFUSED_STREAM codes, and referencing the pushed
- stream's identifier.
-
-
- A client can use the SETTINGS_MAX_CONCURRENT_STREAMS setting to limit the
- number of responses that can be concurrently pushed by a server. Advertising a
- SETTINGS_MAX_CONCURRENT_STREAMS value of zero disables server push by
- preventing the server from creating the necessary streams. This does not prohibit a
- server from sending PUSH_PROMISE frames; clients need to reset any
- promised streams that are not wanted.
-
-
-
- Clients receiving a pushed response MUST validate that either the server is
- authoritative (see ), or the proxy that provided the pushed
- response is configured for the corresponding request. For example, a server that offers
- a certificate for only the example.com DNS-ID or Common Name
- is not permitted to push a response for https://www.example.org/doc.
-
-
- The response for a PUSH_PROMISE stream begins with a
- HEADERS frame, which immediately puts the stream into the "half closed
- (remote)" state for the server and "half closed (local)" state for the client, and ends
- with a frame bearing END_STREAM, which places the stream in the "closed" state.
-
-
- The client never sends a frame with the END_STREAM flag for a server push.
-
-
-
-
-
-
-
-
-
- In HTTP/1.x, the pseudo-method CONNECT () is used to convert an HTTP connection into a tunnel to a remote host.
- CONNECT is primarily used with HTTP proxies to establish a TLS session with an origin
- server for the purposes of interacting with https resources.
-
-
- In HTTP/2, the CONNECT method is used to establish a tunnel over a single HTTP/2 stream to
- a remote host, for similar purposes. The HTTP header field mapping works as defined in
- Request Header Fields, with a few
- differences. Specifically:
-
-
- The :method header field is set to CONNECT.
-
-
- The :scheme and :path header
- fields MUST be omitted.
-
-
- The :authority header field contains the host and port to
- connect to (equivalent to the authority-form of the request-target of CONNECT
- requests, see ).
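-
- For illustration, a hypothetical Go sketch of the header block a client could build for
- such a request (the helper and type names are assumptions): only :method and :authority
- are present, while :scheme and :path are deliberately omitted.
-
-   package main
-
-   import "fmt"
-
-   type headerField struct{ Name, Value string }
-
-   // connectHeaderBlock builds the request pseudo-header fields for a CONNECT
-   // request to the given host and port.
-   func connectHeaderBlock(hostport string) []headerField {
-       return []headerField{
-           {Name: ":method", Value: "CONNECT"},
-           {Name: ":authority", Value: hostport},
-       }
-   }
-
-   func main() {
-       for _, f := range connectHeaderBlock("server.example.com:443") {
-           fmt.Printf("%s = %s\n", f.Name, f.Value)
-       }
-   }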
-
-
-
-
- A proxy that supports CONNECT establishes a TCP connection to
- the server identified in the :authority header field. Once
- this connection is successfully established, the proxy sends a HEADERS
- frame containing a 2xx series status code to the client, as defined in .
-
-
- After the initial HEADERS frame sent by each peer, all subsequent
- DATA frames correspond to data sent on the TCP connection. The payload of
- any DATA frames sent by the client is transmitted by the proxy to the TCP
- server; data received from the TCP server is assembled into DATA frames by
- the proxy. Frame types other than DATA or stream management frames
- (RST_STREAM, WINDOW_UPDATE, and PRIORITY)
- MUST NOT be sent on a connected stream, and MUST be treated as a stream error if received.
-
-
- The TCP connection can be closed by either peer. The END_STREAM flag on a
- DATA frame is treated as being equivalent to the TCP FIN bit. A client is
- expected to send a DATA frame with the END_STREAM flag set after receiving
- a frame bearing the END_STREAM flag. A proxy that receives a DATA frame
- with the END_STREAM flag set sends the attached data with the FIN bit set on the last TCP
- segment. A proxy that receives a TCP segment with the FIN bit set sends a
- DATA frame with the END_STREAM flag set. Note that the final TCP segment
- or DATA frame could be empty.
-
-
- A TCP connection error is signaled with RST_STREAM. A proxy treats any
- error in the TCP connection, which includes receiving a TCP segment with the RST bit set,
- as a stream error of type
- CONNECT_ERROR. Correspondingly, a proxy MUST send a TCP segment with the
- RST bit set if it detects an error with the stream or the HTTP/2 connection.
-
-
-
-
-
-
- This section outlines attributes of the HTTP protocol that improve interoperability, reduce
- exposure to known security vulnerabilities, or reduce the potential for implementation
- variation.
-
-
-
-
- HTTP/2 connections are persistent. For best performance, it is expected that clients will not
- close connections until it is determined that no further communication with a server is
- necessary (for example, when a user navigates away from a particular web page), or until
- the server closes the connection.
-
-
- Clients SHOULD NOT open more than one HTTP/2 connection to a given host and port pair,
- where host is derived from a URI, a selected alternative
- service, or a configured proxy.
-
-
- A client can create additional connections as replacements, either to replace connections
- that are near to exhausting the available stream
- identifier space, to refresh the keying material for a TLS connection, or to
- replace connections that have encountered errors.
-
-
- A client MAY open multiple connections to the same IP address and TCP port using different
- Server Name Indication values or to provide different TLS
- client certificates, but SHOULD avoid creating multiple connections with the same
- configuration.
-
-
- Servers are encouraged to maintain open connections for as long as possible, but are
- permitted to terminate idle connections if necessary. When either endpoint chooses to
- close the transport-layer TCP connection, the terminating endpoint SHOULD first send a
- GOAWAY () frame so that both endpoints can reliably
- determine whether previously sent frames have been processed and gracefully complete or
- terminate any necessary remaining tasks.
-
-
-
-
- Connections that are made to an origin server, either directly or through a tunnel
- created using the CONNECT method, MAY be reused for
- requests with multiple different URI authority components. A connection can be reused
- as long as the origin server is authoritative. For
- http resources, this depends on the host having resolved to
- the same IP address.
-
-
- For https resources, connection reuse additionally depends
- on having a certificate that is valid for the host in the URI. An origin server might
- offer a certificate with multiple subjectAltName attributes,
- or names with wildcards, one of which is valid for the authority in the URI. For
- example, a certificate with a subjectAltName of *.example.com might permit the use of the same connection for
- requests to URIs starting with https://a.example.com/ and
- https://b.example.com/.
-
-
- In some deployments, reusing a connection for multiple origins can result in requests
- being directed to the wrong origin server. For example, TLS termination might be
- performed by a middlebox that uses the TLS Server Name Indication
- (SNI) extension to select an origin server. This means that it is possible
- for clients to send confidential information to servers that might not be the intended
- target for the request, even though the server is otherwise authoritative.
-
-
- A server that does not wish clients to reuse connections can indicate that it is not
- authoritative for a request by sending a 421 (Misdirected Request) status code in response
- to the request (see ).
-
-
- A client that is configured to use a proxy over HTTP/2 directs requests to that proxy
- through a single connection. That is, all requests sent via a proxy reuse the
- connection to the proxy.
-
-
-
-
-
- The 421 (Misdirected Request) status code indicates that the request was directed at a
- server that is not able to produce a response. This can be sent by a server that is not
- configured to produce responses for the combination of scheme and authority that are
- included in the request URI.
-
-
- Clients receiving a 421 (Misdirected Request) response from a server MAY retry the
- request, whether the request method is idempotent or not, over a different connection.
- This is possible if a connection is reused () or if an alternative
- service is selected ().
-
-
- This status code MUST NOT be generated by proxies.
-
-
- A 421 response is cacheable by default; i.e., unless otherwise indicated by the method
- definition or explicit cache controls (see ).
-
-
-
-
-
-
- Implementations of HTTP/2 MUST support TLS 1.2 for HTTP/2 over
- TLS. The general TLS usage guidance in SHOULD be followed, with
- some additional restrictions that are specific to HTTP/2.
-
-
-
- An implementation of HTTP/2 over TLS MUST use TLS 1.2 or higher with the restrictions on
- feature set and cipher suite described in this section. Due to implementation
- limitations, it might not be possible to fail TLS negotiation. An endpoint MUST
- immediately terminate an HTTP/2 connection that does not meet these minimum requirements
- with a connection error of type
- INADEQUATE_SECURITY.
-
-
-
-
- The TLS implementation MUST support the Server Name Indication
- (SNI) extension to TLS. HTTP/2 clients MUST indicate the target domain name when
- negotiating TLS.
-
-
- The TLS implementation MUST disable compression. TLS compression can lead to the
- exposure of information that would not otherwise be revealed .
- Generic compression is unnecessary since HTTP/2 provides compression features that are
- more aware of context and therefore likely to be more appropriate for use for
- performance, security or other reasons.
-
-
- The TLS implementation MUST disable renegotiation. An endpoint MUST treat a TLS
- renegotiation as a connection error of type
- PROTOCOL_ERROR. Note that disabling renegotiation can result in
- long-lived connections becoming unusable due to limits on the number of messages the
- underlying cipher suite can encipher.
-
-
- A client MAY use renegotiation to provide confidentiality protection for client
- credentials offered in the handshake, but any renegotiation MUST occur prior to sending
- the connection preface. A server SHOULD request a client certificate if it sees a
- renegotiation request immediately after establishing a connection.
-
-
- This effectively prevents the use of renegotiation in response to a request for a
- specific protected resource. A future specification might provide a way to support this
- use case.
-
-
-
-
-
- The set of TLS cipher suites that are permitted in HTTP/2 is restricted. HTTP/2 MUST
- only be used with cipher suites that have ephemeral key exchange, such as ephemeral
- Diffie-Hellman (DHE) or the elliptic curve variant (ECDHE). Ephemeral key exchange MUST
- have a minimum size of 2048 bits for DHE or a security level of 128 bits for ECDHE.
- Clients MUST accept DHE sizes of up to 4096 bits. HTTP/2 MUST NOT be used with cipher
- suites that use stream or block ciphers. Authenticated Encryption with Additional Data
- (AEAD) modes, such as the Galois/Counter Mode (GCM) of AES, are acceptable.
-
-
- The effect of these restrictions is that TLS 1.2 implementations could have
- non-intersecting sets of available cipher suites, since these prevent the use of the
- cipher suite that TLS 1.2 makes mandatory. To avoid this problem, implementations of
- HTTP/2 that use TLS 1.2 MUST support TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 with the P-256 elliptic curve.
-
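- As a minimal, non-normative sketch using Go's crypto/tls package (the constant names
- below are Go's, and the helper name is hypothetical), a configuration that satisfies
- these constraints could look like:
-
-   // tlsConfigForHTTP2 pins TLS 1.2 as a minimum, offers the
-   // mandatory-to-implement ECDHE + AES-GCM suite with P-256, and
-   // disallows client-initiated renegotiation.
-   func tlsConfigForHTTP2() *tls.Config {
-       return &tls.Config{
-           MinVersion:       tls.VersionTLS12,
-           CipherSuites:     []uint16{tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256},
-           CurvePreferences: []tls.CurveID{tls.CurveP256},
-           Renegotiation:    tls.RenegotiateNever,
-       }
-   }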
-
- Clients MAY advertise support of cipher suites that are prohibited by the above
- restrictions in order to allow for connection to servers that do not support HTTP/2.
- This enables a fallback to protocols without these constraints without the additional
- latency imposed by using a separate connection for fallback.
-
-
-
-
-
-
-
-
- HTTP/2 relies on the HTTP/1.1 definition of authority for determining whether a server is
- authoritative in providing a given response, see . This relies on local name resolution for the "http"
- URI scheme, and the authenticated server identity for the "https" scheme (see ).
-
-
-
-
-
- In a cross-protocol attack, an attacker causes a client to initiate a transaction in one
- protocol toward a server that understands a different protocol. An attacker might be able
- to cause the transaction to appear as a valid transaction in the second protocol. In
- combination with the capabilities of the web context, this can be used to interact with
- poorly protected servers in private networks.
-
-
- Completing a TLS handshake with an ALPN identifier for HTTP/2 can be considered sufficient
- protection against cross-protocol attacks. ALPN provides a positive indication that a
- server is willing to proceed with HTTP/2, which prevents attacks on other TLS-based
- protocols.
-
-
- The encryption in TLS makes it difficult for attackers to control the data which could be
- used in a cross-protocol attack on a cleartext protocol.
-
-
- The cleartext version of HTTP/2 has minimal protection against cross-protocol attacks.
- The connection preface contains a string that is
- designed to confuse HTTP/1.1 servers, but no special protection is offered for other
- protocols. A server that is willing to ignore parts of an HTTP/1.1 request containing an
- Upgrade header field in addition to the client connection preface could be exposed to a
- cross-protocol attack.
-
-
-
-
-
- HTTP/2 header field names and values are encoded as sequences of octets with a length
- prefix. This enables HTTP/2 to carry any string of octets as the name or value of a
- header field. An intermediary that translates HTTP/2 requests or responses into HTTP/1.1
- directly could permit the creation of corrupted HTTP/1.1 messages. An attacker might
- exploit this behavior to cause the intermediary to create HTTP/1.1 messages with illegal
- header fields, extra header fields, or even new messages that are entirely falsified.
-
-
- Header field names or values that contain characters not permitted by HTTP/1.1, including
- carriage return (ASCII 0xd) or line feed (ASCII 0xa), MUST NOT be translated verbatim by an
- intermediary, as stipulated in .
-
-
- Translation from HTTP/1.x to HTTP/2 does not produce the same opportunity to an attacker.
- Intermediaries that perform translation to HTTP/2 MUST remove any instances of the obs-fold production from header field values.
-
-
-
-
-
- Pushed responses do not have an explicit request from the client; the request
- is provided by the server in the PUSH_PROMISE frame.
-
-
- Caching responses that are pushed is possible based on the guidance provided by the origin
- server in the Cache-Control header field. However, this can cause issues if a single
- server hosts more than one tenant. For example, a server might offer multiple users each
- a small portion of its URI space.
-
-
- Where multiple tenants share space on the same server, that server MUST ensure that
- tenants are not able to push representations of resources that they do not have authority
- over. Failure to enforce this would allow a tenant to provide a representation that would
- be served out of cache, overriding the actual representation that the authoritative tenant
- provides.
-
-
- Pushed responses for which an origin server is not authoritative (see
- ) are never cached or used.
-
-
-
-
-
- An HTTP/2 connection can demand a greater commitment of resources to operate than an
- HTTP/1.1 connection. The use of header compression and flow control depends on a
- commitment of resources for storing a greater amount of state. Settings for these
- features ensure that memory commitments for these features are strictly bounded.
-
-
- The number of PUSH_PROMISE frames is not constrained in the same fashion.
- A client that accepts server push SHOULD limit the number of streams it allows to be in
- the "reserved (remote)" state. Excessive number of server push streams can be treated as
- a stream error of type
- ENHANCE_YOUR_CALM.
-
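- A client can also sidestep this concern entirely by declining server push. As a
- non-normative sketch using the Framer from the Go golang.org/x/net/http2 package
- (fr is assumed to be an *http2.Framer for the connection), the client advertises
- ENABLE_PUSH = 0 in its initial SETTINGS frame:
-
-   // With SETTINGS_ENABLE_PUSH set to 0 the server cannot send PUSH_PROMISE,
-   // so no "reserved (remote)" streams are ever created.
-   if err := fr.WriteSettings(http2.Setting{ID: http2.SettingEnablePush, Val: 0}); err != nil {
-       // handle the write error
-   }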
-
- Processing capacity cannot be guarded as effectively as state capacity.
-
-
- The SETTINGS frame can be abused to cause a peer to expend additional
- processing time. This might be done by pointlessly changing SETTINGS parameters, setting
- multiple undefined parameters, or changing the same setting multiple times in the same
- frame. WINDOW_UPDATE or PRIORITY frames can be abused to
- cause an unnecessary waste of resources.
-
-
- Large numbers of small or empty frames can be abused to cause a peer to expend time
- processing frame headers. Note however that some uses are entirely legitimate, such as
- the sending of an empty DATA frame to end a stream.
-
-
- Header compression also offers some opportunities to waste processing resources; see for more details on potential abuses.
-
-
- Limits in SETTINGS parameters cannot be reduced instantaneously, which
- leaves an endpoint exposed to behavior from a peer that could exceed the new limits. In
- particular, immediately after establishing a connection, limits set by a server are not
- known to clients and could be exceeded without being an obvious protocol violation.
-
-
- All these features - i.e., SETTINGS changes, small frames, header
- compression - have legitimate uses. These features become a burden only when they are
- used unnecessarily or to excess.
-
-
- An endpoint that doesn't monitor this behavior exposes itself to a risk of denial-of-service
- attack. Implementations SHOULD track the use of these features and set limits on
- their use. An endpoint MAY treat activity that is suspicious as a connection error of type
- ENHANCE_YOUR_CALM.
-
-
-
-
- A large header block can cause an implementation to
- commit a large amount of state. Header fields that are critical for routing can appear
- toward the end of a header block, which prevents streaming of header fields to their
- ultimate destination. For this and other reasons, such as ensuring cache correctness,
- an endpoint might need to buffer the entire header block. Since there is no
- hard limit to the size of a header block, some endpoints could be forced to commit a large
- amount of available memory for header fields.
-
-
- An endpoint can use the SETTINGS_MAX_HEADER_LIST_SIZE setting to advise peers of
- limits that might apply to the size of header blocks. This setting is only advisory, so
- endpoints MAY choose to send header blocks that exceed this limit and risk having the
- request or response treated as malformed. This setting is specific to a connection,
- so any request or response could encounter a hop with a lower, unknown limit. An
- intermediary can attempt to avoid this problem by passing on values presented by
- different peers, but it is not obligated to do so.
-
-
- A server that receives a larger header block than it is willing to handle can send an
- HTTP 431 (Request Header Fields Too Large) status code . A
- client can discard responses that it cannot process. The header block MUST be processed
- to ensure a consistent connection state, unless the connection is closed.
-
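- As one concrete, non-normative illustration (using the Go http2.Transport that
- appears later in this change), a client advertises such a limit through its
- MaxHeaderListSize field:
-
-   // Advertise a 1 MiB SETTINGS_MAX_HEADER_LIST_SIZE; responses whose header
-   // list exceeds it are treated as errors rather than buffered without bound.
-   t := &http2.Transport{MaxHeaderListSize: 1 << 20}
-   client := &http.Client{Transport: t}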
-
-
-
-
-
- HTTP/2 enables greater use of compression for both header fields () and entity bodies. Compression can allow an attacker to recover
- secret data when it is compressed in the same context as data under attacker control.
-
-
- There are demonstrable attacks on compression that exploit the characteristics of the web
- (e.g., ). The attacker induces multiple requests containing
- varying plaintext, observing the length of the resulting ciphertext in each, which
- reveals a shorter length when a guess about the secret is correct.
-
-
- Implementations communicating on a secure channel MUST NOT compress content that includes
- both confidential and attacker-controlled data unless separate compression dictionaries
- are used for each source of data. Compression MUST NOT be used if the source of data
- cannot be reliably determined. Generic stream compression, such as that provided by TLS,
- MUST NOT be used with HTTP/2 ().
-
-
- Further considerations regarding the compression of header fields are described in .
-
-
-
-
-
- Padding within HTTP/2 is not intended as a replacement for general purpose padding, such
- as might be provided by TLS. Redundant padding could even be
- counterproductive. Correct application can depend on having specific knowledge of the
- data that is being padded.
-
-
- To mitigate attacks that rely on compression, disabling or limiting compression might be
- preferable to padding as a countermeasure.
-
-
- Padding can be used to obscure the exact size of frame content, and is provided to
- mitigate specific attacks within HTTP. For example, attacks where compressed content
- includes both attacker-controlled plaintext and secret data (see for example, ).
-
-
- Use of padding can result in less protection than might seem immediately obvious. At
- best, padding only makes it more difficult for an attacker to infer length information by
- increasing the number of frames an attacker has to observe. Incorrectly implemented
- padding schemes can be easily defeated. In particular, randomized padding with a
- predictable distribution provides very little protection; similarly, padding payloads to a
- fixed size exposes information as payload sizes cross the fixed size boundary, which could
- be possible if an attacker can control plaintext.
-
-
- Intermediaries SHOULD retain padding for DATA frames, but MAY drop padding
- for HEADERS and PUSH_PROMISE frames. A valid reason for an
- intermediary to change the amount of padding of frames is to improve the protections that
- padding provides.
-
-
-
-
-
- Several characteristics of HTTP/2 provide an observer an opportunity to correlate actions
- of a single client or server over time. This includes the value of settings, the manner
- in which flow control windows are managed, the way priorities are allocated to streams,
- timing of reactions to stimulus, and handling of any optional features.
-
-
- Insofar as this creates observable differences in behavior, they could be used as a basis
- for fingerprinting a specific client, as defined in .
-
-
-
-
-
-
- A string for identifying HTTP/2 is entered into the "Application Layer Protocol Negotiation
- (ALPN) Protocol IDs" registry established in .
-
-
- This document establishes a registry for frame types, settings, and error codes. These new
- registries are entered into a new "Hypertext Transfer Protocol (HTTP) 2 Parameters" section.
-
-
- This document registers the HTTP2-Settings header field for
- use in HTTP, as well as the 421 (Misdirected Request) status code.
-
-
- This document registers the PRI method for use in HTTP, to avoid
- collisions with the connection preface.
-
-
-
-
- This document creates two registrations for the identification of HTTP/2 in the
- "Application Layer Protocol Negotiation (ALPN) Protocol IDs" registry established in .
-
-
- The "h2" string identifies HTTP/2 when used over TLS:
-
- Protocol:  HTTP/2 over TLS
- Identification Sequence:  0x68 0x32 ("h2")
- Specification:  This document
-
-
-
- The "h2c" string identifies HTTP/2 when used over cleartext TCP:
-
- Protocol:  HTTP/2 over TCP
- Identification Sequence:  0x68 0x32 0x63 ("h2c")
- Specification:  This document
-
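- For implementers, a brief non-normative sketch of offering both identifiers with
- Go's crypto/tls (the host name is only an example):
-
-   cfg := &tls.Config{
-       // Offer HTTP/2 first, falling back to HTTP/1.1 when the peer does not
-       // negotiate "h2" via ALPN.
-       NextProtos: []string{"h2", "http/1.1"},
-   }
-   conn, err := tls.Dial("tcp", "http2.golang.org:443", cfg)
-   if err != nil {
-       // handle the dial error and stop
-   }
-   if err := conn.Handshake(); err != nil {
-       // handle the handshake error and stop
-   }
-   if conn.ConnectionState().NegotiatedProtocol == "h2" {
-       // proceed with the HTTP/2 connection preface
-   }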
-
-
-
-
-
- This document establishes a registry for HTTP/2 frame type codes. The "HTTP/2 Frame
- Type" registry manages an 8-bit space. The "HTTP/2 Frame Type" registry operates under
- either of the "IETF Review" or "IESG Approval" policies for
- values between 0x00 and 0xef, with values between 0xf0 and 0xff being reserved for
- experimental use.
-
-
- New entries in this registry require the following information:
-
-
- A name or label for the frame type.
-
-
- The 8-bit code assigned to the frame type.
-
-
- A reference to a specification that includes a description of the frame layout,
- its semantics, and the flags that the frame type uses, including any parts of the frame
- that are conditionally present based on the value of flags.
-
-
-
-
- The entries in the following table are registered by this document.
-
-
- Frame Type      Code
- DATA            0x0
- HEADERS         0x1
- PRIORITY        0x2
- RST_STREAM      0x3
- SETTINGS        0x4
- PUSH_PROMISE    0x5
- PING            0x6
- GOAWAY          0x7
- WINDOW_UPDATE   0x8
- CONTINUATION    0x9
-
-
-
-
-
- This document establishes a registry for HTTP/2 settings. The "HTTP/2 Settings" registry
- manages a 16-bit space. The "HTTP/2 Settings" registry operates under the "Expert Review" policy for values in the range from 0x0000 to
- 0xefff, with values between 0xf000 and 0xffff being reserved for experimental use.
-
-
- New registrations are advised to provide the following information:
-
-
- A symbolic name for the setting. Specifying a setting name is optional.
-
-
- The 16-bit code assigned to the setting.
-
-
- An initial value for the setting.
-
-
- An optional reference to a specification that describes the use of the setting.
-
-
-
-
- An initial set of setting registrations can be found in .
-
-
- Name                     Code   Initial Value
- HEADER_TABLE_SIZE        0x1    4096
- ENABLE_PUSH              0x2    1
- MAX_CONCURRENT_STREAMS   0x3    (infinite)
- INITIAL_WINDOW_SIZE      0x4    65535
- MAX_FRAME_SIZE           0x5    16384
- MAX_HEADER_LIST_SIZE     0x6    (infinite)
-
-
-
-
-
-
- This document establishes a registry for HTTP/2 error codes. The "HTTP/2 Error Code"
- registry manages a 32-bit space. The "HTTP/2 Error Code" registry operates under the
- "Expert Review" policy.
-
-
- Registrations for error codes are required to include a description of the error code. An
- expert reviewer is advised to examine new registrations for possible duplication with
- existing error codes. Use of existing registrations is to be encouraged, but not
- mandated.
-
-
- New registrations are advised to provide the following information:
-
-
- A name for the error code. Specifying an error code name is optional.
-
-
- The 32-bit error code value.
-
-
- A brief description of the error code semantics, longer if no detailed specification
- is provided.
-
-
- An optional reference for a specification that defines the error code.
-
-
-
-
- The entries in the following table are registered by this document.
-
-
- Name                  Code   Description
- NO_ERROR              0x0    Graceful shutdown
- PROTOCOL_ERROR        0x1    Protocol error detected
- INTERNAL_ERROR        0x2    Implementation fault
- FLOW_CONTROL_ERROR    0x3    Flow control limits exceeded
- SETTINGS_TIMEOUT      0x4    Settings not acknowledged
- STREAM_CLOSED         0x5    Frame received for closed stream
- FRAME_SIZE_ERROR      0x6    Frame size incorrect
- REFUSED_STREAM        0x7    Stream not processed
- CANCEL                0x8    Stream cancelled
- COMPRESSION_ERROR     0x9    Compression state not updated
- CONNECT_ERROR         0xa    TCP connection error for CONNECT method
- ENHANCE_YOUR_CALM     0xb    Processing capacity exceeded
- INADEQUATE_SECURITY   0xc    Negotiated TLS parameters not acceptable
-
-
-
-
-
-
-
- This section registers the HTTP2-Settings header field in the
- Permanent Message Header Field Registry.
-
-
- Header field name:  HTTP2-Settings
-
- Applicable protocol:  http
-
- Status:  standard
-
- Author/Change controller:  IETF
-
- Specification document(s):  of this document
-
- Related information:  This header field is only used by an HTTP/2 client for
- Upgrade-based negotiation.
-
-
-
-
-
-
-
- This section registers the PRI method in the HTTP Method
- Registry ().
-
-
- Method Name:  PRI
-
- Safe:  No
-
- Idempotent:  No
-
- Specification document(s):  of this document
-
- Related information:  This method is never used by an actual client. This method
- will appear to be used when an HTTP/1.1 server or intermediary attempts to parse
- an HTTP/2 connection preface.
-
-
-
-
-
-
-
- This document registers the 421 (Misdirected Request) HTTP Status code in the Hypertext
- Transfer Protocol (HTTP) Status Code Registry ().
-
-
-
-
- Status Code:  421
-
- Short Description:  Misdirected Request
-
- Specification:  of this document
-
-
-
-
-
-
-
-
-
- This document includes substantial input from the following individuals:
-
-
- Adam Langley, Wan-Teh Chang, Jim Morrison, Mark Nottingham, Alyssa Wilk, Costin
- Manolache, William Chan, Vitaliy Lvin, Joe Chan, Adam Barth, Ryan Hamilton, Gavin
- Peters, Kent Alstad, Kevin Lindsay, Paul Amer, Fan Yang, Jonathan Leighton (SPDY
- contributors).
-
-
- Gabriel Montenegro and Willy Tarreau (Upgrade mechanism).
-
-
- William Chan, Salvatore Loreto, Osama Mazahir, Gabriel Montenegro, Jitu Padhye, Roberto
- Peon, Rob Trace (Flow control).
-
-
- Mike Bishop (Extensibility).
-
-
- Mark Nottingham, Julian Reschke, James Snell, Jeff Pinner, Mike Bishop, Herve Ruellan
- (Substantial editorial contributions).
-
-
- Kari Hurtta, Tatsuhiro Tsujikawa, Greg Wilkins, Poul-Henning Kamp.
-
-
- Alexey Melnikov was an editor of this document during 2013.
-
-
- A substantial proportion of Martin's contribution was supported by Microsoft during his
- employment there.
-
-
-
-
-
-
-
-
-
-
- HPACK - Header Compression for HTTP/2
-
-
-
-
-
-
-
-
-
-
-
- Transmission Control Protocol
-
-
- University of Southern California (USC)/Information Sciences
- Institute
-
-
-
-
-
-
-
-
-
-
- Key words for use in RFCs to Indicate Requirement Levels
-
-
- Harvard University
- sob@harvard.edu
-
-
-
-
-
-
-
-
-
-
- HTTP Over TLS
-
-
-
-
-
-
-
-
-
- Uniform Resource Identifier (URI): Generic
- Syntax
-
-
-
-
-
-
-
-
-
-
-
- The Base16, Base32, and Base64 Data Encodings
-
-
-
-
-
-
-
-
- Guidelines for Writing an IANA Considerations Section in RFCs
-
-
-
-
-
-
-
-
-
-
- Augmented BNF for Syntax Specifications: ABNF
-
-
-
-
-
-
-
-
-
-
- The Transport Layer Security (TLS) Protocol Version 1.2
-
-
-
-
-
-
-
-
-
-
- Transport Layer Security (TLS) Extensions: Extension Definitions
-
-
-
-
-
-
-
-
-
- Transport Layer Security (TLS) Application-Layer Protocol Negotiation Extension
-
-
-
-
-
-
-
-
-
-
-
-
- TLS Elliptic Curve Cipher Suites with SHA-256/384 and AES Galois
- Counter Mode (GCM)
-
-
-
-
-
-
-
-
-
-
- Digital Signature Standard (DSS)
-
- NIST
-
-
-
-
-
-
-
-
- Hypertext Transfer Protocol (HTTP/1.1): Message Syntax and Routing
-
- Adobe Systems Incorporated
- fielding@gbiv.com
-
-
- greenbytes GmbH
- julian.reschke@greenbytes.de
-
-
-
-
-
-
-
-
-
- Hypertext Transfer Protocol (HTTP/1.1): Semantics and Content
-
- Adobe Systems Incorporated
- fielding@gbiv.com
-
-
- greenbytes GmbH
- julian.reschke@greenbytes.de
-
-
-
-
-
-
-
-
- Hypertext Transfer Protocol (HTTP/1.1): Conditional Requests
-
- Adobe Systems Incorporated
- fielding@gbiv.com
-
-
- greenbytes GmbH
- julian.reschke@greenbytes.de
-
-
-
-
-
-
-
- Hypertext Transfer Protocol (HTTP/1.1): Range Requests
-
- Adobe Systems Incorporated
- fielding@gbiv.com
-
-
- World Wide Web Consortium
- ylafon@w3.org
-
-
- greenbytes GmbH
- julian.reschke@greenbytes.de
-
-
-
-
-
-
-
- Hypertext Transfer Protocol (HTTP/1.1): Caching
-
- Adobe Systems Incorporated
- fielding@gbiv.com
-
-
- Akamai
- mnot@mnot.net
-
-
- greenbytes GmbH
- julian.reschke@greenbytes.de
-
-
-
-
-
-
-
-
- Hypertext Transfer Protocol (HTTP/1.1): Authentication
-
- Adobe Systems Incorporated
- fielding@gbiv.com
-
-
- greenbytes GmbH
- julian.reschke@greenbytes.de
-
-
-
-
-
-
-
-
-
- HTTP State Management Mechanism
-
-
-
-
-
-
-
-
-
-
-
- TCP Extensions for High Performance
-
-
-
-
-
-
-
-
-
-
-
- Transport Layer Security Protocol Compression Methods
-
-
-
-
-
-
-
-
- Additional HTTP Status Codes
-
-
-
-
-
-
-
-
-
-
- Elliptic Curve Cryptography (ECC) Cipher Suites for Transport Layer Security (TLS)
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- AES Galois Counter Mode (GCM) Cipher Suites for TLS
-
-
-
-
-
-
-
-
-
-
-
- HTML5
-
-
-
-
-
-
-
-
-
-
- Latest version available at
- .
-
-
-
-
-
-
- Talking to Yourself for Fun and Profit
-
-
-
-
-
-
-
-
-
-
-
-
-
- BREACH: Reviving the CRIME Attack
-
-
-
-
-
-
-
-
-
-
- Registration Procedures for Message Header Fields
-
- Nine by Nine
- GK-IETF@ninebynine.org
-
-
- BEA Systems
- mnot@pobox.com
-
-
- HP Labs
- JeffMogul@acm.org
-
-
-
-
-
-
-
-
-
- Recommendations for Secure Use of TLS and DTLS
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- HTTP Alternative Services
-
-
- Akamai
-
-
- Mozilla
-
-
- greenbytes
-
-
-
-
-
-
-
-
-
-
- This section is to be removed by RFC Editor before publication.
-
-
-
-
- Renamed Not Authoritative status code to Misdirected Request.
-
-
-
-
-
- Pseudo-header fields are now required to appear strictly before regular ones.
-
-
- Restored 1xx series status codes, except 101.
-
-
- Changed frame length field to 24 bits. Expanded frame header to 9 octets. Added a setting
- to limit the damage.
-
-
- Added a setting to advise peers of header set size limits.
-
-
- Removed segments.
-
-
- Made non-semantic-bearing HEADERS frames illegal in the HTTP mapping.
-
-
-
-
-
- Restored extensibility options.
-
-
- Restricting TLS cipher suites to AEAD only.
-
-
- Removing Content-Encoding requirements.
-
-
- Permitting the use of PRIORITY after stream close.
-
-
- Removed ALTSVC frame.
-
-
- Removed BLOCKED frame.
-
-
- Reducing the maximum padding size to 256 octets; removing padding from
- CONTINUATION frames.
-
-
- Removed per-frame GZIP compression.
-
-
-
-
-
- Added BLOCKED frame (at risk).
-
-
- Simplified priority scheme.
-
-
- Added DATA per-frame GZIP compression.
-
-
-
-
-
- Changed "connection header" to "connection preface" to avoid confusion.
-
-
- Added dependency-based stream prioritization.
-
-
- Added "h2c" identifier to distinguish between cleartext and secured HTTP/2.
-
-
- Adding missing padding to PUSH_PROMISE.
-
-
- Integrate ALTSVC frame and supporting text.
-
-
- Dropping requirement on "deflate" Content-Encoding.
-
-
- Improving security considerations around use of compression.
-
-
-
-
-
- Adding padding for data frames.
-
-
- Renumbering frame types, error codes, and settings.
-
-
- Adding INADEQUATE_SECURITY error code.
-
-
- Updating TLS usage requirements to 1.2; forbidding TLS compression.
-
-
- Removing extensibility for frames and settings.
-
-
- Changing setting identifier size.
-
-
- Removing the ability to disable flow control.
-
-
- Changing the protocol identification token to "h2".
-
-
- Changing the use of :authority to make it optional and to allow userinfo in non-HTTP
- cases.
-
-
- Allowing split on 0x0 for Cookie.
-
-
- Reserved PRI method in HTTP/1.1 to avoid possible future collisions.
-
-
-
-
-
- Added cookie crumbling for more efficient header compression.
-
-
- Added header field ordering with the value-concatenation mechanism.
-
-
-
-
-
- Marked draft for implementation.
-
-
-
-
-
- Adding definition for CONNECT method.
-
-
- Constraining the use of push to safe, cacheable methods with no request body.
-
-
- Changing from :host to :authority to remove any potential confusion.
-
-
- Adding setting for header compression table size.
-
-
- Adding settings acknowledgement.
-
-
- Removing unnecessary and potentially problematic flags from CONTINUATION.
-
-
- Added denial of service considerations.
-
-
-
-
- Marking the draft ready for implementation.
-
-
- Renumbering END_PUSH_PROMISE flag.
-
-
- Editorial clarifications and changes.
-
-
-
-
-
- Added CONTINUATION frame for HEADERS and PUSH_PROMISE.
-
-
- PUSH_PROMISE is no longer implicitly prohibited if SETTINGS_MAX_CONCURRENT_STREAMS is
- zero.
-
-
- Push expanded to allow all safe methods without a request body.
-
-
- Clarified the use of HTTP header fields in requests and responses. Prohibited HTTP/1.1
- hop-by-hop header fields.
-
-
- Requiring that intermediaries not forward requests with missing or illegal routing
- :-headers.
-
-
- Clarified requirements around handling different frames after stream close, stream reset
- and GOAWAY.
-
-
- Added more specific prohibitions for sending of different frame types in various stream
- states.
-
-
- Making the last received setting value the effective value.
-
-
- Clarified requirements on TLS version, extension and ciphers.
-
-
-
-
-
- Committed major restructuring atrocities.
-
-
- Added reference to first header compression draft.
-
-
- Added more formal description of frame lifecycle.
-
-
- Moved END_STREAM (renamed from FINAL) back to HEADERS/DATA.
-
-
- Removed HEADERS+PRIORITY, added optional priority to HEADERS frame.
-
-
- Added PRIORITY frame.
-
-
-
-
-
- Added continuations to frames carrying header blocks.
-
-
- Replaced use of "session" with "connection" to avoid confusion with other HTTP stateful
- concepts, like cookies.
-
-
- Removed "message".
-
-
- Switched to TLS ALPN from NPN.
-
-
- Editorial changes.
-
-
-
-
-
- Added IANA considerations section for frame types, error codes and settings.
-
-
- Removed data frame compression.
-
-
- Added PUSH_PROMISE.
-
-
- Added globally applicable flags to framing.
-
-
- Removed zlib-based header compression mechanism.
-
-
- Updated references.
-
-
- Clarified stream identifier reuse.
-
-
- Removed CREDENTIALS frame and associated mechanisms.
-
-
- Added advice against naive implementation of flow control.
-
-
- Added session header section.
-
-
- Restructured frame header. Removed distinction between data and control frames.
-
-
- Altered flow control properties to include session-level limits.
-
-
- Added note on cacheability of pushed resources and multiple tenant servers.
-
-
- Changed protocol label form based on discussions.
-
-
-
-
-
- Changed title throughout.
-
-
- Removed section on Incompatibilities with SPDY draft#2.
-
-
- Changed INTERNAL_ERROR on GOAWAY to have a value of 2 .
-
-
- Replaced abstract and introduction.
-
-
- Added section on starting HTTP/2.0, including upgrade mechanism.
-
-
- Removed unused references.
-
-
- Added flow control principles based on .
-
-
-
-
-
- Adopted as base for draft-ietf-httpbis-http2.
-
-
- Updated authors/editors list.
-
-
- Added status note.
-
-
-
-
-
-
-
diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go
deleted file mode 100644
index 7fe267bb..00000000
--- a/vendor/golang.org/x/net/http2/transport.go
+++ /dev/null
@@ -1,1666 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Transport code.
-
-package http2
-
-import (
- "bufio"
- "bytes"
- "compress/gzip"
- "crypto/tls"
- "errors"
- "fmt"
- "io"
- "io/ioutil"
- "log"
- "net"
- "net/http"
- "sort"
- "strconv"
- "strings"
- "sync"
- "time"
-
- "golang.org/x/net/http2/hpack"
-)
-
-const (
- // transportDefaultConnFlow is how many connection-level flow control
- // tokens we give the server at start-up, past the default 64k.
- transportDefaultConnFlow = 1 << 30
-
- // transportDefaultStreamFlow is how many stream-level flow
- // control tokens we announce to the peer, and how many bytes
- // we buffer per stream.
- transportDefaultStreamFlow = 4 << 20
-
- // transportDefaultStreamMinRefresh is the minimum number of bytes we'll send
- // a stream-level WINDOW_UPDATE for at a time.
- transportDefaultStreamMinRefresh = 4 << 10
-
- defaultUserAgent = "Go-http-client/2.0"
-)
-
-// Transport is an HTTP/2 Transport.
-//
-// A Transport internally caches connections to servers. It is safe
-// for concurrent use by multiple goroutines.
-type Transport struct {
- // DialTLS specifies an optional dial function for creating
- // TLS connections for requests.
- //
- // If DialTLS is nil, tls.Dial is used.
- //
- // If the returned net.Conn has a ConnectionState method like tls.Conn,
- // it will be used to set http.Response.TLS.
- DialTLS func(network, addr string, cfg *tls.Config) (net.Conn, error)
-
- // TLSClientConfig specifies the TLS configuration to use with
- // tls.Client. If nil, the default configuration is used.
- TLSClientConfig *tls.Config
-
- // ConnPool optionally specifies an alternate connection pool to use.
- // If nil, the default is used.
- ConnPool ClientConnPool
-
- // DisableCompression, if true, prevents the Transport from
- // requesting compression with an "Accept-Encoding: gzip"
- // request header when the Request contains no existing
- // Accept-Encoding value. If the Transport requests gzip on
- // its own and gets a gzipped response, it's transparently
- // decoded in the Response.Body. However, if the user
- // explicitly requested gzip it is not automatically
- // uncompressed.
- DisableCompression bool
-
- // MaxHeaderListSize is the http2 SETTINGS_MAX_HEADER_LIST_SIZE to
- // send in the initial settings frame. It is how many bytes
- // of response headers are allowed. Unlike the http2 spec, zero here
- // means to use a default limit (currently 10MB). If you actually
- // want to advertise an unlimited value to the peer, Transport
- // interprets the highest possible value here (0xffffffff or 1<<32-1)
- // to mean no limit.
- MaxHeaderListSize uint32
-
- // t1, if non-nil, is the standard library Transport using
- // this transport. Its settings are used (but not its
- // RoundTrip method, etc).
- t1 *http.Transport
-
- connPoolOnce sync.Once
- connPoolOrDef ClientConnPool // non-nil version of ConnPool
-}
-
-func (t *Transport) maxHeaderListSize() uint32 {
- if t.MaxHeaderListSize == 0 {
- return 10 << 20
- }
- if t.MaxHeaderListSize == 0xffffffff {
- return 0
- }
- return t.MaxHeaderListSize
-}
-
-func (t *Transport) disableCompression() bool {
- return t.DisableCompression || (t.t1 != nil && t.t1.DisableCompression)
-}
-
-var errTransportVersion = errors.New("http2: ConfigureTransport is only supported starting at Go 1.6")
-
-// ConfigureTransport configures a net/http HTTP/1 Transport to use HTTP/2.
-// It requires Go 1.6 or later and returns an error if the net/http package is too old
-// or if t1 has already been HTTP/2-enabled.
-func ConfigureTransport(t1 *http.Transport) error {
- _, err := configureTransport(t1) // in configure_transport.go (go1.6) or not_go16.go
- return err
-}
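-
- // exampleTransportUsage is an illustrative sketch only, not part of the
- // original file: it shows the two typical ways this package gets wired up.
- // The URL below is just an example.
- func exampleTransportUsage() error {
- 	// Standalone HTTP/2-only client:
- 	c := &http.Client{Transport: &Transport{}}
- 	res, err := c.Get("https://http2.golang.org/reqinfo")
- 	if err != nil {
- 		return err
- 	}
- 	io.Copy(ioutil.Discard, res.Body)
- 	res.Body.Close()
-
- 	// Alternatively (Go 1.6+), upgrade an existing net/http Transport in
- 	// place so it speaks HTTP/2 when servers support it:
- 	t1 := &http.Transport{TLSClientConfig: &tls.Config{}}
- 	return ConfigureTransport(t1)
- }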
-
-func (t *Transport) connPool() ClientConnPool {
- t.connPoolOnce.Do(t.initConnPool)
- return t.connPoolOrDef
-}
-
-func (t *Transport) initConnPool() {
- if t.ConnPool != nil {
- t.connPoolOrDef = t.ConnPool
- } else {
- t.connPoolOrDef = &clientConnPool{t: t}
- }
-}
-
-// ClientConn is the state of a single HTTP/2 client connection to an
-// HTTP/2 server.
-type ClientConn struct {
- t *Transport
- tconn net.Conn // usually *tls.Conn, except specialized impls
- tlsState *tls.ConnectionState // nil only for specialized impls
-
- // readLoop goroutine fields:
- readerDone chan struct{} // closed on error
- readerErr error // set before readerDone is closed
-
- mu sync.Mutex // guards following
- cond *sync.Cond // hold mu; broadcast on flow/closed changes
- flow flow // our conn-level flow control quota (cs.flow is per stream)
- inflow flow // peer's conn-level flow control
- closed bool
- goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received
- streams map[uint32]*clientStream // client-initiated
- nextStreamID uint32
- bw *bufio.Writer
- br *bufio.Reader
- fr *Framer
- // Settings from peer:
- maxFrameSize uint32
- maxConcurrentStreams uint32
- initialWindowSize uint32
- hbuf bytes.Buffer // HPACK encoder writes into this
- henc *hpack.Encoder
- freeBuf [][]byte
-
- wmu sync.Mutex // held while writing; acquire AFTER mu if holding both
- werr error // first write error that has occurred
-}
-
-// clientStream is the state for a single HTTP/2 stream. One of these
-// is created for each Transport.RoundTrip call.
-type clientStream struct {
- cc *ClientConn
- req *http.Request
- ID uint32
- resc chan resAndError
- bufPipe pipe // buffered pipe with the flow-controlled response payload
- requestedGzip bool
-
- flow flow // guarded by cc.mu
- inflow flow // guarded by cc.mu
- bytesRemain int64 // -1 means unknown; owned by transportResponseBody.Read
- readErr error // sticky read error; owned by transportResponseBody.Read
- stopReqBody error // if non-nil, stop writing req body; guarded by cc.mu
-
- peerReset chan struct{} // closed on peer reset
- resetErr error // populated before peerReset is closed
-
- done chan struct{} // closed when stream remove from cc.streams map; close calls guarded by cc.mu
-
- // owned by clientConnReadLoop:
- pastHeaders bool // got first MetaHeadersFrame (actual headers)
- pastTrailers bool // got optional second MetaHeadersFrame (trailers)
-
- trailer http.Header // accumulated trailers
- resTrailer *http.Header // client's Response.Trailer
-}
-
-// awaitRequestCancel runs in its own goroutine and waits for the user
-// to either cancel a RoundTrip request (using the provided
-// Request.Cancel channel), or for the request to be done (any way it
-// might be removed from the cc.streams map: peer reset, successful
-// completion, TCP connection breakage, etc)
-func (cs *clientStream) awaitRequestCancel(cancel <-chan struct{}) {
- if cancel == nil {
- return
- }
- select {
- case <-cancel:
- cs.bufPipe.CloseWithError(errRequestCanceled)
- cs.cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
- case <-cs.done:
- }
-}
-
-// checkReset reports any error sent in a RST_STREAM frame by the
-// server.
-func (cs *clientStream) checkReset() error {
- select {
- case <-cs.peerReset:
- return cs.resetErr
- default:
- return nil
- }
-}
-
-func (cs *clientStream) abortRequestBodyWrite(err error) {
- if err == nil {
- panic("nil error")
- }
- cc := cs.cc
- cc.mu.Lock()
- cs.stopReqBody = err
- cc.cond.Broadcast()
- cc.mu.Unlock()
-}
-
-type stickyErrWriter struct {
- w io.Writer
- err *error
-}
-
-func (sew stickyErrWriter) Write(p []byte) (n int, err error) {
- if *sew.err != nil {
- return 0, *sew.err
- }
- n, err = sew.w.Write(p)
- *sew.err = err
- return
-}
-
-var ErrNoCachedConn = errors.New("http2: no cached connection was available")
-
-// RoundTripOpt are options for the Transport.RoundTripOpt method.
-type RoundTripOpt struct {
- // OnlyCachedConn controls whether RoundTripOpt may
- // create a new TCP connection. If set true and
- // no cached connection is available, RoundTripOpt
- // will return ErrNoCachedConn.
- OnlyCachedConn bool
-}
-
-func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
- return t.RoundTripOpt(req, RoundTripOpt{})
-}
-
- // authorityAddr converts a given authority (a host/IP, or host:port / ip:port)
- // to a host:port. The port 443 is added if needed.
-func authorityAddr(authority string) (addr string) {
- if _, _, err := net.SplitHostPort(authority); err == nil {
- return authority
- }
- return net.JoinHostPort(authority, "443")
-}
-
-// RoundTripOpt is like RoundTrip, but takes options.
-func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Response, error) {
- if req.URL.Scheme != "https" {
- return nil, errors.New("http2: unsupported scheme")
- }
-
- addr := authorityAddr(req.URL.Host)
- for {
- cc, err := t.connPool().GetClientConn(req, addr)
- if err != nil {
- t.vlogf("http2: Transport failed to get client conn for %s: %v", addr, err)
- return nil, err
- }
- res, err := cc.RoundTrip(req)
- if shouldRetryRequest(req, err) {
- continue
- }
- if err != nil {
- t.vlogf("RoundTrip failure: %v", err)
- return nil, err
- }
- return res, nil
- }
-}
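-
- // exampleOnlyCachedConn is an illustrative sketch only, not part of the
- // original file: it refuses to dial a new connection and fails fast with
- // ErrNoCachedConn when no pooled ClientConn is available for the request.
- func exampleOnlyCachedConn(t *Transport, req *http.Request) (*http.Response, error) {
- 	return t.RoundTripOpt(req, RoundTripOpt{OnlyCachedConn: true})
- }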
-
-// CloseIdleConnections closes any connections which were previously
-// connected from previous requests but are now sitting idle.
-// It does not interrupt any connections currently in use.
-func (t *Transport) CloseIdleConnections() {
- if cp, ok := t.connPool().(*clientConnPool); ok {
- cp.closeIdleConnections()
- }
-}
-
-var (
- errClientConnClosed = errors.New("http2: client conn is closed")
- errClientConnUnusable = errors.New("http2: client conn not usable")
-)
-
-func shouldRetryRequest(req *http.Request, err error) bool {
- // TODO: retry GET requests (no bodies) more aggressively, if shutdown
- // before response.
- return err == errClientConnUnusable
-}
-
-func (t *Transport) dialClientConn(addr string) (*ClientConn, error) {
- host, _, err := net.SplitHostPort(addr)
- if err != nil {
- return nil, err
- }
- tconn, err := t.dialTLS()("tcp", addr, t.newTLSConfig(host))
- if err != nil {
- return nil, err
- }
- return t.NewClientConn(tconn)
-}
-
-func (t *Transport) newTLSConfig(host string) *tls.Config {
- cfg := new(tls.Config)
- if t.TLSClientConfig != nil {
- *cfg = *t.TLSClientConfig
- }
- if !strSliceContains(cfg.NextProtos, NextProtoTLS) {
- cfg.NextProtos = append([]string{NextProtoTLS}, cfg.NextProtos...)
- }
- if cfg.ServerName == "" {
- cfg.ServerName = host
- }
- return cfg
-}
-
-func (t *Transport) dialTLS() func(string, string, *tls.Config) (net.Conn, error) {
- if t.DialTLS != nil {
- return t.DialTLS
- }
- return t.dialTLSDefault
-}
-
-func (t *Transport) dialTLSDefault(network, addr string, cfg *tls.Config) (net.Conn, error) {
- cn, err := tls.Dial(network, addr, cfg)
- if err != nil {
- return nil, err
- }
- if err := cn.Handshake(); err != nil {
- return nil, err
- }
- if !cfg.InsecureSkipVerify {
- if err := cn.VerifyHostname(cfg.ServerName); err != nil {
- return nil, err
- }
- }
- state := cn.ConnectionState()
- if p := state.NegotiatedProtocol; p != NextProtoTLS {
- return nil, fmt.Errorf("http2: unexpected ALPN protocol %q; want %q", p, NextProtoTLS)
- }
- if !state.NegotiatedProtocolIsMutual {
- return nil, errors.New("http2: could not negotiate protocol mutually")
- }
- return cn, nil
-}
-
-// disableKeepAlives reports whether connections should be closed as
-// soon as possible after handling the first request.
-func (t *Transport) disableKeepAlives() bool {
- return t.t1 != nil && t.t1.DisableKeepAlives
-}
-
-func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) {
- if VerboseLogs {
- t.vlogf("http2: Transport creating client conn to %v", c.RemoteAddr())
- }
- if _, err := c.Write(clientPreface); err != nil {
- t.vlogf("client preface write error: %v", err)
- return nil, err
- }
-
- cc := &ClientConn{
- t: t,
- tconn: c,
- readerDone: make(chan struct{}),
- nextStreamID: 1,
- maxFrameSize: 16 << 10, // spec default
- initialWindowSize: 65535, // spec default
- maxConcurrentStreams: 1000, // "infinite", per spec. 1000 seems good enough.
- streams: make(map[uint32]*clientStream),
- }
- cc.cond = sync.NewCond(&cc.mu)
- cc.flow.add(int32(initialWindowSize))
-
- // TODO: adjust this writer size to account for frame size +
- // MTU + crypto/tls record padding.
- cc.bw = bufio.NewWriter(stickyErrWriter{c, &cc.werr})
- cc.br = bufio.NewReader(c)
- cc.fr = NewFramer(cc.bw, cc.br)
- cc.fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil)
- cc.fr.MaxHeaderListSize = t.maxHeaderListSize()
-
- // TODO: SetMaxDynamicTableSize, SetMaxDynamicTableSizeLimit on
- // henc in response to SETTINGS frames?
- cc.henc = hpack.NewEncoder(&cc.hbuf)
-
- if cs, ok := c.(connectionStater); ok {
- state := cs.ConnectionState()
- cc.tlsState = &state
- }
-
- initialSettings := []Setting{
- Setting{ID: SettingEnablePush, Val: 0},
- Setting{ID: SettingInitialWindowSize, Val: transportDefaultStreamFlow},
- }
- if max := t.maxHeaderListSize(); max != 0 {
- initialSettings = append(initialSettings, Setting{ID: SettingMaxHeaderListSize, Val: max})
- }
- cc.fr.WriteSettings(initialSettings...)
- cc.fr.WriteWindowUpdate(0, transportDefaultConnFlow)
- cc.inflow.add(transportDefaultConnFlow + initialWindowSize)
- cc.bw.Flush()
- if cc.werr != nil {
- return nil, cc.werr
- }
-
- // Read the obligatory SETTINGS frame
- f, err := cc.fr.ReadFrame()
- if err != nil {
- return nil, err
- }
- sf, ok := f.(*SettingsFrame)
- if !ok {
- return nil, fmt.Errorf("expected settings frame, got: %T", f)
- }
- cc.fr.WriteSettingsAck()
- cc.bw.Flush()
-
- sf.ForeachSetting(func(s Setting) error {
- switch s.ID {
- case SettingMaxFrameSize:
- cc.maxFrameSize = s.Val
- case SettingMaxConcurrentStreams:
- cc.maxConcurrentStreams = s.Val
- case SettingInitialWindowSize:
- cc.initialWindowSize = s.Val
- default:
- // TODO(bradfitz): handle more; at least SETTINGS_HEADER_TABLE_SIZE?
- t.vlogf("Unhandled Setting: %v", s)
- }
- return nil
- })
-
- go cc.readLoop()
- return cc, nil
-}
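-
- // exampleOwnConn is an illustrative sketch only, not part of the original
- // file: a caller that manages its own TLS dialing can hand the finished
- // connection to NewClientConn. The host name is just an example.
- func exampleOwnConn(t *Transport) (*ClientConn, error) {
- 	conn, err := tls.Dial("tcp", "http2.golang.org:443", &tls.Config{
- 		ServerName: "http2.golang.org",
- 		NextProtos: []string{NextProtoTLS},
- 	})
- 	if err != nil {
- 		return nil, err
- 	}
- 	if err := conn.Handshake(); err != nil {
- 		return nil, err
- 	}
- 	return t.NewClientConn(conn)
- }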
-
-func (cc *ClientConn) setGoAway(f *GoAwayFrame) {
- cc.mu.Lock()
- defer cc.mu.Unlock()
- cc.goAway = f
-}
-
-func (cc *ClientConn) CanTakeNewRequest() bool {
- cc.mu.Lock()
- defer cc.mu.Unlock()
- return cc.canTakeNewRequestLocked()
-}
-
-func (cc *ClientConn) canTakeNewRequestLocked() bool {
- return cc.goAway == nil && !cc.closed &&
- int64(len(cc.streams)+1) < int64(cc.maxConcurrentStreams) &&
- cc.nextStreamID < 2147483647
-}
-
-func (cc *ClientConn) closeIfIdle() {
- cc.mu.Lock()
- if len(cc.streams) > 0 {
- cc.mu.Unlock()
- return
- }
- cc.closed = true
- // TODO: do clients send GOAWAY too? maybe? Just Close:
- cc.mu.Unlock()
-
- cc.tconn.Close()
-}
-
-const maxAllocFrameSize = 512 << 10
-
-// frameBuffer returns a scratch buffer suitable for writing DATA frames.
-// They're capped at the min of the peer's max frame size or 512KB
-// (kinda arbitrarily), but definitely capped so we don't allocate 4GB
- // buffers.
-func (cc *ClientConn) frameScratchBuffer() []byte {
- cc.mu.Lock()
- size := cc.maxFrameSize
- if size > maxAllocFrameSize {
- size = maxAllocFrameSize
- }
- for i, buf := range cc.freeBuf {
- if len(buf) >= int(size) {
- cc.freeBuf[i] = nil
- cc.mu.Unlock()
- return buf[:size]
- }
- }
- cc.mu.Unlock()
- return make([]byte, size)
-}
-
-func (cc *ClientConn) putFrameScratchBuffer(buf []byte) {
- cc.mu.Lock()
- defer cc.mu.Unlock()
- const maxBufs = 4 // arbitrary; 4 concurrent requests per conn? investigate.
- if len(cc.freeBuf) < maxBufs {
- cc.freeBuf = append(cc.freeBuf, buf)
- return
- }
- for i, old := range cc.freeBuf {
- if old == nil {
- cc.freeBuf[i] = buf
- return
- }
- }
- // forget about it.
-}
-
-// errRequestCanceled is a copy of net/http's errRequestCanceled because it's not
- // exported. At least they'll be DeepEqual for h1-vs-h2 comparison tests.
-var errRequestCanceled = errors.New("net/http: request canceled")
-
-func commaSeparatedTrailers(req *http.Request) (string, error) {
- keys := make([]string, 0, len(req.Trailer))
- for k := range req.Trailer {
- k = http.CanonicalHeaderKey(k)
- switch k {
- case "Transfer-Encoding", "Trailer", "Content-Length":
- return "", &badStringError{"invalid Trailer key", k}
- }
- keys = append(keys, k)
- }
- if len(keys) > 0 {
- sort.Strings(keys)
- // TODO: could do better allocation-wise here, but trailers are rare,
- // so being lazy for now.
- return strings.Join(keys, ","), nil
- }
- return "", nil
-}
-
-func (cc *ClientConn) responseHeaderTimeout() time.Duration {
- if cc.t.t1 != nil {
- return cc.t.t1.ResponseHeaderTimeout
- }
- // No way to do this (yet?) with just an http2.Transport. Probably
- // no need. Request.Cancel is the new way. We only need to support
- // this for compatibility with the old http.Transport fields when
- // we're doing transparent http2.
- return 0
-}
-
-// checkConnHeaders checks whether req has any invalid connection-level headers.
-// per RFC 7540 section 8.1.2.2: Connection-Specific Header Fields.
-// Certain headers are special-cased as okay but not transmitted later.
-func checkConnHeaders(req *http.Request) error {
- if v := req.Header.Get("Upgrade"); v != "" {
- return errors.New("http2: invalid Upgrade request header")
- }
- if v := req.Header.Get("Transfer-Encoding"); (v != "" && v != "chunked") || len(req.Header["Transfer-Encoding"]) > 1 {
- return errors.New("http2: invalid Transfer-Encoding request header")
- }
- if v := req.Header.Get("Connection"); (v != "" && v != "close" && v != "keep-alive") || len(req.Header["Connection"]) > 1 {
- return errors.New("http2: invalid Connection request header")
- }
- return nil
-}
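-
- // exampleCheckConnHeaders is an illustrative sketch only, not part of the
- // original file: connection-specific header fields such as Upgrade are
- // rejected before a request ever reaches the wire. The URL is an example.
- func exampleCheckConnHeaders() {
- 	req, _ := http.NewRequest("GET", "https://example.com/", nil)
- 	req.Header.Set("Upgrade", "websocket")
- 	if err := checkConnHeaders(req); err != nil {
- 		log.Printf("rejected: %v", err) // http2: invalid Upgrade request header
- 	}
- }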
-
-func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
- if err := checkConnHeaders(req); err != nil {
- return nil, err
- }
-
- trailers, err := commaSeparatedTrailers(req)
- if err != nil {
- return nil, err
- }
- hasTrailers := trailers != ""
-
- var body io.Reader = req.Body
- contentLen := req.ContentLength
- if req.Body != nil && contentLen == 0 {
- // Test to see if it's actually zero or just unset.
- var buf [1]byte
- n, rerr := io.ReadFull(body, buf[:])
- if rerr != nil && rerr != io.EOF {
- contentLen = -1
- body = errorReader{rerr}
- } else if n == 1 {
- // Oh, guess there is data in this Body Reader after all.
- // The ContentLength field just wasn't set.
- // Stitch the Body back together again, re-attaching our
- // consumed byte.
- contentLen = -1
- body = io.MultiReader(bytes.NewReader(buf[:]), body)
- } else {
- // Body is actually empty.
- body = nil
- }
- }
-
- cc.mu.Lock()
- if cc.closed || !cc.canTakeNewRequestLocked() {
- cc.mu.Unlock()
- return nil, errClientConnUnusable
- }
-
- cs := cc.newStream()
- cs.req = req
- hasBody := body != nil
-
- // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere?
- if !cc.t.disableCompression() &&
- req.Header.Get("Accept-Encoding") == "" &&
- req.Header.Get("Range") == "" &&
- req.Method != "HEAD" {
- // Request gzip only, not deflate. Deflate is ambiguous and
- // not as universally supported anyway.
- // See: http://www.gzip.org/zlib/zlib_faq.html#faq38
- //
- // Note that we don't request this for HEAD requests,
- // due to a bug in nginx:
- // http://trac.nginx.org/nginx/ticket/358
- // https://golang.org/issue/5522
- //
- // We don't request gzip if the request is for a range, since
- // auto-decoding a portion of a gzipped document will just fail
- // anyway. See https://golang.org/issue/8923
- cs.requestedGzip = true
- }
-
- // we send: HEADERS{1}, CONTINUATION{0,} + DATA{0,} (DATA is
- // sent by writeRequestBody below, along with any Trailers,
- // again in form HEADERS{1}, CONTINUATION{0,})
- hdrs := cc.encodeHeaders(req, cs.requestedGzip, trailers, contentLen)
- cc.wmu.Lock()
- endStream := !hasBody && !hasTrailers
- werr := cc.writeHeaders(cs.ID, endStream, hdrs)
- cc.wmu.Unlock()
- cc.mu.Unlock()
-
- if werr != nil {
- if hasBody {
- req.Body.Close() // per RoundTripper contract
- }
- cc.forgetStreamID(cs.ID)
- // Don't bother sending a RST_STREAM (our write already failed;
- // no need to keep writing)
- return nil, werr
- }
-
- var respHeaderTimer <-chan time.Time
- var bodyCopyErrc chan error // result of body copy
- if hasBody {
- bodyCopyErrc = make(chan error, 1)
- go func() {
- bodyCopyErrc <- cs.writeRequestBody(body, req.Body)
- }()
- } else {
- if d := cc.responseHeaderTimeout(); d != 0 {
- timer := time.NewTimer(d)
- defer timer.Stop()
- respHeaderTimer = timer.C
- }
- }
-
- readLoopResCh := cs.resc
- requestCanceledCh := requestCancel(req)
- bodyWritten := false
-
- for {
- select {
- case re := <-readLoopResCh:
- res := re.res
- if re.err != nil || res.StatusCode > 299 {
- // On error or status code 3xx, 4xx, 5xx, etc abort any
- // ongoing write, assuming that the server doesn't care
- // about our request body. If the server replied with 1xx or
- // 2xx, however, then assume the server DOES potentially
- // want our body (e.g. full-duplex streaming:
- // golang.org/issue/13444). If it turns out the server
- // doesn't, they'll RST_STREAM us soon enough. This is a
- // heuristic to avoid adding knobs to Transport. Hopefully
- // we can keep it.
- cs.abortRequestBodyWrite(errStopReqBodyWrite)
- }
- if re.err != nil {
- cc.forgetStreamID(cs.ID)
- return nil, re.err
- }
- res.Request = req
- res.TLS = cc.tlsState
- return res, nil
- case <-respHeaderTimer:
- cc.forgetStreamID(cs.ID)
- if !hasBody || bodyWritten {
- cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
- } else {
- cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel)
- }
- return nil, errTimeout
- case <-requestCanceledCh:
- cc.forgetStreamID(cs.ID)
- if !hasBody || bodyWritten {
- cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
- } else {
- cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel)
- }
- return nil, errRequestCanceled
- case <-cs.peerReset:
- // processResetStream already removed the
- // stream from the streams map; no need for
- // forgetStreamID.
- return nil, cs.resetErr
- case err := <-bodyCopyErrc:
- if err != nil {
- return nil, err
- }
- bodyWritten = true
- if d := cc.responseHeaderTimeout(); d != 0 {
- timer := time.NewTimer(d)
- defer timer.Stop()
- respHeaderTimer = timer.C
- }
- }
- }
-}
-
-// requires cc.wmu be held
-func (cc *ClientConn) writeHeaders(streamID uint32, endStream bool, hdrs []byte) error {
- first := true // first frame written (HEADERS is first, then CONTINUATION)
- frameSize := int(cc.maxFrameSize)
- for len(hdrs) > 0 && cc.werr == nil {
- chunk := hdrs
- if len(chunk) > frameSize {
- chunk = chunk[:frameSize]
- }
- hdrs = hdrs[len(chunk):]
- endHeaders := len(hdrs) == 0
- if first {
- cc.fr.WriteHeaders(HeadersFrameParam{
- StreamID: streamID,
- BlockFragment: chunk,
- EndStream: endStream,
- EndHeaders: endHeaders,
- })
- first = false
- } else {
- cc.fr.WriteContinuation(streamID, endHeaders, chunk)
- }
- }
- // TODO(bradfitz): this Flush could potentially block (as
- // could the WriteHeaders call(s) above), which means they
- // wouldn't respond to Request.Cancel being readable. That's
- // rare, but this should probably be in a goroutine.
- cc.bw.Flush()
- return cc.werr
-}
-
-// internal error values; they don't escape to callers
-var (
- // abort request body write; don't send cancel
- errStopReqBodyWrite = errors.New("http2: aborting request body write")
-
- // abort request body write, but send stream reset of cancel.
- errStopReqBodyWriteAndCancel = errors.New("http2: canceling request")
-)
-
-func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) (err error) {
- cc := cs.cc
- sentEnd := false // whether we sent the final DATA frame w/ END_STREAM
- buf := cc.frameScratchBuffer()
- defer cc.putFrameScratchBuffer(buf)
-
- defer func() {
- // TODO: write h12Compare test showing whether
- // Request.Body is closed by the Transport,
- // and in multiple cases: server replies <=299 and >299
- // while still writing request body
- cerr := bodyCloser.Close()
- if err == nil {
- err = cerr
- }
- }()
-
- req := cs.req
- hasTrailers := req.Trailer != nil
-
- var sawEOF bool
- for !sawEOF {
- n, err := body.Read(buf)
- if err == io.EOF {
- sawEOF = true
- err = nil
- } else if err != nil {
- return err
- }
-
- remain := buf[:n]
- for len(remain) > 0 && err == nil {
- var allowed int32
- allowed, err = cs.awaitFlowControl(len(remain))
- switch {
- case err == errStopReqBodyWrite:
- return err
- case err == errStopReqBodyWriteAndCancel:
- cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
- return err
- case err != nil:
- return err
- }
- cc.wmu.Lock()
- data := remain[:allowed]
- remain = remain[allowed:]
- sentEnd = sawEOF && len(remain) == 0 && !hasTrailers
- err = cc.fr.WriteData(cs.ID, sentEnd, data)
- if err == nil {
- // TODO(bradfitz): this flush is for latency, not bandwidth.
- // Most requests won't need this. Make this opt-in or opt-out?
- // Use some heuristic on the body type? Nagel-like timers?
- // Based on 'n'? Only last chunk of this for loop, unless flow control
- // tokens are low? For now, always:
- err = cc.bw.Flush()
- }
- cc.wmu.Unlock()
- }
- if err != nil {
- return err
- }
- }
-
- cc.wmu.Lock()
- if !sentEnd {
- var trls []byte
- if hasTrailers {
- cc.mu.Lock()
- trls = cc.encodeTrailers(req)
- cc.mu.Unlock()
- }
-
- // Avoid forgetting to send an END_STREAM if the encoded
- // trailers are 0 bytes. Both paths produce an END_STREAM.
- if len(trls) > 0 {
- err = cc.writeHeaders(cs.ID, true, trls)
- } else {
- err = cc.fr.WriteData(cs.ID, true, nil)
- }
- }
- if ferr := cc.bw.Flush(); ferr != nil && err == nil {
- err = ferr
- }
- cc.wmu.Unlock()
-
- return err
-}
-
-// awaitFlowControl waits for [1, min(maxBytes, cc.cs.maxFrameSize)] flow
-// control tokens from the server.
-// It returns either the non-zero number of tokens taken or an error
-// if the stream is dead.
-func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) {
- cc := cs.cc
- cc.mu.Lock()
- defer cc.mu.Unlock()
- for {
- if cc.closed {
- return 0, errClientConnClosed
- }
- if cs.stopReqBody != nil {
- return 0, cs.stopReqBody
- }
- if err := cs.checkReset(); err != nil {
- return 0, err
- }
- if a := cs.flow.available(); a > 0 {
- take := a
- if int(take) > maxBytes {
-
- take = int32(maxBytes) // can't truncate int; take is int32
- }
- if take > int32(cc.maxFrameSize) {
- take = int32(cc.maxFrameSize)
- }
- cs.flow.take(take)
- return take, nil
- }
- cc.cond.Wait()
- }
-}
-
-type badStringError struct {
- what string
- str string
-}
-
-func (e *badStringError) Error() string { return fmt.Sprintf("%s %q", e.what, e.str) }
-
-// requires cc.mu be held.
-func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trailers string, contentLength int64) []byte {
- cc.hbuf.Reset()
-
- host := req.Host
- if host == "" {
- host = req.URL.Host
- }
-
- // 8.1.2.3 Request Pseudo-Header Fields
- // The :path pseudo-header field includes the path and query parts of the
- // target URI (the path-absolute production and optionally a '?' character
- // followed by the query production (see Sections 3.3 and 3.4 of
- // [RFC3986]).
- cc.writeHeader(":authority", host)
- cc.writeHeader(":method", req.Method)
- if req.Method != "CONNECT" {
- cc.writeHeader(":path", req.URL.RequestURI())
- cc.writeHeader(":scheme", "https")
- }
- if trailers != "" {
- cc.writeHeader("trailer", trailers)
- }
-
- var didUA bool
- for k, vv := range req.Header {
- lowKey := strings.ToLower(k)
- switch lowKey {
- case "host", "content-length":
- // Host is :authority, already sent.
- // Content-Length is automatic, set below.
- continue
- case "connection", "proxy-connection", "transfer-encoding", "upgrade":
- // Per 8.1.2.2 Connection-Specific Header
- // Fields, don't send connection-specific
- // fields. We deal with these earlier in
- // RoundTrip, deciding whether they're
- // error-worthy, but we don't want to mutate
- // the user's *Request, so at this point just
- // skip over them.
- continue
- case "user-agent":
- // Match Go's http1 behavior: at most one
- // User-Agent. If set to nil or empty string,
- // then omit it. Otherwise if not mentioned,
- // include the default (below).
- didUA = true
- if len(vv) < 1 {
- continue
- }
- vv = vv[:1]
- if vv[0] == "" {
- continue
- }
- }
- for _, v := range vv {
- cc.writeHeader(lowKey, v)
- }
- }
- if shouldSendReqContentLength(req.Method, contentLength) {
- cc.writeHeader("content-length", strconv.FormatInt(contentLength, 10))
- }
- if addGzipHeader {
- cc.writeHeader("accept-encoding", "gzip")
- }
- if !didUA {
- cc.writeHeader("user-agent", defaultUserAgent)
- }
- return cc.hbuf.Bytes()
-}
-
-// shouldSendReqContentLength reports whether the http2.Transport should send
-// a "content-length" request header. This logic is basically a copy of the net/http
-// transferWriter.shouldSendContentLength.
-// The contentLength is the corrected contentLength (so 0 means actually 0, not unknown).
-// -1 means unknown.
-func shouldSendReqContentLength(method string, contentLength int64) bool {
- if contentLength > 0 {
- return true
- }
- if contentLength < 0 {
- return false
- }
- // For zero bodies, whether we send a content-length depends on the method.
- // It also kinda doesn't matter for http2 either way, with END_STREAM.
- switch method {
- case "POST", "PUT", "PATCH":
- return true
- default:
- return false
- }
-}
-
-// requires cc.mu be held.
-func (cc *ClientConn) encodeTrailers(req *http.Request) []byte {
- cc.hbuf.Reset()
- for k, vv := range req.Trailer {
- // Transfer-Encoding, etc. have already been filtered at the
- // start of RoundTrip
- lowKey := strings.ToLower(k)
- for _, v := range vv {
- cc.writeHeader(lowKey, v)
- }
- }
- return cc.hbuf.Bytes()
-}
-
-func (cc *ClientConn) writeHeader(name, value string) {
- if VerboseLogs {
- log.Printf("http2: Transport encoding header %q = %q", name, value)
- }
- cc.henc.WriteField(hpack.HeaderField{Name: name, Value: value})
-}
-
-type resAndError struct {
- res *http.Response
- err error
-}
-
-// requires cc.mu be held.
-func (cc *ClientConn) newStream() *clientStream {
- cs := &clientStream{
- cc: cc,
- ID: cc.nextStreamID,
- resc: make(chan resAndError, 1),
- peerReset: make(chan struct{}),
- done: make(chan struct{}),
- }
- cs.flow.add(int32(cc.initialWindowSize))
- cs.flow.setConnFlow(&cc.flow)
- cs.inflow.add(transportDefaultStreamFlow)
- cs.inflow.setConnFlow(&cc.inflow)
- cc.nextStreamID += 2
- cc.streams[cs.ID] = cs
- return cs
-}
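
// Editor's note (not in the original source): advancing cc.nextStreamID by 2
// keeps successive client-initiated streams in the odd-numbered ID space that
// RFC 7540 section 5.1.1 requires, assuming the connection starts the counter
// at an odd value when it is created. Each new stream also gets its own send
// (cs.flow) and receive (cs.inflow) windows, both linked back to the
// connection-level windows so per-stream accounting cannot exceed the
// connection's budget.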
-
-func (cc *ClientConn) forgetStreamID(id uint32) {
- cc.streamByID(id, true)
-}
-
-func (cc *ClientConn) streamByID(id uint32, andRemove bool) *clientStream {
- cc.mu.Lock()
- defer cc.mu.Unlock()
- cs := cc.streams[id]
- if andRemove && cs != nil && !cc.closed {
- delete(cc.streams, id)
- close(cs.done)
- }
- return cs
-}
-
-// clientConnReadLoop is the state owned by the clientConn's frame-reading readLoop.
-type clientConnReadLoop struct {
- cc *ClientConn
- activeRes map[uint32]*clientStream // keyed by streamID
- closeWhenIdle bool
-}
-
-// readLoop runs in its own goroutine and reads and dispatches frames.
-func (cc *ClientConn) readLoop() {
- rl := &clientConnReadLoop{
- cc: cc,
- activeRes: make(map[uint32]*clientStream),
- }
-
- defer rl.cleanup()
- cc.readerErr = rl.run()
- if ce, ok := cc.readerErr.(ConnectionError); ok {
- cc.wmu.Lock()
- cc.fr.WriteGoAway(0, ErrCode(ce), nil)
- cc.wmu.Unlock()
- }
-}
-
-func (rl *clientConnReadLoop) cleanup() {
- cc := rl.cc
- defer cc.tconn.Close()
- defer cc.t.connPool().MarkDead(cc)
- defer close(cc.readerDone)
-
- // Close any response bodies if the server closes prematurely.
- // TODO: also do this if we've written the headers but not
- // gotten a response yet.
- err := cc.readerErr
- if err == io.EOF {
- err = io.ErrUnexpectedEOF
- }
- cc.mu.Lock()
- for _, cs := range rl.activeRes {
- cs.bufPipe.CloseWithError(err)
- }
- for _, cs := range cc.streams {
- select {
- case cs.resc <- resAndError{err: err}:
- default:
- }
- close(cs.done)
- }
- cc.closed = true
- cc.cond.Broadcast()
- cc.mu.Unlock()
-}
-
-func (rl *clientConnReadLoop) run() error {
- cc := rl.cc
- rl.closeWhenIdle = cc.t.disableKeepAlives()
- gotReply := false // ever saw a reply
- for {
- f, err := cc.fr.ReadFrame()
- if err != nil {
- cc.vlogf("Transport readFrame error: (%T) %v", err, err)
- }
- if se, ok := err.(StreamError); ok {
- if cs := cc.streamByID(se.StreamID, true /*ended; remove it*/); cs != nil {
- rl.endStreamError(cs, cc.fr.errDetail)
- }
- continue
- } else if err != nil {
- return err
- }
- if VerboseLogs {
- cc.vlogf("http2: Transport received %s", summarizeFrame(f))
- }
- maybeIdle := false // whether frame might transition us to idle
-
- switch f := f.(type) {
- case *MetaHeadersFrame:
- err = rl.processHeaders(f)
- maybeIdle = true
- gotReply = true
- case *DataFrame:
- err = rl.processData(f)
- maybeIdle = true
- case *GoAwayFrame:
- err = rl.processGoAway(f)
- maybeIdle = true
- case *RSTStreamFrame:
- err = rl.processResetStream(f)
- maybeIdle = true
- case *SettingsFrame:
- err = rl.processSettings(f)
- case *PushPromiseFrame:
- err = rl.processPushPromise(f)
- case *WindowUpdateFrame:
- err = rl.processWindowUpdate(f)
- case *PingFrame:
- err = rl.processPing(f)
- default:
- cc.logf("Transport: unhandled response frame type %T", f)
- }
- if err != nil {
- return err
- }
- if rl.closeWhenIdle && gotReply && maybeIdle && len(rl.activeRes) == 0 {
- cc.closeIfIdle()
- }
- }
-}
-
-func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error {
- cc := rl.cc
- cs := cc.streamByID(f.StreamID, f.StreamEnded())
- if cs == nil {
- // We'd get here if we canceled a request while the
- // server had its response still in flight. So if this
- // was just something we canceled, ignore it.
- return nil
- }
- if !cs.pastHeaders {
- cs.pastHeaders = true
- } else {
- return rl.processTrailers(cs, f)
- }
-
- res, err := rl.handleResponse(cs, f)
- if err != nil {
- if _, ok := err.(ConnectionError); ok {
- return err
- }
- // Any other error type is a stream error.
- cs.cc.writeStreamReset(f.StreamID, ErrCodeProtocol, err)
- cs.resc <- resAndError{err: err}
- return nil // return nil from process* funcs to keep conn alive
- }
- if res == nil {
- // (nil, nil) special case. See handleResponse docs.
- return nil
- }
- if res.Body != noBody {
- rl.activeRes[cs.ID] = cs
- }
- cs.resTrailer = &res.Trailer
- cs.resc <- resAndError{res: res}
- return nil
-}
-
-// handleResponse may return a nil error or a ConnectionError. Any other error
-// value is treated as a StreamError of type ErrCodeProtocol, with the returned
-// error as the detail.
-//
-// As a special case, handleResponse may return (nil, nil) to skip the
-// frame (currently only used for 100 expect continue). This special
-// case is going away after Issue 13851 is fixed.
-func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFrame) (*http.Response, error) {
- if f.Truncated {
- return nil, errResponseHeaderListSize
- }
-
- status := f.PseudoValue("status")
- if status == "" {
- return nil, errors.New("missing status pseudo header")
- }
- statusCode, err := strconv.Atoi(status)
- if err != nil {
- return nil, errors.New("malformed non-numeric status pseudo header")
- }
-
- if statusCode == 100 {
- // Just skip 100-continue response headers for now.
- // TODO: golang.org/issue/13851 for doing it properly.
- cs.pastHeaders = false // do it all again
- return nil, nil
- }
-
- header := make(http.Header)
- res := &http.Response{
- Proto: "HTTP/2.0",
- ProtoMajor: 2,
- Header: header,
- StatusCode: statusCode,
- Status: status + " " + http.StatusText(statusCode),
- }
- for _, hf := range f.RegularFields() {
- key := http.CanonicalHeaderKey(hf.Name)
- if key == "Trailer" {
- t := res.Trailer
- if t == nil {
- t = make(http.Header)
- res.Trailer = t
- }
- foreachHeaderElement(hf.Value, func(v string) {
- t[http.CanonicalHeaderKey(v)] = nil
- })
- } else {
- header[key] = append(header[key], hf.Value)
- }
- }
-
- streamEnded := f.StreamEnded()
- if !streamEnded || cs.req.Method == "HEAD" {
- res.ContentLength = -1
- if clens := res.Header["Content-Length"]; len(clens) == 1 {
- if clen64, err := strconv.ParseInt(clens[0], 10, 64); err == nil {
- res.ContentLength = clen64
- } else {
- // TODO: care? unlike http/1, it won't mess up our framing, so it's
- // more safe smuggling-wise to ignore.
- }
- } else if len(clens) > 1 {
- // TODO: care? unlike http/1, it won't mess up our framing, so it's
- // more safe smuggling-wise to ignore.
- }
- }
-
- if streamEnded {
- res.Body = noBody
- return res, nil
- }
-
- buf := new(bytes.Buffer) // TODO(bradfitz): recycle this garbage
- cs.bufPipe = pipe{b: buf}
- cs.bytesRemain = res.ContentLength
- res.Body = transportResponseBody{cs}
- go cs.awaitRequestCancel(requestCancel(cs.req))
-
- if cs.requestedGzip && res.Header.Get("Content-Encoding") == "gzip" {
- res.Header.Del("Content-Encoding")
- res.Header.Del("Content-Length")
- res.ContentLength = -1
- res.Body = &gzipReader{body: res.Body}
- }
- return res, nil
-}
-
-func (rl *clientConnReadLoop) processTrailers(cs *clientStream, f *MetaHeadersFrame) error {
- if cs.pastTrailers {
- // Too many HEADERS frames for this stream.
- return ConnectionError(ErrCodeProtocol)
- }
- cs.pastTrailers = true
- if !f.StreamEnded() {
- // We expect that any headers for trailers also
- // has END_STREAM.
- return ConnectionError(ErrCodeProtocol)
- }
- if len(f.PseudoFields()) > 0 {
- // No pseudo header fields are defined for trailers.
- // TODO: ConnectionError might be overly harsh? Check.
- return ConnectionError(ErrCodeProtocol)
- }
-
- trailer := make(http.Header)
- for _, hf := range f.RegularFields() {
- key := http.CanonicalHeaderKey(hf.Name)
- trailer[key] = append(trailer[key], hf.Value)
- }
- cs.trailer = trailer
-
- rl.endStream(cs)
- return nil
-}
-
-// transportResponseBody is the concrete type of Transport.RoundTrip's
-// Response.Body. It is an io.ReadCloser. On Read, it reads from cs.bufPipe.
-// On Close it sends RST_STREAM if EOF wasn't already seen.
-type transportResponseBody struct {
- cs *clientStream
-}
-
-func (b transportResponseBody) Read(p []byte) (n int, err error) {
- cs := b.cs
- cc := cs.cc
-
- if cs.readErr != nil {
- return 0, cs.readErr
- }
- n, err = b.cs.bufPipe.Read(p)
- if cs.bytesRemain != -1 {
- if int64(n) > cs.bytesRemain {
- n = int(cs.bytesRemain)
- if err == nil {
- err = errors.New("net/http: server replied with more than declared Content-Length; truncated")
- cc.writeStreamReset(cs.ID, ErrCodeProtocol, err)
- }
- cs.readErr = err
- return int(cs.bytesRemain), err
- }
- cs.bytesRemain -= int64(n)
- if err == io.EOF && cs.bytesRemain > 0 {
- err = io.ErrUnexpectedEOF
- cs.readErr = err
- return n, err
- }
- }
- if n == 0 {
- // No flow control tokens to send back.
- return
- }
-
- cc.mu.Lock()
- defer cc.mu.Unlock()
-
- var connAdd, streamAdd int32
- // Check the conn-level first, before the stream-level.
- if v := cc.inflow.available(); v < transportDefaultConnFlow/2 {
- connAdd = transportDefaultConnFlow - v
- cc.inflow.add(connAdd)
- }
- if err == nil { // No need to refresh if the stream is over or failed.
- if v := cs.inflow.available(); v < transportDefaultStreamFlow-transportDefaultStreamMinRefresh {
- streamAdd = transportDefaultStreamFlow - v
- cs.inflow.add(streamAdd)
- }
- }
- if connAdd != 0 || streamAdd != 0 {
- cc.wmu.Lock()
- defer cc.wmu.Unlock()
- if connAdd != 0 {
- cc.fr.WriteWindowUpdate(0, mustUint31(connAdd))
- }
- if streamAdd != 0 {
- cc.fr.WriteWindowUpdate(cs.ID, mustUint31(streamAdd))
- }
- cc.bw.Flush()
- }
- return
-}
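
// Editor's note (summary of the refill logic above, not original text): after
// handing n bytes to the caller, Read returns flow-control credit to the peer
// in two layers. The connection-level receive window is topped back up to
// transportDefaultConnFlow once less than half of it remains, and the
// stream-level window is topped back up to transportDefaultStreamFlow once it
// has dropped by more than transportDefaultStreamMinRefresh; the stream window
// is only refreshed while the stream is still alive (err == nil). Both refills
// are written as WINDOW_UPDATE frames under cc.wmu and flushed together.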
-
-var errClosedResponseBody = errors.New("http2: response body closed")
-
-func (b transportResponseBody) Close() error {
- cs := b.cs
- if cs.bufPipe.Err() != io.EOF {
- // TODO: write test for this
- cs.cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
- }
- cs.bufPipe.BreakWithError(errClosedResponseBody)
- return nil
-}
-
-func (rl *clientConnReadLoop) processData(f *DataFrame) error {
- cc := rl.cc
- cs := cc.streamByID(f.StreamID, f.StreamEnded())
- if cs == nil {
- cc.mu.Lock()
- neverSent := cc.nextStreamID
- cc.mu.Unlock()
- if f.StreamID >= neverSent {
- // We never asked for this.
- cc.logf("http2: Transport received unsolicited DATA frame; closing connection")
- return ConnectionError(ErrCodeProtocol)
- }
- // We probably did ask for this, but canceled. Just ignore it.
- // TODO: be stricter here? only silently ignore things which
- // we canceled, but not things which were closed normally
- // by the peer? Tough without accumulating too much state.
- return nil
- }
- if data := f.Data(); len(data) > 0 {
- if cs.bufPipe.b == nil {
- // Data frame after it's already closed?
- cc.logf("http2: Transport received DATA frame for closed stream; closing connection")
- return ConnectionError(ErrCodeProtocol)
- }
-
- // Check connection-level flow control.
- cc.mu.Lock()
- if cs.inflow.available() >= int32(len(data)) {
- cs.inflow.take(int32(len(data)))
- } else {
- cc.mu.Unlock()
- return ConnectionError(ErrCodeFlowControl)
- }
- cc.mu.Unlock()
-
- if _, err := cs.bufPipe.Write(data); err != nil {
- rl.endStreamError(cs, err)
- return err
- }
- }
-
- if f.StreamEnded() {
- rl.endStream(cs)
- }
- return nil
-}
-
-var errInvalidTrailers = errors.New("http2: invalid trailers")
-
-func (rl *clientConnReadLoop) endStream(cs *clientStream) {
- // TODO: check that any declared content-length matches, like
- // server.go's (*stream).endStream method.
- rl.endStreamError(cs, nil)
-}
-
-func (rl *clientConnReadLoop) endStreamError(cs *clientStream, err error) {
- var code func()
- if err == nil {
- err = io.EOF
- code = cs.copyTrailers
- }
- cs.bufPipe.closeWithErrorAndCode(err, code)
- delete(rl.activeRes, cs.ID)
- if cs.req.Close || cs.req.Header.Get("Connection") == "close" {
- rl.closeWhenIdle = true
- }
-}
-
-func (cs *clientStream) copyTrailers() {
- for k, vv := range cs.trailer {
- t := cs.resTrailer
- if *t == nil {
- *t = make(http.Header)
- }
- (*t)[k] = vv
- }
-}
-
-func (rl *clientConnReadLoop) processGoAway(f *GoAwayFrame) error {
- cc := rl.cc
- cc.t.connPool().MarkDead(cc)
- if f.ErrCode != 0 {
- // TODO: deal with GOAWAY more. particularly the error code
- cc.vlogf("transport got GOAWAY with error code = %v", f.ErrCode)
- }
- cc.setGoAway(f)
- return nil
-}
-
-func (rl *clientConnReadLoop) processSettings(f *SettingsFrame) error {
- cc := rl.cc
- cc.mu.Lock()
- defer cc.mu.Unlock()
- return f.ForeachSetting(func(s Setting) error {
- switch s.ID {
- case SettingMaxFrameSize:
- cc.maxFrameSize = s.Val
- case SettingMaxConcurrentStreams:
- cc.maxConcurrentStreams = s.Val
- case SettingInitialWindowSize:
- // TODO: error if this is too large.
-
- // TODO: adjust flow control of still-open
- // frames by the difference of the old initial
- // window size and this one.
- cc.initialWindowSize = s.Val
- default:
- // TODO(bradfitz): handle more settings? SETTINGS_HEADER_TABLE_SIZE probably.
- cc.vlogf("Unhandled Setting: %v", s)
- }
- return nil
- })
-}
-
-func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error {
- cc := rl.cc
- cs := cc.streamByID(f.StreamID, false)
- if f.StreamID != 0 && cs == nil {
- return nil
- }
-
- cc.mu.Lock()
- defer cc.mu.Unlock()
-
- fl := &cc.flow
- if cs != nil {
- fl = &cs.flow
- }
- if !fl.add(int32(f.Increment)) {
- return ConnectionError(ErrCodeFlowControl)
- }
- cc.cond.Broadcast()
- return nil
-}
-
-func (rl *clientConnReadLoop) processResetStream(f *RSTStreamFrame) error {
- cs := rl.cc.streamByID(f.StreamID, true)
- if cs == nil {
- // TODO: return error if server tries to RST_STREAM an idle stream
- return nil
- }
- select {
- case <-cs.peerReset:
- // Already reset.
- // This is the only goroutine
- // which closes this, so there
- // isn't a race.
- default:
- err := StreamError{cs.ID, f.ErrCode}
- cs.resetErr = err
- close(cs.peerReset)
- cs.bufPipe.CloseWithError(err)
- cs.cc.cond.Broadcast() // wake up checkReset via clientStream.awaitFlowControl
- }
- delete(rl.activeRes, cs.ID)
- return nil
-}
-
-func (rl *clientConnReadLoop) processPing(f *PingFrame) error {
- if f.IsAck() {
- // 6.7 PING: "An endpoint MUST NOT respond to PING frames
- // containing this flag."
- return nil
- }
- cc := rl.cc
- cc.wmu.Lock()
- defer cc.wmu.Unlock()
- if err := cc.fr.WritePing(true, f.Data); err != nil {
- return err
- }
- return cc.bw.Flush()
-}
-
-func (rl *clientConnReadLoop) processPushPromise(f *PushPromiseFrame) error {
- // We told the peer we don't want them.
- // Spec says:
- // "PUSH_PROMISE MUST NOT be sent if the SETTINGS_ENABLE_PUSH
- // setting of the peer endpoint is set to 0. An endpoint that
- // has set this setting and has received acknowledgement MUST
- // treat the receipt of a PUSH_PROMISE frame as a connection
- // error (Section 5.4.1) of type PROTOCOL_ERROR."
- return ConnectionError(ErrCodeProtocol)
-}
-
-func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, err error) {
- // TODO: do something with err? send it as a debug frame to the peer?
- // But that's only in GOAWAY. Invent a new frame type? Is there one already?
- cc.wmu.Lock()
- cc.fr.WriteRSTStream(streamID, code)
- cc.bw.Flush()
- cc.wmu.Unlock()
-}
-
-var (
- errResponseHeaderListSize = errors.New("http2: response header list larger than advertised limit")
- errPseudoTrailers = errors.New("http2: invalid pseudo header in trailers")
-)
-
-func (cc *ClientConn) logf(format string, args ...interface{}) {
- cc.t.logf(format, args...)
-}
-
-func (cc *ClientConn) vlogf(format string, args ...interface{}) {
- cc.t.vlogf(format, args...)
-}
-
-func (t *Transport) vlogf(format string, args ...interface{}) {
- if VerboseLogs {
- t.logf(format, args...)
- }
-}
-
-func (t *Transport) logf(format string, args ...interface{}) {
- log.Printf(format, args...)
-}
-
-var noBody io.ReadCloser = ioutil.NopCloser(bytes.NewReader(nil))
-
-func strSliceContains(ss []string, s string) bool {
- for _, v := range ss {
- if v == s {
- return true
- }
- }
- return false
-}
-
-type erringRoundTripper struct{ err error }
-
-func (rt erringRoundTripper) RoundTrip(*http.Request) (*http.Response, error) { return nil, rt.err }
-
-// gzipReader wraps a response body so it can lazily
-// call gzip.NewReader on the first call to Read
-type gzipReader struct {
- body io.ReadCloser // underlying Response.Body
- zr *gzip.Reader // lazily-initialized gzip reader
- zerr error // sticky error
-}
-
-func (gz *gzipReader) Read(p []byte) (n int, err error) {
- if gz.zerr != nil {
- return 0, gz.zerr
- }
- if gz.zr == nil {
- gz.zr, err = gzip.NewReader(gz.body)
- if err != nil {
- gz.zerr = err
- return 0, err
- }
- }
- return gz.zr.Read(p)
-}
-
-func (gz *gzipReader) Close() error {
- return gz.body.Close()
-}
-
-type errorReader struct{ err error }
-
-func (r errorReader) Read(p []byte) (int, error) { return 0, r.err }
diff --git a/vendor/golang.org/x/net/http2/transport_test.go b/vendor/golang.org/x/net/http2/transport_test.go
deleted file mode 100644
index 6b77a9ed..00000000
--- a/vendor/golang.org/x/net/http2/transport_test.go
+++ /dev/null
@@ -1,1740 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package http2
-
-import (
- "bufio"
- "bytes"
- "crypto/tls"
- "errors"
- "flag"
- "fmt"
- "io"
- "io/ioutil"
- "log"
- "math/rand"
- "net"
- "net/http"
- "net/url"
- "os"
- "reflect"
- "sort"
- "strconv"
- "strings"
- "sync"
- "sync/atomic"
- "testing"
- "time"
-
- "golang.org/x/net/http2/hpack"
-)
-
-var (
- extNet = flag.Bool("extnet", false, "do external network tests")
- transportHost = flag.String("transporthost", "http2.golang.org", "hostname to use for TestTransport")
- insecure = flag.Bool("insecure", false, "insecure TLS dials") // TODO: dead code. remove?
-)
-
-var tlsConfigInsecure = &tls.Config{InsecureSkipVerify: true}
-
-func TestTransportExternal(t *testing.T) {
- if !*extNet {
- t.Skip("skipping external network test")
- }
- req, _ := http.NewRequest("GET", "https://"+*transportHost+"/", nil)
- rt := &Transport{TLSClientConfig: tlsConfigInsecure}
- res, err := rt.RoundTrip(req)
- if err != nil {
- t.Fatalf("%v", err)
- }
- res.Write(os.Stdout)
-}
-
-func TestTransport(t *testing.T) {
- const body = "sup"
- st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
- io.WriteString(w, body)
- }, optOnlyServer)
- defer st.Close()
-
- tr := &Transport{TLSClientConfig: tlsConfigInsecure}
- defer tr.CloseIdleConnections()
-
- req, err := http.NewRequest("GET", st.ts.URL, nil)
- if err != nil {
- t.Fatal(err)
- }
- res, err := tr.RoundTrip(req)
- if err != nil {
- t.Fatal(err)
- }
- defer res.Body.Close()
-
- t.Logf("Got res: %+v", res)
- if g, w := res.StatusCode, 200; g != w {
- t.Errorf("StatusCode = %v; want %v", g, w)
- }
- if g, w := res.Status, "200 OK"; g != w {
- t.Errorf("Status = %q; want %q", g, w)
- }
- wantHeader := http.Header{
- "Content-Length": []string{"3"},
- "Content-Type": []string{"text/plain; charset=utf-8"},
- "Date": []string{"XXX"}, // see cleanDate
- }
- cleanDate(res)
- if !reflect.DeepEqual(res.Header, wantHeader) {
- t.Errorf("res Header = %v; want %v", res.Header, wantHeader)
- }
- if res.Request != req {
- t.Errorf("Response.Request = %p; want %p", res.Request, req)
- }
- if res.TLS == nil {
- t.Error("Response.TLS = nil; want non-nil")
- }
- slurp, err := ioutil.ReadAll(res.Body)
- if err != nil {
- t.Errorf("Body read: %v", err)
- } else if string(slurp) != body {
- t.Errorf("Body = %q; want %q", slurp, body)
- }
-}
-
-func onSameConn(t *testing.T, modReq func(*http.Request)) bool {
- st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
- io.WriteString(w, r.RemoteAddr)
- }, optOnlyServer)
- defer st.Close()
- tr := &Transport{TLSClientConfig: tlsConfigInsecure}
- defer tr.CloseIdleConnections()
- get := func() string {
- req, err := http.NewRequest("GET", st.ts.URL, nil)
- if err != nil {
- t.Fatal(err)
- }
- modReq(req)
- res, err := tr.RoundTrip(req)
- if err != nil {
- t.Fatal(err)
- }
- defer res.Body.Close()
- slurp, err := ioutil.ReadAll(res.Body)
- if err != nil {
- t.Fatalf("Body read: %v", err)
- }
- addr := strings.TrimSpace(string(slurp))
- if addr == "" {
- t.Fatalf("didn't get an addr in response")
- }
- return addr
- }
- first := get()
- second := get()
- return first == second
-}
-
-func TestTransportReusesConns(t *testing.T) {
- if !onSameConn(t, func(*http.Request) {}) {
- t.Errorf("first and second responses were on different connections")
- }
-}
-
-func TestTransportReusesConn_RequestClose(t *testing.T) {
- if onSameConn(t, func(r *http.Request) { r.Close = true }) {
- t.Errorf("first and second responses were not on different connections")
- }
-}
-
-func TestTransportReusesConn_ConnClose(t *testing.T) {
- if onSameConn(t, func(r *http.Request) { r.Header.Set("Connection", "close") }) {
- t.Errorf("first and second responses were not on different connections")
- }
-}
-
-// Tests that the Transport only keeps one pending dial open per destination address.
-// https://golang.org/issue/13397
-func TestTransportGroupsPendingDials(t *testing.T) {
- st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
- io.WriteString(w, r.RemoteAddr)
- }, optOnlyServer)
- defer st.Close()
- tr := &Transport{
- TLSClientConfig: tlsConfigInsecure,
- }
- defer tr.CloseIdleConnections()
- var (
- mu sync.Mutex
- dials = map[string]int{}
- )
- var wg sync.WaitGroup
- for i := 0; i < 10; i++ {
- wg.Add(1)
- go func() {
- defer wg.Done()
- req, err := http.NewRequest("GET", st.ts.URL, nil)
- if err != nil {
- t.Error(err)
- return
- }
- res, err := tr.RoundTrip(req)
- if err != nil {
- t.Error(err)
- return
- }
- defer res.Body.Close()
- slurp, err := ioutil.ReadAll(res.Body)
- if err != nil {
- t.Errorf("Body read: %v", err)
- }
- addr := strings.TrimSpace(string(slurp))
- if addr == "" {
- t.Errorf("didn't get an addr in response")
- }
- mu.Lock()
- dials[addr]++
- mu.Unlock()
- }()
- }
- wg.Wait()
- if len(dials) != 1 {
- t.Errorf("saw %d dials; want 1: %v", len(dials), dials)
- }
- tr.CloseIdleConnections()
- if err := retry(50, 10*time.Millisecond, func() error {
- cp, ok := tr.connPool().(*clientConnPool)
- if !ok {
- return fmt.Errorf("Conn pool is %T; want *clientConnPool", tr.connPool())
- }
- cp.mu.Lock()
- defer cp.mu.Unlock()
- if len(cp.dialing) != 0 {
- return fmt.Errorf("dialing map = %v; want empty", cp.dialing)
- }
- if len(cp.conns) != 0 {
- return fmt.Errorf("conns = %v; want empty", cp.conns)
- }
- if len(cp.keys) != 0 {
- return fmt.Errorf("keys = %v; want empty", cp.keys)
- }
- return nil
- }); err != nil {
- t.Errorf("State of pool after CloseIdleConnections: %v", err)
- }
-}
-
-func retry(tries int, delay time.Duration, fn func() error) error {
- var err error
- for i := 0; i < tries; i++ {
- err = fn()
- if err == nil {
- return nil
- }
- time.Sleep(delay)
- }
- return err
-}
-
-func TestTransportAbortClosesPipes(t *testing.T) {
- shutdown := make(chan struct{})
- st := newServerTester(t,
- func(w http.ResponseWriter, r *http.Request) {
- w.(http.Flusher).Flush()
- <-shutdown
- },
- optOnlyServer,
- )
- defer st.Close()
- defer close(shutdown) // we must shut down before st.Close() to avoid hanging
-
- done := make(chan struct{})
- requestMade := make(chan struct{})
- go func() {
- defer close(done)
- tr := &Transport{TLSClientConfig: tlsConfigInsecure}
- req, err := http.NewRequest("GET", st.ts.URL, nil)
- if err != nil {
- t.Fatal(err)
- }
- res, err := tr.RoundTrip(req)
- if err != nil {
- t.Fatal(err)
- }
- defer res.Body.Close()
- close(requestMade)
- _, err = ioutil.ReadAll(res.Body)
- if err == nil {
- t.Error("expected error from res.Body.Read")
- }
- }()
-
- <-requestMade
- // Now force the serve loop to end, via closing the connection.
- st.closeConn()
- // deadlock? that's a bug.
- select {
- case <-done:
- case <-time.After(3 * time.Second):
- t.Fatal("timeout")
- }
-}
-
-// TODO: merge this with TestTransportBody to make TestTransportRequest? This
-// could be a table-driven test with extra goodies.
-func TestTransportPath(t *testing.T) {
- gotc := make(chan *url.URL, 1)
- st := newServerTester(t,
- func(w http.ResponseWriter, r *http.Request) {
- gotc <- r.URL
- },
- optOnlyServer,
- )
- defer st.Close()
-
- tr := &Transport{TLSClientConfig: tlsConfigInsecure}
- defer tr.CloseIdleConnections()
- const (
- path = "/testpath"
- query = "q=1"
- )
- surl := st.ts.URL + path + "?" + query
- req, err := http.NewRequest("POST", surl, nil)
- if err != nil {
- t.Fatal(err)
- }
- c := &http.Client{Transport: tr}
- res, err := c.Do(req)
- if err != nil {
- t.Fatal(err)
- }
- defer res.Body.Close()
- got := <-gotc
- if got.Path != path {
- t.Errorf("Read Path = %q; want %q", got.Path, path)
- }
- if got.RawQuery != query {
- t.Errorf("Read RawQuery = %q; want %q", got.RawQuery, query)
- }
-}
-
-func randString(n int) string {
- rnd := rand.New(rand.NewSource(int64(n)))
- b := make([]byte, n)
- for i := range b {
- b[i] = byte(rnd.Intn(256))
- }
- return string(b)
-}
-
-var bodyTests = []struct {
- body string
- noContentLen bool
-}{
- {body: "some message"},
- {body: "some message", noContentLen: true},
- {body: ""},
- {body: "", noContentLen: true},
- {body: strings.Repeat("a", 1<<20), noContentLen: true},
- {body: strings.Repeat("a", 1<<20)},
- {body: randString(16<<10 - 1)},
- {body: randString(16 << 10)},
- {body: randString(16<<10 + 1)},
- {body: randString(512<<10 - 1)},
- {body: randString(512 << 10)},
- {body: randString(512<<10 + 1)},
- {body: randString(1<<20 - 1)},
- {body: randString(1 << 20)},
- {body: randString(1<<20 + 2)},
-}
-
-func TestTransportBody(t *testing.T) {
- type reqInfo struct {
- req *http.Request
- slurp []byte
- err error
- }
- gotc := make(chan reqInfo, 1)
- st := newServerTester(t,
- func(w http.ResponseWriter, r *http.Request) {
- slurp, err := ioutil.ReadAll(r.Body)
- if err != nil {
- gotc <- reqInfo{err: err}
- } else {
- gotc <- reqInfo{req: r, slurp: slurp}
- }
- },
- optOnlyServer,
- )
- defer st.Close()
-
- for i, tt := range bodyTests {
- tr := &Transport{TLSClientConfig: tlsConfigInsecure}
- defer tr.CloseIdleConnections()
-
- var body io.Reader = strings.NewReader(tt.body)
- if tt.noContentLen {
- body = struct{ io.Reader }{body} // just a Reader, hiding concrete type and other methods
- }
- req, err := http.NewRequest("POST", st.ts.URL, body)
- if err != nil {
- t.Fatalf("#%d: %v", i, err)
- }
- c := &http.Client{Transport: tr}
- res, err := c.Do(req)
- if err != nil {
- t.Fatalf("#%d: %v", i, err)
- }
- defer res.Body.Close()
- ri := <-gotc
- if ri.err != nil {
- t.Errorf("#%d: read error: %v", i, ri.err)
- continue
- }
- if got := string(ri.slurp); got != tt.body {
- t.Errorf("#%d: Read body mismatch.\n got: %q (len %d)\nwant: %q (len %d)", i, shortString(got), len(got), shortString(tt.body), len(tt.body))
- }
- wantLen := int64(len(tt.body))
- if tt.noContentLen && tt.body != "" {
- wantLen = -1
- }
- if ri.req.ContentLength != wantLen {
- t.Errorf("#%d. handler got ContentLength = %v; want %v", i, ri.req.ContentLength, wantLen)
- }
- }
-}
-
-func shortString(v string) string {
- const maxLen = 100
- if len(v) <= maxLen {
- return v
- }
- return fmt.Sprintf("%v[...%d bytes omitted...]%v", v[:maxLen/2], len(v)-maxLen, v[len(v)-maxLen/2:])
-}
-
-func TestTransportDialTLS(t *testing.T) {
- var mu sync.Mutex // guards following
- var gotReq, didDial bool
-
- ts := newServerTester(t,
- func(w http.ResponseWriter, r *http.Request) {
- mu.Lock()
- gotReq = true
- mu.Unlock()
- },
- optOnlyServer,
- )
- defer ts.Close()
- tr := &Transport{
- DialTLS: func(netw, addr string, cfg *tls.Config) (net.Conn, error) {
- mu.Lock()
- didDial = true
- mu.Unlock()
- cfg.InsecureSkipVerify = true
- c, err := tls.Dial(netw, addr, cfg)
- if err != nil {
- return nil, err
- }
- return c, c.Handshake()
- },
- }
- defer tr.CloseIdleConnections()
- client := &http.Client{Transport: tr}
- res, err := client.Get(ts.ts.URL)
- if err != nil {
- t.Fatal(err)
- }
- res.Body.Close()
- mu.Lock()
- if !gotReq {
- t.Error("didn't get request")
- }
- if !didDial {
- t.Error("didn't use dial hook")
- }
-}
-
-func TestConfigureTransport(t *testing.T) {
- t1 := &http.Transport{}
- err := ConfigureTransport(t1)
- if err == errTransportVersion {
- t.Skip(err)
- }
- if err != nil {
- t.Fatal(err)
- }
- if got := fmt.Sprintf("%#v", *t1); !strings.Contains(got, `"h2"`) {
- // Laziness, to avoid buildtags.
- t.Errorf("stringification of HTTP/1 transport didn't contain \"h2\": %v", got)
- }
- wantNextProtos := []string{"h2", "http/1.1"}
- if t1.TLSClientConfig == nil {
- t.Errorf("nil t1.TLSClientConfig")
- } else if !reflect.DeepEqual(t1.TLSClientConfig.NextProtos, wantNextProtos) {
- t.Errorf("TLSClientConfig.NextProtos = %q; want %q", t1.TLSClientConfig.NextProtos, wantNextProtos)
- }
- if err := ConfigureTransport(t1); err == nil {
- t.Error("unexpected success on second call to ConfigureTransport")
- }
-
- // And does it work?
- st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
- io.WriteString(w, r.Proto)
- }, optOnlyServer)
- defer st.Close()
-
- t1.TLSClientConfig.InsecureSkipVerify = true
- c := &http.Client{Transport: t1}
- res, err := c.Get(st.ts.URL)
- if err != nil {
- t.Fatal(err)
- }
- slurp, err := ioutil.ReadAll(res.Body)
- if err != nil {
- t.Fatal(err)
- }
- if got, want := string(slurp), "HTTP/2.0"; got != want {
- t.Errorf("body = %q; want %q", got, want)
- }
-}
-
-type capitalizeReader struct {
- r io.Reader
-}
-
-func (cr capitalizeReader) Read(p []byte) (n int, err error) {
- n, err = cr.r.Read(p)
- for i, b := range p[:n] {
- if b >= 'a' && b <= 'z' {
- p[i] = b - ('a' - 'A')
- }
- }
- return
-}
-
-type flushWriter struct {
- w io.Writer
-}
-
-func (fw flushWriter) Write(p []byte) (n int, err error) {
- n, err = fw.w.Write(p)
- if f, ok := fw.w.(http.Flusher); ok {
- f.Flush()
- }
- return
-}
-
-type clientTester struct {
- t *testing.T
- tr *Transport
- sc, cc net.Conn // server and client conn
- fr *Framer // server's framer
- client func() error
- server func() error
-}
-
-func newClientTester(t *testing.T) *clientTester {
- var dialOnce struct {
- sync.Mutex
- dialed bool
- }
- ct := &clientTester{
- t: t,
- }
- ct.tr = &Transport{
- TLSClientConfig: tlsConfigInsecure,
- DialTLS: func(network, addr string, cfg *tls.Config) (net.Conn, error) {
- dialOnce.Lock()
- defer dialOnce.Unlock()
- if dialOnce.dialed {
- return nil, errors.New("only one dial allowed in test mode")
- }
- dialOnce.dialed = true
- return ct.cc, nil
- },
- }
-
- ln := newLocalListener(t)
- cc, err := net.Dial("tcp", ln.Addr().String())
- if err != nil {
- t.Fatal(err)
- }
- sc, err := ln.Accept()
- if err != nil {
- t.Fatal(err)
- }
- ln.Close()
- ct.cc = cc
- ct.sc = sc
- ct.fr = NewFramer(sc, sc)
- return ct
-}
-
-func newLocalListener(t *testing.T) net.Listener {
- ln, err := net.Listen("tcp4", "127.0.0.1:0")
- if err == nil {
- return ln
- }
- ln, err = net.Listen("tcp6", "[::1]:0")
- if err != nil {
- t.Fatal(err)
- }
- return ln
-}
-
-func (ct *clientTester) greet() {
- buf := make([]byte, len(ClientPreface))
- _, err := io.ReadFull(ct.sc, buf)
- if err != nil {
- ct.t.Fatalf("reading client preface: %v", err)
- }
- f, err := ct.fr.ReadFrame()
- if err != nil {
- ct.t.Fatalf("Reading client settings frame: %v", err)
- }
- if sf, ok := f.(*SettingsFrame); !ok {
- ct.t.Fatalf("Wanted client settings frame; got %v", f)
- _ = sf // stash it away?
- }
- if err := ct.fr.WriteSettings(); err != nil {
- ct.t.Fatal(err)
- }
- if err := ct.fr.WriteSettingsAck(); err != nil {
- ct.t.Fatal(err)
- }
-}
-
-func (ct *clientTester) cleanup() {
- ct.tr.CloseIdleConnections()
-}
-
-func (ct *clientTester) run() {
- errc := make(chan error, 2)
- ct.start("client", errc, ct.client)
- ct.start("server", errc, ct.server)
- defer ct.cleanup()
- for i := 0; i < 2; i++ {
- if err := <-errc; err != nil {
- ct.t.Error(err)
- return
- }
- }
-}
-
-func (ct *clientTester) start(which string, errc chan<- error, fn func() error) {
- go func() {
- finished := false
- var err error
- defer func() {
- if !finished {
- err = fmt.Errorf("%s goroutine didn't finish.", which)
- } else if err != nil {
- err = fmt.Errorf("%s: %v", which, err)
- }
- errc <- err
- }()
- err = fn()
- finished = true
- }()
-}
-
-type countingReader struct {
- n *int64
-}
-
-func (r countingReader) Read(p []byte) (n int, err error) {
- for i := range p {
- p[i] = byte(i)
- }
- atomic.AddInt64(r.n, int64(len(p)))
- return len(p), err
-}
-
-func TestTransportReqBodyAfterResponse_200(t *testing.T) { testTransportReqBodyAfterResponse(t, 200) }
-func TestTransportReqBodyAfterResponse_403(t *testing.T) { testTransportReqBodyAfterResponse(t, 403) }
-
-func testTransportReqBodyAfterResponse(t *testing.T, status int) {
- const bodySize = 10 << 20
- ct := newClientTester(t)
- ct.client = func() error {
- var n int64 // atomic
- req, err := http.NewRequest("PUT", "https://dummy.tld/", io.LimitReader(countingReader{&n}, bodySize))
- if err != nil {
- return err
- }
- res, err := ct.tr.RoundTrip(req)
- if err != nil {
- return fmt.Errorf("RoundTrip: %v", err)
- }
- defer res.Body.Close()
- if res.StatusCode != status {
- return fmt.Errorf("status code = %v; want %v", res.StatusCode, status)
- }
- slurp, err := ioutil.ReadAll(res.Body)
- if err != nil {
- return fmt.Errorf("Slurp: %v", err)
- }
- if len(slurp) > 0 {
- return fmt.Errorf("unexpected body: %q", slurp)
- }
- if status == 200 {
- if got := atomic.LoadInt64(&n); got != bodySize {
- return fmt.Errorf("For 200 response, Transport wrote %d bytes; want %d", got, bodySize)
- }
- } else {
- if got := atomic.LoadInt64(&n); got == 0 || got >= bodySize {
- return fmt.Errorf("For %d response, Transport wrote %d bytes; want (0,%d) exclusive", status, got, bodySize)
- }
- }
- return nil
- }
- ct.server = func() error {
- ct.greet()
- var buf bytes.Buffer
- enc := hpack.NewEncoder(&buf)
- var dataRecv int64
- var closed bool
- for {
- f, err := ct.fr.ReadFrame()
- if err != nil {
- return err
- }
- //println(fmt.Sprintf("server got frame: %v", f))
- switch f := f.(type) {
- case *WindowUpdateFrame, *SettingsFrame:
- case *HeadersFrame:
- if !f.HeadersEnded() {
- return fmt.Errorf("headers should have END_HEADERS be ended: %v", f)
- }
- if f.StreamEnded() {
- return fmt.Errorf("headers contains END_STREAM unexpectedly: %v", f)
- }
- time.Sleep(50 * time.Millisecond) // let client send body
- enc.WriteField(hpack.HeaderField{Name: ":status", Value: strconv.Itoa(status)})
- ct.fr.WriteHeaders(HeadersFrameParam{
- StreamID: f.StreamID,
- EndHeaders: true,
- EndStream: false,
- BlockFragment: buf.Bytes(),
- })
- case *DataFrame:
- dataLen := len(f.Data())
- dataRecv += int64(dataLen)
- if dataLen > 0 {
- if err := ct.fr.WriteWindowUpdate(0, uint32(dataLen)); err != nil {
- return err
- }
- if err := ct.fr.WriteWindowUpdate(f.StreamID, uint32(dataLen)); err != nil {
- return err
- }
- }
- if !closed && ((status != 200 && dataRecv > 0) ||
- (status == 200 && dataRecv == bodySize)) {
- closed = true
- if err := ct.fr.WriteData(f.StreamID, true, nil); err != nil {
- return err
- }
- return nil
- }
- default:
- return fmt.Errorf("Unexpected client frame %v", f)
- }
- }
- return nil
- }
- ct.run()
-}
-
-// See golang.org/issue/13444
-func TestTransportFullDuplex(t *testing.T) {
- st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
- w.WriteHeader(200) // redundant but for clarity
- w.(http.Flusher).Flush()
- io.Copy(flushWriter{w}, capitalizeReader{r.Body})
- fmt.Fprintf(w, "bye.\n")
- }, optOnlyServer)
- defer st.Close()
-
- tr := &Transport{TLSClientConfig: tlsConfigInsecure}
- defer tr.CloseIdleConnections()
- c := &http.Client{Transport: tr}
-
- pr, pw := io.Pipe()
- req, err := http.NewRequest("PUT", st.ts.URL, ioutil.NopCloser(pr))
- if err != nil {
- log.Fatal(err)
- }
- req.ContentLength = -1
- res, err := c.Do(req)
- if err != nil {
- log.Fatal(err)
- }
- defer res.Body.Close()
- if res.StatusCode != 200 {
- t.Fatalf("StatusCode = %v; want %v", res.StatusCode, 200)
- }
- bs := bufio.NewScanner(res.Body)
- want := func(v string) {
- if !bs.Scan() {
- t.Fatalf("wanted to read %q but Scan() = false, err = %v", v, bs.Err())
- }
- }
- write := func(v string) {
- _, err := io.WriteString(pw, v)
- if err != nil {
- t.Fatalf("pipe write: %v", err)
- }
- }
- write("foo\n")
- want("FOO")
- write("bar\n")
- want("BAR")
- pw.Close()
- want("bye.")
- if err := bs.Err(); err != nil {
- t.Fatal(err)
- }
-}
-
-func TestTransportConnectRequest(t *testing.T) {
- gotc := make(chan *http.Request, 1)
- st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
- gotc <- r
- }, optOnlyServer)
- defer st.Close()
-
- u, err := url.Parse(st.ts.URL)
- if err != nil {
- t.Fatal(err)
- }
-
- tr := &Transport{TLSClientConfig: tlsConfigInsecure}
- defer tr.CloseIdleConnections()
- c := &http.Client{Transport: tr}
-
- tests := []struct {
- req *http.Request
- want string
- }{
- {
- req: &http.Request{
- Method: "CONNECT",
- Header: http.Header{},
- URL: u,
- },
- want: u.Host,
- },
- {
- req: &http.Request{
- Method: "CONNECT",
- Header: http.Header{},
- URL: u,
- Host: "example.com:123",
- },
- want: "example.com:123",
- },
- }
-
- for i, tt := range tests {
- res, err := c.Do(tt.req)
- if err != nil {
- t.Errorf("%d. RoundTrip = %v", i, err)
- continue
- }
- res.Body.Close()
- req := <-gotc
- if req.Method != "CONNECT" {
- t.Errorf("method = %q; want CONNECT", req.Method)
- }
- if req.Host != tt.want {
- t.Errorf("Host = %q; want %q", req.Host, tt.want)
- }
- if req.URL.Host != tt.want {
- t.Errorf("URL.Host = %q; want %q", req.URL.Host, tt.want)
- }
- }
-}
-
-type headerType int
-
-const (
- noHeader headerType = iota // omitted
- oneHeader
- splitHeader // broken into continuation on purpose
-)
-
-const (
- f0 = noHeader
- f1 = oneHeader
- f2 = splitHeader
- d0 = false
- d1 = true
-)
-
-// Test all 36 combinations of response frame orders:
-// (3 ways of 100-continue) * (2 ways of headers) * (2 ways of data) * (3 ways of trailers).
-// Generated by http://play.golang.org/p/SScqYKJYXd
-// (an illustrative sketch of such a generator follows the test functions below).
-func TestTransportResPattern_c0h1d0t0(t *testing.T) { testTransportResPattern(t, f0, f1, d0, f0) }
-func TestTransportResPattern_c0h1d0t1(t *testing.T) { testTransportResPattern(t, f0, f1, d0, f1) }
-func TestTransportResPattern_c0h1d0t2(t *testing.T) { testTransportResPattern(t, f0, f1, d0, f2) }
-func TestTransportResPattern_c0h1d1t0(t *testing.T) { testTransportResPattern(t, f0, f1, d1, f0) }
-func TestTransportResPattern_c0h1d1t1(t *testing.T) { testTransportResPattern(t, f0, f1, d1, f1) }
-func TestTransportResPattern_c0h1d1t2(t *testing.T) { testTransportResPattern(t, f0, f1, d1, f2) }
-func TestTransportResPattern_c0h2d0t0(t *testing.T) { testTransportResPattern(t, f0, f2, d0, f0) }
-func TestTransportResPattern_c0h2d0t1(t *testing.T) { testTransportResPattern(t, f0, f2, d0, f1) }
-func TestTransportResPattern_c0h2d0t2(t *testing.T) { testTransportResPattern(t, f0, f2, d0, f2) }
-func TestTransportResPattern_c0h2d1t0(t *testing.T) { testTransportResPattern(t, f0, f2, d1, f0) }
-func TestTransportResPattern_c0h2d1t1(t *testing.T) { testTransportResPattern(t, f0, f2, d1, f1) }
-func TestTransportResPattern_c0h2d1t2(t *testing.T) { testTransportResPattern(t, f0, f2, d1, f2) }
-func TestTransportResPattern_c1h1d0t0(t *testing.T) { testTransportResPattern(t, f1, f1, d0, f0) }
-func TestTransportResPattern_c1h1d0t1(t *testing.T) { testTransportResPattern(t, f1, f1, d0, f1) }
-func TestTransportResPattern_c1h1d0t2(t *testing.T) { testTransportResPattern(t, f1, f1, d0, f2) }
-func TestTransportResPattern_c1h1d1t0(t *testing.T) { testTransportResPattern(t, f1, f1, d1, f0) }
-func TestTransportResPattern_c1h1d1t1(t *testing.T) { testTransportResPattern(t, f1, f1, d1, f1) }
-func TestTransportResPattern_c1h1d1t2(t *testing.T) { testTransportResPattern(t, f1, f1, d1, f2) }
-func TestTransportResPattern_c1h2d0t0(t *testing.T) { testTransportResPattern(t, f1, f2, d0, f0) }
-func TestTransportResPattern_c1h2d0t1(t *testing.T) { testTransportResPattern(t, f1, f2, d0, f1) }
-func TestTransportResPattern_c1h2d0t2(t *testing.T) { testTransportResPattern(t, f1, f2, d0, f2) }
-func TestTransportResPattern_c1h2d1t0(t *testing.T) { testTransportResPattern(t, f1, f2, d1, f0) }
-func TestTransportResPattern_c1h2d1t1(t *testing.T) { testTransportResPattern(t, f1, f2, d1, f1) }
-func TestTransportResPattern_c1h2d1t2(t *testing.T) { testTransportResPattern(t, f1, f2, d1, f2) }
-func TestTransportResPattern_c2h1d0t0(t *testing.T) { testTransportResPattern(t, f2, f1, d0, f0) }
-func TestTransportResPattern_c2h1d0t1(t *testing.T) { testTransportResPattern(t, f2, f1, d0, f1) }
-func TestTransportResPattern_c2h1d0t2(t *testing.T) { testTransportResPattern(t, f2, f1, d0, f2) }
-func TestTransportResPattern_c2h1d1t0(t *testing.T) { testTransportResPattern(t, f2, f1, d1, f0) }
-func TestTransportResPattern_c2h1d1t1(t *testing.T) { testTransportResPattern(t, f2, f1, d1, f1) }
-func TestTransportResPattern_c2h1d1t2(t *testing.T) { testTransportResPattern(t, f2, f1, d1, f2) }
-func TestTransportResPattern_c2h2d0t0(t *testing.T) { testTransportResPattern(t, f2, f2, d0, f0) }
-func TestTransportResPattern_c2h2d0t1(t *testing.T) { testTransportResPattern(t, f2, f2, d0, f1) }
-func TestTransportResPattern_c2h2d0t2(t *testing.T) { testTransportResPattern(t, f2, f2, d0, f2) }
-func TestTransportResPattern_c2h2d1t0(t *testing.T) { testTransportResPattern(t, f2, f2, d1, f0) }
-func TestTransportResPattern_c2h2d1t1(t *testing.T) { testTransportResPattern(t, f2, f2, d1, f1) }
-func TestTransportResPattern_c2h2d1t2(t *testing.T) { testTransportResPattern(t, f2, f2, d1, f2) }
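
// Editor's note: the playground link above may no longer resolve, so the
// program below is a hypothetical reconstruction (an editor's sketch, not the
// original generator) of how the 36 functions above could be produced. The
// f0/f1/f2 and d0/d1 shorthands match the constants defined earlier in this
// file.
//
//	package main
//
//	import "fmt"
//
//	func main() {
//		for c := 0; c <= 2; c++ { // 100-continue: omitted, one HEADERS, or split via CONTINUATION
//			for h := 1; h <= 2; h++ { // response headers: one HEADERS frame, or split
//				for d := 0; d <= 1; d++ { // response DATA: absent or present
//					for tr := 0; tr <= 2; tr++ { // trailers: omitted, one HEADERS, or split
//						fmt.Printf("func TestTransportResPattern_c%dh%dd%dt%d(t *testing.T) { testTransportResPattern(t, f%d, f%d, d%d, f%d) }\n",
//							c, h, d, tr, c, h, d, tr)
//					}
//				}
//			}
//		}
//	}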
-
-func testTransportResPattern(t *testing.T, expect100Continue, resHeader headerType, withData bool, trailers headerType) {
- const reqBody = "some request body"
- const resBody = "some response body"
-
- if resHeader == noHeader {
- // TODO: test 100-continue followed by immediate
- // server stream reset, without headers in the middle?
- panic("invalid combination")
- }
-
- ct := newClientTester(t)
- ct.client = func() error {
- req, _ := http.NewRequest("POST", "https://dummy.tld/", strings.NewReader(reqBody))
- if expect100Continue != noHeader {
- req.Header.Set("Expect", "100-continue")
- }
- res, err := ct.tr.RoundTrip(req)
- if err != nil {
- return fmt.Errorf("RoundTrip: %v", err)
- }
- defer res.Body.Close()
- if res.StatusCode != 200 {
- return fmt.Errorf("status code = %v; want 200", res.StatusCode)
- }
- slurp, err := ioutil.ReadAll(res.Body)
- if err != nil {
- return fmt.Errorf("Slurp: %v", err)
- }
- wantBody := resBody
- if !withData {
- wantBody = ""
- }
- if string(slurp) != wantBody {
- return fmt.Errorf("body = %q; want %q", slurp, wantBody)
- }
- if trailers == noHeader {
- if len(res.Trailer) > 0 {
- t.Errorf("Trailer = %v; want none", res.Trailer)
- }
- } else {
- want := http.Header{"Some-Trailer": {"some-value"}}
- if !reflect.DeepEqual(res.Trailer, want) {
- t.Errorf("Trailer = %v; want %v", res.Trailer, want)
- }
- }
- return nil
- }
- ct.server = func() error {
- ct.greet()
- var buf bytes.Buffer
- enc := hpack.NewEncoder(&buf)
-
- for {
- f, err := ct.fr.ReadFrame()
- if err != nil {
- return err
- }
- switch f := f.(type) {
- case *WindowUpdateFrame, *SettingsFrame:
- case *DataFrame:
- // ignore for now.
- case *HeadersFrame:
- endStream := false
- send := func(mode headerType) {
- hbf := buf.Bytes()
- switch mode {
- case oneHeader:
- ct.fr.WriteHeaders(HeadersFrameParam{
- StreamID: f.StreamID,
- EndHeaders: true,
- EndStream: endStream,
- BlockFragment: hbf,
- })
- case splitHeader:
- if len(hbf) < 2 {
- panic("too small")
- }
- ct.fr.WriteHeaders(HeadersFrameParam{
- StreamID: f.StreamID,
- EndHeaders: false,
- EndStream: endStream,
- BlockFragment: hbf[:1],
- })
- ct.fr.WriteContinuation(f.StreamID, true, hbf[1:])
- default:
- panic("bogus mode")
- }
- }
- if expect100Continue != noHeader {
- buf.Reset()
- enc.WriteField(hpack.HeaderField{Name: ":status", Value: "100"})
- send(expect100Continue)
- }
- // Response headers (1+ frames; 1 or 2 in this test, but never 0)
- {
- buf.Reset()
- enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
- enc.WriteField(hpack.HeaderField{Name: "x-foo", Value: "blah"})
- enc.WriteField(hpack.HeaderField{Name: "x-bar", Value: "more"})
- if trailers != noHeader {
- enc.WriteField(hpack.HeaderField{Name: "trailer", Value: "some-trailer"})
- }
- endStream = withData == false && trailers == noHeader
- send(resHeader)
- }
- if withData {
- endStream = trailers == noHeader
- ct.fr.WriteData(f.StreamID, endStream, []byte(resBody))
- }
- if trailers != noHeader {
- endStream = true
- buf.Reset()
- enc.WriteField(hpack.HeaderField{Name: "some-trailer", Value: "some-value"})
- send(trailers)
- }
- return nil
- }
- }
- }
- ct.run()
-}
-
-func TestTransportReceiveUndeclaredTrailer(t *testing.T) {
- ct := newClientTester(t)
- ct.client = func() error {
- req, _ := http.NewRequest("GET", "https://dummy.tld/", nil)
- res, err := ct.tr.RoundTrip(req)
- if err != nil {
- return fmt.Errorf("RoundTrip: %v", err)
- }
- defer res.Body.Close()
- if res.StatusCode != 200 {
- return fmt.Errorf("status code = %v; want 200", res.StatusCode)
- }
- slurp, err := ioutil.ReadAll(res.Body)
- if err != nil {
- return fmt.Errorf("res.Body ReadAll error = %q, %v; want %v", slurp, err, nil)
- }
- if len(slurp) > 0 {
- return fmt.Errorf("body = %q; want nothing", slurp)
- }
- if _, ok := res.Trailer["Some-Trailer"]; !ok {
- return fmt.Errorf("expected Some-Trailer")
- }
- return nil
- }
- ct.server = func() error {
- ct.greet()
-
- var n int
- var hf *HeadersFrame
- for hf == nil && n < 10 {
- f, err := ct.fr.ReadFrame()
- if err != nil {
- return err
- }
- hf, _ = f.(*HeadersFrame)
- n++
- }
-
- var buf bytes.Buffer
- enc := hpack.NewEncoder(&buf)
-
- // send headers without Trailer header
- enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
- ct.fr.WriteHeaders(HeadersFrameParam{
- StreamID: hf.StreamID,
- EndHeaders: true,
- EndStream: false,
- BlockFragment: buf.Bytes(),
- })
-
- // send trailers
- buf.Reset()
- enc.WriteField(hpack.HeaderField{Name: "some-trailer", Value: "I'm an undeclared Trailer!"})
- ct.fr.WriteHeaders(HeadersFrameParam{
- StreamID: hf.StreamID,
- EndHeaders: true,
- EndStream: true,
- BlockFragment: buf.Bytes(),
- })
- return nil
- }
- ct.run()
-}
-
-func TestTransportInvalidTrailer_Pseudo1(t *testing.T) {
- testTransportInvalidTrailer_Pseudo(t, oneHeader)
-}
-func TestTransportInvalidTrailer_Pseudo2(t *testing.T) {
- testTransportInvalidTrailer_Pseudo(t, splitHeader)
-}
-func testTransportInvalidTrailer_Pseudo(t *testing.T, trailers headerType) {
- testInvalidTrailer(t, trailers, pseudoHeaderError(":colon"), func(enc *hpack.Encoder) {
- enc.WriteField(hpack.HeaderField{Name: ":colon", Value: "foo"})
- enc.WriteField(hpack.HeaderField{Name: "foo", Value: "bar"})
- })
-}
-
-func TestTransportInvalidTrailer_Capital1(t *testing.T) {
- testTransportInvalidTrailer_Capital(t, oneHeader)
-}
-func TestTransportInvalidTrailer_Capital2(t *testing.T) {
- testTransportInvalidTrailer_Capital(t, splitHeader)
-}
-func testTransportInvalidTrailer_Capital(t *testing.T, trailers headerType) {
- testInvalidTrailer(t, trailers, headerFieldNameError("Capital"), func(enc *hpack.Encoder) {
- enc.WriteField(hpack.HeaderField{Name: "foo", Value: "bar"})
- enc.WriteField(hpack.HeaderField{Name: "Capital", Value: "bad"})
- })
-}
-func TestTransportInvalidTrailer_EmptyFieldName(t *testing.T) {
- testInvalidTrailer(t, oneHeader, headerFieldNameError(""), func(enc *hpack.Encoder) {
- enc.WriteField(hpack.HeaderField{Name: "", Value: "bad"})
- })
-}
-func TestTransportInvalidTrailer_BinaryFieldValue(t *testing.T) {
- testInvalidTrailer(t, oneHeader, headerFieldValueError("has\nnewline"), func(enc *hpack.Encoder) {
- enc.WriteField(hpack.HeaderField{Name: "x", Value: "has\nnewline"})
- })
-}
-
-func testInvalidTrailer(t *testing.T, trailers headerType, wantErr error, writeTrailer func(*hpack.Encoder)) {
- ct := newClientTester(t)
- ct.client = func() error {
- req, _ := http.NewRequest("GET", "https://dummy.tld/", nil)
- res, err := ct.tr.RoundTrip(req)
- if err != nil {
- return fmt.Errorf("RoundTrip: %v", err)
- }
- defer res.Body.Close()
- if res.StatusCode != 200 {
- return fmt.Errorf("status code = %v; want 200", res.StatusCode)
- }
- slurp, err := ioutil.ReadAll(res.Body)
- if err != wantErr {
- return fmt.Errorf("res.Body ReadAll error = %q, %#v; want %T of %#v", slurp, err, wantErr, wantErr)
- }
- if len(slurp) > 0 {
- return fmt.Errorf("body = %q; want nothing", slurp)
- }
- return nil
- }
- ct.server = func() error {
- ct.greet()
- var buf bytes.Buffer
- enc := hpack.NewEncoder(&buf)
-
- for {
- f, err := ct.fr.ReadFrame()
- if err != nil {
- return err
- }
- switch f := f.(type) {
- case *HeadersFrame:
- var endStream bool
- send := func(mode headerType) {
- hbf := buf.Bytes()
- switch mode {
- case oneHeader:
- ct.fr.WriteHeaders(HeadersFrameParam{
- StreamID: f.StreamID,
- EndHeaders: true,
- EndStream: endStream,
- BlockFragment: hbf,
- })
- case splitHeader:
- if len(hbf) < 2 {
- panic("too small")
- }
- ct.fr.WriteHeaders(HeadersFrameParam{
- StreamID: f.StreamID,
- EndHeaders: false,
- EndStream: endStream,
- BlockFragment: hbf[:1],
- })
- ct.fr.WriteContinuation(f.StreamID, true, hbf[1:])
- default:
- panic("bogus mode")
- }
- }
- // Response headers (1+ frames; 1 or 2 in this test, but never 0)
- {
- buf.Reset()
- enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
- enc.WriteField(hpack.HeaderField{Name: "trailer", Value: "declared"})
- endStream = false
- send(oneHeader)
- }
- // Trailers:
- {
- endStream = true
- buf.Reset()
- writeTrailer(enc)
- send(trailers)
- }
- return nil
- }
- }
- }
- ct.run()
-}
-
-func TestTransportChecksResponseHeaderListSize(t *testing.T) {
- ct := newClientTester(t)
- ct.client = func() error {
- req, _ := http.NewRequest("GET", "https://dummy.tld/", nil)
- res, err := ct.tr.RoundTrip(req)
- if err != errResponseHeaderListSize {
- if res != nil {
- res.Body.Close()
- }
- size := int64(0)
- for k, vv := range res.Header {
- for _, v := range vv {
- size += int64(len(k)) + int64(len(v)) + 32
- }
- }
- return fmt.Errorf("RoundTrip Error = %v (and %d bytes of response headers); want errResponseHeaderListSize", err, size)
- }
- return nil
- }
- ct.server = func() error {
- ct.greet()
- var buf bytes.Buffer
- enc := hpack.NewEncoder(&buf)
-
- for {
- f, err := ct.fr.ReadFrame()
- if err != nil {
- return err
- }
- switch f := f.(type) {
- case *HeadersFrame:
- enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
- large := strings.Repeat("a", 1<<10)
- for i := 0; i < 5042; i++ {
- enc.WriteField(hpack.HeaderField{Name: large, Value: large})
- }
- if size, want := buf.Len(), 6329; size != want {
- // Note: this number might change if
- // our hpack implementation
- // changes. That's fine. This is
- // just a sanity check that our
- // response can fit in a single
- // header block fragment frame.
- return fmt.Errorf("encoding over 10MB of duplicate keypairs took %d bytes; expected %d", size, want)
- }
- ct.fr.WriteHeaders(HeadersFrameParam{
- StreamID: f.StreamID,
- EndHeaders: true,
- EndStream: true,
- BlockFragment: buf.Bytes(),
- })
- return nil
- }
- }
- }
- ct.run()
-}
-
-// Test that the Transport returns a typed error from Response.Body.Read calls
-// when the server sends an error. (Here we use a panic, since that should
-// generate a stream error, but others, like cancel, should be similar.)
-func TestTransportBodyReadErrorType(t *testing.T) {
- doPanic := make(chan bool, 1)
- st := newServerTester(t,
- func(w http.ResponseWriter, r *http.Request) {
- w.(http.Flusher).Flush() // force headers out
- <-doPanic
- panic("boom")
- },
- optOnlyServer,
- optQuiet,
- )
- defer st.Close()
-
- tr := &Transport{TLSClientConfig: tlsConfigInsecure}
- defer tr.CloseIdleConnections()
- c := &http.Client{Transport: tr}
-
- res, err := c.Get(st.ts.URL)
- if err != nil {
- t.Fatal(err)
- }
- defer res.Body.Close()
- doPanic <- true
- buf := make([]byte, 100)
- n, err := res.Body.Read(buf)
- want := StreamError{StreamID: 0x1, Code: 0x2}
- if !reflect.DeepEqual(want, err) {
- t.Errorf("Read = %v, %#v; want error %#v", n, err, want)
- }
-}
-
-// golang.org/issue/13924
-// This used to fail after many iterations, especially with -race:
-// go test -v -run=TestTransportDoubleCloseOnWriteError -count=500 -race
-func TestTransportDoubleCloseOnWriteError(t *testing.T) {
- var (
- mu sync.Mutex
- conn net.Conn // to close if set
- )
-
- st := newServerTester(t,
- func(w http.ResponseWriter, r *http.Request) {
- mu.Lock()
- defer mu.Unlock()
- if conn != nil {
- conn.Close()
- }
- },
- optOnlyServer,
- )
- defer st.Close()
-
- tr := &Transport{
- TLSClientConfig: tlsConfigInsecure,
- DialTLS: func(network, addr string, cfg *tls.Config) (net.Conn, error) {
- tc, err := tls.Dial(network, addr, cfg)
- if err != nil {
- return nil, err
- }
- mu.Lock()
- defer mu.Unlock()
- conn = tc
- return tc, nil
- },
- }
- defer tr.CloseIdleConnections()
- c := &http.Client{Transport: tr}
- c.Get(st.ts.URL)
-}
-
-// Test that the http1 Transport.DisableKeepAlives option is respected
-// and connections are closed as soon as idle.
-// See golang.org/issue/14008
-func TestTransportDisableKeepAlives(t *testing.T) {
- st := newServerTester(t,
- func(w http.ResponseWriter, r *http.Request) {
- io.WriteString(w, "hi")
- },
- optOnlyServer,
- )
- defer st.Close()
-
- connClosed := make(chan struct{}) // closed on tls.Conn.Close
- tr := &Transport{
- t1: &http.Transport{
- DisableKeepAlives: true,
- },
- TLSClientConfig: tlsConfigInsecure,
- DialTLS: func(network, addr string, cfg *tls.Config) (net.Conn, error) {
- tc, err := tls.Dial(network, addr, cfg)
- if err != nil {
- return nil, err
- }
- return &noteCloseConn{Conn: tc, closefn: func() { close(connClosed) }}, nil
- },
- }
- c := &http.Client{Transport: tr}
- res, err := c.Get(st.ts.URL)
- if err != nil {
- t.Fatal(err)
- }
- if _, err := ioutil.ReadAll(res.Body); err != nil {
- t.Fatal(err)
- }
- defer res.Body.Close()
-
- select {
- case <-connClosed:
- case <-time.After(1 * time.Second):
- t.Errorf("timeout")
- }
-}
-
-// Test concurrent requests with Transport.DisableKeepAlives. We can share connections,
-// but when things are totally idle, it still needs to close.
-func TestTransportDisableKeepAlives_Concurrency(t *testing.T) {
- const D = 25 * time.Millisecond
- st := newServerTester(t,
- func(w http.ResponseWriter, r *http.Request) {
- time.Sleep(D)
- io.WriteString(w, "hi")
- },
- optOnlyServer,
- )
- defer st.Close()
-
- var dials int32
- var conns sync.WaitGroup
- tr := &Transport{
- t1: &http.Transport{
- DisableKeepAlives: true,
- },
- TLSClientConfig: tlsConfigInsecure,
- DialTLS: func(network, addr string, cfg *tls.Config) (net.Conn, error) {
- tc, err := tls.Dial(network, addr, cfg)
- if err != nil {
- return nil, err
- }
- atomic.AddInt32(&dials, 1)
- conns.Add(1)
- return &noteCloseConn{Conn: tc, closefn: func() { conns.Done() }}, nil
- },
- }
- c := &http.Client{Transport: tr}
- var reqs sync.WaitGroup
- const N = 20
- for i := 0; i < N; i++ {
- reqs.Add(1)
- if i == N-1 {
- // For the final request, try to make all the
- // others close. This isn't verified in the
- // count, other than the Log statement, since
- // it's so timing dependent. This test is
- // really to make sure we don't interrupt a
- // valid request.
- time.Sleep(D * 2)
- }
- go func() {
- defer reqs.Done()
- res, err := c.Get(st.ts.URL)
- if err != nil {
- t.Error(err)
- return
- }
- if _, err := ioutil.ReadAll(res.Body); err != nil {
- t.Error(err)
- return
- }
- res.Body.Close()
- }()
- }
- reqs.Wait()
- conns.Wait()
- t.Logf("did %d dials, %d requests", atomic.LoadInt32(&dials), N)
-}
-
-type noteCloseConn struct {
- net.Conn
- onceClose sync.Once
- closefn func()
-}
-
-func (c *noteCloseConn) Close() error {
- c.onceClose.Do(c.closefn)
- return c.Conn.Close()
-}
-
-func isTimeout(err error) bool {
- switch err := err.(type) {
- case nil:
- return false
- case *url.Error:
- return isTimeout(err.Err)
- case net.Error:
- return err.Timeout()
- }
- return false
-}
-
-// Test that the http1 Transport.ResponseHeaderTimeout option is respected and
-// that a cancel (RST_STREAM with CANCEL) is sent when it fires.
-func TestTransportResponseHeaderTimeout_NoBody(t *testing.T) {
- testTransportResponseHeaderTimeout(t, false)
-}
-func TestTransportResponseHeaderTimeout_Body(t *testing.T) {
- testTransportResponseHeaderTimeout(t, true)
-}
-
-func testTransportResponseHeaderTimeout(t *testing.T, body bool) {
- ct := newClientTester(t)
- ct.tr.t1 = &http.Transport{
- ResponseHeaderTimeout: 5 * time.Millisecond,
- }
- ct.client = func() error {
- c := &http.Client{Transport: ct.tr}
- var err error
- var n int64
- const bodySize = 4 << 20
- if body {
- _, err = c.Post("https://dummy.tld/", "text/foo", io.LimitReader(countingReader{&n}, bodySize))
- } else {
- _, err = c.Get("https://dummy.tld/")
- }
- if !isTimeout(err) {
- t.Errorf("client expected timeout error; got %#v", err)
- }
- if body && n != bodySize {
- t.Errorf("only read %d bytes of body; want %d", n, bodySize)
- }
- return nil
- }
- ct.server = func() error {
- ct.greet()
- for {
- f, err := ct.fr.ReadFrame()
- if err != nil {
- t.Logf("ReadFrame: %v", err)
- return nil
- }
- switch f := f.(type) {
- case *DataFrame:
- dataLen := len(f.Data())
- if dataLen > 0 {
- if err := ct.fr.WriteWindowUpdate(0, uint32(dataLen)); err != nil {
- return err
- }
- if err := ct.fr.WriteWindowUpdate(f.StreamID, uint32(dataLen)); err != nil {
- return err
- }
- }
- case *RSTStreamFrame:
- if f.StreamID == 1 && f.ErrCode == ErrCodeCancel {
- return nil
- }
- }
- }
- return nil
- }
- ct.run()
-}
-
-func TestTransportDisableCompression(t *testing.T) {
- const body = "sup"
- st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
- want := http.Header{
- "User-Agent": []string{"Go-http-client/2.0"},
- }
- if !reflect.DeepEqual(r.Header, want) {
- t.Errorf("request headers = %v; want %v", r.Header, want)
- }
- }, optOnlyServer)
- defer st.Close()
-
- tr := &Transport{
- TLSClientConfig: tlsConfigInsecure,
- t1: &http.Transport{
- DisableCompression: true,
- },
- }
- defer tr.CloseIdleConnections()
-
- req, err := http.NewRequest("GET", st.ts.URL, nil)
- if err != nil {
- t.Fatal(err)
- }
- res, err := tr.RoundTrip(req)
- if err != nil {
- t.Fatal(err)
- }
- defer res.Body.Close()
-}
-
-// RFC 7540 section 8.1.2.2
-func TestTransportRejectsConnHeaders(t *testing.T) {
- st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
- var got []string
- for k := range r.Header {
- got = append(got, k)
- }
- sort.Strings(got)
- w.Header().Set("Got-Header", strings.Join(got, ","))
- }, optOnlyServer)
- defer st.Close()
-
- tr := &Transport{TLSClientConfig: tlsConfigInsecure}
- defer tr.CloseIdleConnections()
-
- tests := []struct {
- key string
- value []string
- want string
- }{
- {
- key: "Upgrade",
- value: []string{"anything"},
- want: "ERROR: http2: invalid Upgrade request header",
- },
- {
- key: "Connection",
- value: []string{"foo"},
- want: "ERROR: http2: invalid Connection request header",
- },
- {
- key: "Connection",
- value: []string{"close"},
- want: "Accept-Encoding,User-Agent",
- },
- {
- key: "Connection",
- value: []string{"close", "something-else"},
- want: "ERROR: http2: invalid Connection request header",
- },
- {
- key: "Connection",
- value: []string{"keep-alive"},
- want: "Accept-Encoding,User-Agent",
- },
- {
- key: "Proxy-Connection", // just deleted and ignored
- value: []string{"keep-alive"},
- want: "Accept-Encoding,User-Agent",
- },
- {
- key: "Transfer-Encoding",
- value: []string{""},
- want: "Accept-Encoding,User-Agent",
- },
- {
- key: "Transfer-Encoding",
- value: []string{"foo"},
- want: "ERROR: http2: invalid Transfer-Encoding request header",
- },
- {
- key: "Transfer-Encoding",
- value: []string{"chunked"},
- want: "Accept-Encoding,User-Agent",
- },
- {
- key: "Transfer-Encoding",
- value: []string{"chunked", "other"},
- want: "ERROR: http2: invalid Transfer-Encoding request header",
- },
- {
- key: "Content-Length",
- value: []string{"123"},
- want: "Accept-Encoding,User-Agent",
- },
- }
-
- for _, tt := range tests {
- req, _ := http.NewRequest("GET", st.ts.URL, nil)
- req.Header[tt.key] = tt.value
- res, err := tr.RoundTrip(req)
- var got string
- if err != nil {
- got = fmt.Sprintf("ERROR: %v", err)
- } else {
- got = res.Header.Get("Got-Header")
- res.Body.Close()
- }
- if got != tt.want {
- t.Errorf("For key %q, value %q, got = %q; want %q", tt.key, tt.value, got, tt.want)
- }
- }
-}
-
-// Tests that gzipReader doesn't crash on a second Read call following
-// the first Read call's gzip.NewReader returning an error.
-func TestGzipReader_DoubleReadCrash(t *testing.T) {
- gz := &gzipReader{
- body: ioutil.NopCloser(strings.NewReader("0123456789")),
- }
- var buf [1]byte
- n, err1 := gz.Read(buf[:])
- if n != 0 || !strings.Contains(fmt.Sprint(err1), "invalid header") {
- t.Fatalf("Read = %v, %v; want 0, invalid header", n, err1)
- }
- n, err2 := gz.Read(buf[:])
- if n != 0 || err2 != err1 {
- t.Fatalf("second Read = %v, %v; want 0, %v", n, err2, err1)
- }
-}
-
-func TestTransportNewTLSConfig(t *testing.T) {
- tests := [...]struct {
- conf *tls.Config
- host string
- want *tls.Config
- }{
- // Normal case.
- 0: {
- conf: nil,
- host: "foo.com",
- want: &tls.Config{
- ServerName: "foo.com",
- NextProtos: []string{NextProtoTLS},
- },
- },
-
- // User-provided name (bar.com) takes precedence:
- 1: {
- conf: &tls.Config{
- ServerName: "bar.com",
- },
- host: "foo.com",
- want: &tls.Config{
- ServerName: "bar.com",
- NextProtos: []string{NextProtoTLS},
- },
- },
-
- // NextProto is prepended:
- 2: {
- conf: &tls.Config{
- NextProtos: []string{"foo", "bar"},
- },
- host: "example.com",
- want: &tls.Config{
- ServerName: "example.com",
- NextProtos: []string{NextProtoTLS, "foo", "bar"},
- },
- },
-
- // NextProto is not duplicated:
- 3: {
- conf: &tls.Config{
- NextProtos: []string{"foo", "bar", NextProtoTLS},
- },
- host: "example.com",
- want: &tls.Config{
- ServerName: "example.com",
- NextProtos: []string{"foo", "bar", NextProtoTLS},
- },
- },
- }
- for i, tt := range tests {
- tr := &Transport{TLSClientConfig: tt.conf}
- got := tr.newTLSConfig(tt.host)
- if !reflect.DeepEqual(got, tt.want) {
- t.Errorf("%d. got %#v; want %#v", i, got, tt.want)
- }
- }
-}
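
A note on the removed transport tests above: TestTransportNewTLSConfig encodes three rules for deriving the client TLS config: clone the user's config, default ServerName to the dialed host only when the user left it empty, and advertise the "h2" ALPN protocol without duplicating it. Below is a minimal standalone sketch of those rules; the helper name newH2TLSConfig is invented for illustration and is not the vendored code's unexported newTLSConfig.

package main

import (
	"crypto/tls"
	"fmt"
)

const nextProtoTLS = "h2" // ALPN protocol name for HTTP/2 over TLS

// newH2TLSConfig is a hypothetical helper mirroring the rules the deleted
// test table checks; it is not the vendored implementation.
func newH2TLSConfig(base *tls.Config, host string) *tls.Config {
	cfg := &tls.Config{}
	if base != nil {
		cfg = base.Clone()
	}
	if cfg.ServerName == "" {
		cfg.ServerName = host // only default when the caller set no name
	}
	for _, p := range cfg.NextProtos {
		if p == nextProtoTLS {
			return cfg // "h2" already advertised; don't duplicate it
		}
	}
	cfg.NextProtos = append([]string{nextProtoTLS}, cfg.NextProtos...)
	return cfg
}

func main() {
	got := newH2TLSConfig(&tls.Config{NextProtos: []string{"foo", "bar"}}, "example.com")
	fmt.Println(got.ServerName, got.NextProtos) // example.com [h2 foo bar]
}
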
diff --git a/vendor/golang.org/x/net/http2/write.go b/vendor/golang.org/x/net/http2/write.go
deleted file mode 100644
index 5297a4bf..00000000
--- a/vendor/golang.org/x/net/http2/write.go
+++ /dev/null
@@ -1,263 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package http2
-
-import (
- "bytes"
- "fmt"
- "log"
- "net/http"
- "sort"
- "time"
-
- "golang.org/x/net/http2/hpack"
-)
-
-// writeFramer is implemented by any type that is used to write frames.
-type writeFramer interface {
- writeFrame(writeContext) error
-}
-
-// writeContext is the interface needed by the various frame writer
-// types below. All the writeFrame methods below are scheduled via the
-// frame writing scheduler (see writeScheduler in writesched.go).
-//
-// This interface is implemented by *serverConn.
-//
-// TODO: decide whether to a) use this in the client code (which didn't
-// end up using this yet, because it has a simpler design, not
-// currently implementing priorities), or b) delete this and
-// make the server code a bit more concrete.
-type writeContext interface {
- Framer() *Framer
- Flush() error
- CloseConn() error
- // HeaderEncoder returns an HPACK encoder that writes to the
- // returned buffer.
- HeaderEncoder() (*hpack.Encoder, *bytes.Buffer)
-}
-
-// endsStream reports whether the given frame writer w will locally
-// close the stream.
-func endsStream(w writeFramer) bool {
- switch v := w.(type) {
- case *writeData:
- return v.endStream
- case *writeResHeaders:
- return v.endStream
- case nil:
- // This can only happen if the caller reuses w after it's
- // been intentionally nil'ed out to prevent use. Keep this
- // here to catch future refactoring breaking it.
- panic("endsStream called on nil writeFramer")
- }
- return false
-}
-
-type flushFrameWriter struct{}
-
-func (flushFrameWriter) writeFrame(ctx writeContext) error {
- return ctx.Flush()
-}
-
-type writeSettings []Setting
-
-func (s writeSettings) writeFrame(ctx writeContext) error {
- return ctx.Framer().WriteSettings([]Setting(s)...)
-}
-
-type writeGoAway struct {
- maxStreamID uint32
- code ErrCode
-}
-
-func (p *writeGoAway) writeFrame(ctx writeContext) error {
- err := ctx.Framer().WriteGoAway(p.maxStreamID, p.code, nil)
- if p.code != 0 {
- ctx.Flush() // ignore error: we're hanging up on them anyway
- time.Sleep(50 * time.Millisecond)
- ctx.CloseConn()
- }
- return err
-}
-
-type writeData struct {
- streamID uint32
- p []byte
- endStream bool
-}
-
-func (w *writeData) String() string {
- return fmt.Sprintf("writeData(stream=%d, p=%d, endStream=%v)", w.streamID, len(w.p), w.endStream)
-}
-
-func (w *writeData) writeFrame(ctx writeContext) error {
- return ctx.Framer().WriteData(w.streamID, w.endStream, w.p)
-}
-
-// handlerPanicRST is the message sent from handler goroutines when
-// the handler panics.
-type handlerPanicRST struct {
- StreamID uint32
-}
-
-func (hp handlerPanicRST) writeFrame(ctx writeContext) error {
- return ctx.Framer().WriteRSTStream(hp.StreamID, ErrCodeInternal)
-}
-
-func (se StreamError) writeFrame(ctx writeContext) error {
- return ctx.Framer().WriteRSTStream(se.StreamID, se.Code)
-}
-
-type writePingAck struct{ pf *PingFrame }
-
-func (w writePingAck) writeFrame(ctx writeContext) error {
- return ctx.Framer().WritePing(true, w.pf.Data)
-}
-
-type writeSettingsAck struct{}
-
-func (writeSettingsAck) writeFrame(ctx writeContext) error {
- return ctx.Framer().WriteSettingsAck()
-}
-
-// writeResHeaders is a request to write a HEADERS and 0+ CONTINUATION frames
-// for HTTP response headers or trailers from a server handler.
-type writeResHeaders struct {
- streamID uint32
- httpResCode int // 0 means no ":status" line
- h http.Header // may be nil
- trailers []string // if non-nil, which keys of h to write. nil means all.
- endStream bool
-
- date string
- contentType string
- contentLength string
-}
-
-func encKV(enc *hpack.Encoder, k, v string) {
- if VerboseLogs {
- log.Printf("http2: server encoding header %q = %q", k, v)
- }
- enc.WriteField(hpack.HeaderField{Name: k, Value: v})
-}
-
-func (w *writeResHeaders) writeFrame(ctx writeContext) error {
- enc, buf := ctx.HeaderEncoder()
- buf.Reset()
-
- if w.httpResCode != 0 {
- encKV(enc, ":status", httpCodeString(w.httpResCode))
- }
-
- encodeHeaders(enc, w.h, w.trailers)
-
- if w.contentType != "" {
- encKV(enc, "content-type", w.contentType)
- }
- if w.contentLength != "" {
- encKV(enc, "content-length", w.contentLength)
- }
- if w.date != "" {
- encKV(enc, "date", w.date)
- }
-
- headerBlock := buf.Bytes()
- if len(headerBlock) == 0 && w.trailers == nil {
- panic("unexpected empty hpack")
- }
-
- // For now we're lazy and just pick the minimum MAX_FRAME_SIZE
- // that all peers must support (16KB). Later we could care
- // more and send larger frames if the peer advertised it, but
- // there's little point. Most headers are small anyway (so we
- // generally won't have CONTINUATION frames), and extra frames
- // only waste 9 bytes anyway.
- const maxFrameSize = 16384
-
- first := true
- for len(headerBlock) > 0 {
- frag := headerBlock
- if len(frag) > maxFrameSize {
- frag = frag[:maxFrameSize]
- }
- headerBlock = headerBlock[len(frag):]
- endHeaders := len(headerBlock) == 0
- var err error
- if first {
- first = false
- err = ctx.Framer().WriteHeaders(HeadersFrameParam{
- StreamID: w.streamID,
- BlockFragment: frag,
- EndStream: w.endStream,
- EndHeaders: endHeaders,
- })
- } else {
- err = ctx.Framer().WriteContinuation(w.streamID, endHeaders, frag)
- }
- if err != nil {
- return err
- }
- }
- return nil
-}
-
-type write100ContinueHeadersFrame struct {
- streamID uint32
-}
-
-func (w write100ContinueHeadersFrame) writeFrame(ctx writeContext) error {
- enc, buf := ctx.HeaderEncoder()
- buf.Reset()
- encKV(enc, ":status", "100")
- return ctx.Framer().WriteHeaders(HeadersFrameParam{
- StreamID: w.streamID,
- BlockFragment: buf.Bytes(),
- EndStream: false,
- EndHeaders: true,
- })
-}
-
-type writeWindowUpdate struct {
- streamID uint32 // or 0 for conn-level
- n uint32
-}
-
-func (wu writeWindowUpdate) writeFrame(ctx writeContext) error {
- return ctx.Framer().WriteWindowUpdate(wu.streamID, wu.n)
-}
-
-func encodeHeaders(enc *hpack.Encoder, h http.Header, keys []string) {
- // TODO: garbage. pool sorters like http1? hot path for 1 key?
- if keys == nil {
- keys = make([]string, 0, len(h))
- for k := range h {
- keys = append(keys, k)
- }
- sort.Strings(keys)
- }
- for _, k := range keys {
- vv := h[k]
- k = lowerHeader(k)
- if !validHeaderFieldName(k) {
- // TODO: return an error? golang.org/issue/14048
- // For now just omit it.
- continue
- }
- isTE := k == "transfer-encoding"
- for _, v := range vv {
- if !validHeaderFieldValue(v) {
- // TODO: return an error? golang.org/issue/14048
- // For now just omit it.
- continue
- }
- // TODO: more of "8.1.2.2 Connection-Specific Header Fields"
- if isTE && v != "trailers" {
- continue
- }
- encKV(enc, k, v)
- }
- }
-}
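
The writeResHeaders.writeFrame method removed above fragments the HPACK-encoded header block into one HEADERS frame followed by zero or more CONTINUATION frames, capping each fragment at the 16KB minimum SETTINGS_MAX_FRAME_SIZE that every peer must accept. Here is a self-contained sketch of just that splitting rule, with frame-type strings standing in for the real Framer calls:

package main

import "fmt"

// splitHeaderBlock reports which frames a header block of the given size
// would be written as. Purely illustrative; it does no HPACK encoding.
func splitHeaderBlock(block []byte, maxFrameSize int) []string {
	var frames []string
	first := true
	for len(block) > 0 {
		frag := block
		if len(frag) > maxFrameSize {
			frag = frag[:maxFrameSize]
		}
		block = block[len(frag):]
		endHeaders := len(block) == 0
		if first {
			first = false
			frames = append(frames, fmt.Sprintf("HEADERS(%d bytes, END_HEADERS=%v)", len(frag), endHeaders))
		} else {
			frames = append(frames, fmt.Sprintf("CONTINUATION(%d bytes, END_HEADERS=%v)", len(frag), endHeaders))
		}
	}
	return frames
}

func main() {
	// A 40000-byte block becomes HEADERS(16384) + CONTINUATION(16384) + CONTINUATION(7232).
	for _, f := range splitHeaderBlock(make([]byte, 40000), 16384) {
		fmt.Println(f)
	}
}
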
diff --git a/vendor/golang.org/x/net/http2/writesched.go b/vendor/golang.org/x/net/http2/writesched.go
deleted file mode 100644
index c24316ce..00000000
--- a/vendor/golang.org/x/net/http2/writesched.go
+++ /dev/null
@@ -1,283 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package http2
-
-import "fmt"
-
-// frameWriteMsg is a request to write a frame.
-type frameWriteMsg struct {
- // write is the interface value that does the writing, once the
- // writeScheduler (below) has decided to select this frame
- // to write. The write functions are all defined in write.go.
- write writeFramer
-
- stream *stream // used for prioritization. nil for non-stream frames.
-
- // done, if non-nil, must be a buffered channel with space for
- // 1 message and is sent the return value from write (or an
- // earlier error) when the frame has been written.
- done chan error
-}
-
-// for debugging only:
-func (wm frameWriteMsg) String() string {
- var streamID uint32
- if wm.stream != nil {
- streamID = wm.stream.id
- }
- var des string
- if s, ok := wm.write.(fmt.Stringer); ok {
- des = s.String()
- } else {
- des = fmt.Sprintf("%T", wm.write)
- }
- return fmt.Sprintf("[frameWriteMsg stream=%d, ch=%v, type: %v]", streamID, wm.done != nil, des)
-}
-
-// writeScheduler tracks pending frames to write, priorities, and decides
-// the next one to use. It is not thread-safe.
-type writeScheduler struct {
-	// zero holds frames not associated with a specific stream.
-	// They're sent before any stream-specific frames.
- zero writeQueue
-
- // maxFrameSize is the maximum size of a DATA frame
- // we'll write. Must be non-zero and between 16K-16M.
- maxFrameSize uint32
-
- // sq contains the stream-specific queues, keyed by stream ID.
- // when a stream is idle, it's deleted from the map.
- sq map[uint32]*writeQueue
-
- // canSend is a slice of memory that's reused between frame
- // scheduling decisions to hold the list of writeQueues (from sq)
- // which have enough flow control data to send. After canSend is
- // built, the best is selected.
- canSend []*writeQueue
-
- // pool of empty queues for reuse.
- queuePool []*writeQueue
-}
-
-func (ws *writeScheduler) putEmptyQueue(q *writeQueue) {
- if len(q.s) != 0 {
- panic("queue must be empty")
- }
- ws.queuePool = append(ws.queuePool, q)
-}
-
-func (ws *writeScheduler) getEmptyQueue() *writeQueue {
- ln := len(ws.queuePool)
- if ln == 0 {
- return new(writeQueue)
- }
- q := ws.queuePool[ln-1]
- ws.queuePool = ws.queuePool[:ln-1]
- return q
-}
-
-func (ws *writeScheduler) empty() bool { return ws.zero.empty() && len(ws.sq) == 0 }
-
-func (ws *writeScheduler) add(wm frameWriteMsg) {
- st := wm.stream
- if st == nil {
- ws.zero.push(wm)
- } else {
- ws.streamQueue(st.id).push(wm)
- }
-}
-
-func (ws *writeScheduler) streamQueue(streamID uint32) *writeQueue {
- if q, ok := ws.sq[streamID]; ok {
- return q
- }
- if ws.sq == nil {
- ws.sq = make(map[uint32]*writeQueue)
- }
- q := ws.getEmptyQueue()
- ws.sq[streamID] = q
- return q
-}
-
-// take returns the most important frame to write and removes it from the scheduler.
-// It is illegal to call this if the scheduler is empty or if there are no connection-level
-// flow control bytes available.
-func (ws *writeScheduler) take() (wm frameWriteMsg, ok bool) {
- if ws.maxFrameSize == 0 {
- panic("internal error: ws.maxFrameSize not initialized or invalid")
- }
-
-	// If there are any frames not associated with streams, prefer those first.
- // These are usually SETTINGS, etc.
- if !ws.zero.empty() {
- return ws.zero.shift(), true
- }
- if len(ws.sq) == 0 {
- return
- }
-
- // Next, prioritize frames on streams that aren't DATA frames (no cost).
- for id, q := range ws.sq {
- if q.firstIsNoCost() {
- return ws.takeFrom(id, q)
- }
- }
-
- // Now, all that remains are DATA frames with non-zero bytes to
- // send. So pick the best one.
- if len(ws.canSend) != 0 {
- panic("should be empty")
- }
- for _, q := range ws.sq {
- if n := ws.streamWritableBytes(q); n > 0 {
- ws.canSend = append(ws.canSend, q)
- }
- }
- if len(ws.canSend) == 0 {
- return
- }
- defer ws.zeroCanSend()
-
- // TODO: find the best queue
- q := ws.canSend[0]
-
- return ws.takeFrom(q.streamID(), q)
-}
-
-// zeroCanSend is deferred from take.
-func (ws *writeScheduler) zeroCanSend() {
- for i := range ws.canSend {
- ws.canSend[i] = nil
- }
- ws.canSend = ws.canSend[:0]
-}
-
-// streamWritableBytes returns the number of DATA bytes we could write
-// from the given queue's stream, if this stream/queue were
-// selected. It is an error to call this if q's head isn't a
-// *writeData.
-func (ws *writeScheduler) streamWritableBytes(q *writeQueue) int32 {
- wm := q.head()
- ret := wm.stream.flow.available() // max we can write
- if ret == 0 {
- return 0
- }
- if int32(ws.maxFrameSize) < ret {
- ret = int32(ws.maxFrameSize)
- }
- if ret == 0 {
- panic("internal error: ws.maxFrameSize not initialized or invalid")
- }
- wd := wm.write.(*writeData)
- if len(wd.p) < int(ret) {
- ret = int32(len(wd.p))
- }
- return ret
-}
-
-func (ws *writeScheduler) takeFrom(id uint32, q *writeQueue) (wm frameWriteMsg, ok bool) {
- wm = q.head()
- // If the first item in this queue costs flow control tokens
- // and we don't have enough, write as much as we can.
- if wd, ok := wm.write.(*writeData); ok && len(wd.p) > 0 {
- allowed := wm.stream.flow.available() // max we can write
- if allowed == 0 {
- // No quota available. Caller can try the next stream.
- return frameWriteMsg{}, false
- }
- if int32(ws.maxFrameSize) < allowed {
- allowed = int32(ws.maxFrameSize)
- }
- // TODO: further restrict the allowed size, because even if
- // the peer says it's okay to write 16MB data frames, we might
- // want to write smaller ones to properly weight competing
- // streams' priorities.
-
- if len(wd.p) > int(allowed) {
- wm.stream.flow.take(allowed)
- chunk := wd.p[:allowed]
- wd.p = wd.p[allowed:]
- // Make up a new write message of a valid size, rather
- // than shifting one off the queue.
- return frameWriteMsg{
- stream: wm.stream,
- write: &writeData{
- streamID: wd.streamID,
- p: chunk,
- // even if the original had endStream set, there
-					// are bytes remaining because len(wd.p) > allowed,
- // so we know endStream is false:
- endStream: false,
- },
- // our caller is blocking on the final DATA frame, not
- // these intermediates, so no need to wait:
- done: nil,
- }, true
- }
- wm.stream.flow.take(int32(len(wd.p)))
- }
-
- q.shift()
- if q.empty() {
- ws.putEmptyQueue(q)
- delete(ws.sq, id)
- }
- return wm, true
-}
-
-func (ws *writeScheduler) forgetStream(id uint32) {
- q, ok := ws.sq[id]
- if !ok {
- return
- }
- delete(ws.sq, id)
-
- // But keep it for others later.
- for i := range q.s {
- q.s[i] = frameWriteMsg{}
- }
- q.s = q.s[:0]
- ws.putEmptyQueue(q)
-}
-
-type writeQueue struct {
- s []frameWriteMsg
-}
-
-// streamID returns the stream ID for a non-empty stream-specific queue.
-func (q *writeQueue) streamID() uint32 { return q.s[0].stream.id }
-
-func (q *writeQueue) empty() bool { return len(q.s) == 0 }
-
-func (q *writeQueue) push(wm frameWriteMsg) {
- q.s = append(q.s, wm)
-}
-
-// head returns the next item that would be removed by shift.
-func (q *writeQueue) head() frameWriteMsg {
- if len(q.s) == 0 {
- panic("invalid use of queue")
- }
- return q.s[0]
-}
-
-func (q *writeQueue) shift() frameWriteMsg {
- if len(q.s) == 0 {
- panic("invalid use of queue")
- }
- wm := q.s[0]
- // TODO: less copy-happy queue.
- copy(q.s, q.s[1:])
- q.s[len(q.s)-1] = frameWriteMsg{}
- q.s = q.s[:len(q.s)-1]
- return wm
-}
-
-func (q *writeQueue) firstIsNoCost() bool {
- if df, ok := q.s[0].write.(*writeData); ok {
- return len(df.p) == 0
- }
- return true
-}
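
The takeFrom/streamWritableBytes pair removed above clamps each queued DATA frame to the smallest of three limits: the stream's flow-control window, the scheduler's maxFrameSize, and the bytes still pending; anything left over is re-queued as a new writeData. A tiny sketch of that clamp, with invented names:

package main

import "fmt"

// writableChunk returns how many DATA bytes could be written right now,
// or 0 if the stream has no flow-control quota (the scheduler would then
// try another stream). Illustrative only.
func writableChunk(pending, flowAvailable, maxFrameSize int32) int32 {
	allowed := flowAvailable
	if allowed == 0 {
		return 0
	}
	if maxFrameSize < allowed {
		allowed = maxFrameSize
	}
	if pending < allowed {
		allowed = pending
	}
	return allowed
}

func main() {
	// 1 MiB pending, 64 KiB window, 16 KiB frames: write 16 KiB now, re-queue the rest.
	fmt.Println(writableChunk(1<<20, 64<<10, 16<<10)) // 16384
}
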
diff --git a/vendor/golang.org/x/net/http2/z_spec_test.go b/vendor/golang.org/x/net/http2/z_spec_test.go
deleted file mode 100644
index e0f420a1..00000000
--- a/vendor/golang.org/x/net/http2/z_spec_test.go
+++ /dev/null
@@ -1,356 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package http2
-
-import (
- "bytes"
- "encoding/xml"
- "flag"
- "fmt"
- "io"
- "os"
- "reflect"
- "regexp"
- "sort"
- "strconv"
- "strings"
- "sync"
- "testing"
-)
-
-var coverSpec = flag.Bool("coverspec", false, "Run spec coverage tests")
-
-// The global map of sentence coverage for the http2 spec.
-var defaultSpecCoverage specCoverage
-
-var loadSpecOnce sync.Once
-
-func loadSpec() {
- if f, err := os.Open("testdata/draft-ietf-httpbis-http2.xml"); err != nil {
- panic(err)
- } else {
- defaultSpecCoverage = readSpecCov(f)
- f.Close()
- }
-}
-
-// covers marks all sentences for section sec in defaultSpecCoverage. Sentences not
-// "covered" will be included in the report output by TestSpecCoverage.
-func covers(sec, sentences string) {
- loadSpecOnce.Do(loadSpec)
- defaultSpecCoverage.cover(sec, sentences)
-}
-
-type specPart struct {
- section string
- sentence string
-}
-
-func (ss specPart) Less(oo specPart) bool {
- atoi := func(s string) int {
- n, err := strconv.Atoi(s)
- if err != nil {
- panic(err)
- }
- return n
- }
- a := strings.Split(ss.section, ".")
- b := strings.Split(oo.section, ".")
- for len(a) > 0 {
- if len(b) == 0 {
- return false
- }
- x, y := atoi(a[0]), atoi(b[0])
- if x == y {
- a, b = a[1:], b[1:]
- continue
- }
- return x < y
- }
- if len(b) > 0 {
- return true
- }
- return false
-}
-
-type bySpecSection []specPart
-
-func (a bySpecSection) Len() int { return len(a) }
-func (a bySpecSection) Less(i, j int) bool { return a[i].Less(a[j]) }
-func (a bySpecSection) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
-
-type specCoverage struct {
- coverage map[specPart]bool
- d *xml.Decoder
-}
-
-func joinSection(sec []int) string {
- s := fmt.Sprintf("%d", sec[0])
- for _, n := range sec[1:] {
- s = fmt.Sprintf("%s.%d", s, n)
- }
- return s
-}
-
-func (sc specCoverage) readSection(sec []int) {
- var (
- buf = new(bytes.Buffer)
- sub = 0
- )
- for {
- tk, err := sc.d.Token()
- if err != nil {
- if err == io.EOF {
- return
- }
- panic(err)
- }
- switch v := tk.(type) {
- case xml.StartElement:
- if skipElement(v) {
- if err := sc.d.Skip(); err != nil {
- panic(err)
- }
- if v.Name.Local == "section" {
- sub++
- }
- break
- }
- switch v.Name.Local {
- case "section":
- sub++
- sc.readSection(append(sec, sub))
- case "xref":
- buf.Write(sc.readXRef(v))
- }
- case xml.CharData:
- if len(sec) == 0 {
- break
- }
- buf.Write(v)
- case xml.EndElement:
- if v.Name.Local == "section" {
- sc.addSentences(joinSection(sec), buf.String())
- return
- }
- }
- }
-}
-
-func (sc specCoverage) readXRef(se xml.StartElement) []byte {
- var b []byte
- for {
- tk, err := sc.d.Token()
- if err != nil {
- panic(err)
- }
- switch v := tk.(type) {
- case xml.CharData:
- if b != nil {
- panic("unexpected CharData")
- }
- b = []byte(string(v))
- case xml.EndElement:
- if v.Name.Local != "xref" {
-				panic("expected </xref>")
- }
- if b != nil {
- return b
- }
- sig := attrSig(se)
- switch sig {
- case "target":
- return []byte(fmt.Sprintf("[%s]", attrValue(se, "target")))
- case "fmt-of,rel,target", "fmt-,,rel,target":
- return []byte(fmt.Sprintf("[%s, %s]", attrValue(se, "target"), attrValue(se, "rel")))
- case "fmt-of,sec,target", "fmt-,,sec,target":
- return []byte(fmt.Sprintf("[section %s of %s]", attrValue(se, "sec"), attrValue(se, "target")))
- case "fmt-of,rel,sec,target":
- return []byte(fmt.Sprintf("[section %s of %s, %s]", attrValue(se, "sec"), attrValue(se, "target"), attrValue(se, "rel")))
- default:
- panic(fmt.Sprintf("unknown attribute signature %q in %#v", sig, fmt.Sprintf("%#v", se)))
- }
- default:
- panic(fmt.Sprintf("unexpected tag %q", v))
- }
- }
-}
-
-var skipAnchor = map[string]bool{
- "intro": true,
- "Overview": true,
-}
-
-var skipTitle = map[string]bool{
- "Acknowledgements": true,
- "Change Log": true,
- "Document Organization": true,
- "Conventions and Terminology": true,
-}
-
-func skipElement(s xml.StartElement) bool {
- switch s.Name.Local {
- case "artwork":
- return true
- case "section":
- for _, attr := range s.Attr {
- switch attr.Name.Local {
- case "anchor":
- if skipAnchor[attr.Value] || strings.HasPrefix(attr.Value, "changes.since.") {
- return true
- }
- case "title":
- if skipTitle[attr.Value] {
- return true
- }
- }
- }
- }
- return false
-}
-
-func readSpecCov(r io.Reader) specCoverage {
- sc := specCoverage{
- coverage: map[specPart]bool{},
- d: xml.NewDecoder(r)}
- sc.readSection(nil)
- return sc
-}
-
-func (sc specCoverage) addSentences(sec string, sentence string) {
- for _, s := range parseSentences(sentence) {
- sc.coverage[specPart{sec, s}] = false
- }
-}
-
-func (sc specCoverage) cover(sec string, sentence string) {
- for _, s := range parseSentences(sentence) {
- p := specPart{sec, s}
- if _, ok := sc.coverage[p]; !ok {
- panic(fmt.Sprintf("Not found in spec: %q, %q", sec, s))
- }
- sc.coverage[specPart{sec, s}] = true
- }
-
-}
-
-var whitespaceRx = regexp.MustCompile(`\s+`)
-
-func parseSentences(sens string) []string {
- sens = strings.TrimSpace(sens)
- if sens == "" {
- return nil
- }
- ss := strings.Split(whitespaceRx.ReplaceAllString(sens, " "), ". ")
- for i, s := range ss {
- s = strings.TrimSpace(s)
- if !strings.HasSuffix(s, ".") {
- s += "."
- }
- ss[i] = s
- }
- return ss
-}
-
-func TestSpecParseSentences(t *testing.T) {
- tests := []struct {
- ss string
- want []string
- }{
- {"Sentence 1. Sentence 2.",
- []string{
- "Sentence 1.",
- "Sentence 2.",
- }},
- {"Sentence 1. \nSentence 2.\tSentence 3.",
- []string{
- "Sentence 1.",
- "Sentence 2.",
- "Sentence 3.",
- }},
- }
-
- for i, tt := range tests {
- got := parseSentences(tt.ss)
- if !reflect.DeepEqual(got, tt.want) {
- t.Errorf("%d: got = %q, want %q", i, got, tt.want)
- }
- }
-}
-
-func TestSpecCoverage(t *testing.T) {
- if !*coverSpec {
- t.Skip()
- }
-
- loadSpecOnce.Do(loadSpec)
-
- var (
- list []specPart
- cv = defaultSpecCoverage.coverage
- total = len(cv)
- complete = 0
- )
-
- for sp, touched := range defaultSpecCoverage.coverage {
- if touched {
- complete++
- } else {
- list = append(list, sp)
- }
- }
- sort.Stable(bySpecSection(list))
-
- if testing.Short() && len(list) > 5 {
- list = list[:5]
- }
-
- for _, p := range list {
- t.Errorf("\tSECTION %s: %s", p.section, p.sentence)
- }
-
-	t.Logf("%d/%d (%d%%) sentences covered", complete, total, complete*100/total)
-}
-
-func attrSig(se xml.StartElement) string {
- var names []string
- for _, attr := range se.Attr {
- if attr.Name.Local == "fmt" {
- names = append(names, "fmt-"+attr.Value)
- } else {
- names = append(names, attr.Name.Local)
- }
- }
- sort.Strings(names)
- return strings.Join(names, ",")
-}
-
-func attrValue(se xml.StartElement, attr string) string {
- for _, a := range se.Attr {
- if a.Name.Local == attr {
- return a.Value
- }
- }
- panic("unknown attribute " + attr)
-}
-
-func TestSpecPartLess(t *testing.T) {
- tests := []struct {
- sec1, sec2 string
- want bool
- }{
- {"6.2.1", "6.2", false},
- {"6.2", "6.2.1", true},
- {"6.10", "6.10.1", true},
- {"6.10", "6.1.1", false}, // 10, not 1
- {"6.1", "6.1", false}, // equal, so not less
- }
- for _, tt := range tests {
- got := (specPart{tt.sec1, "foo"}).Less(specPart{tt.sec2, "foo"})
- if got != tt.want {
- t.Errorf("Less(%q, %q) = %v; want %v", tt.sec1, tt.sec2, got, tt.want)
- }
- }
-}
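
TestSpecPartLess above pins down how dotted spec section numbers are ordered: component by component as integers, with a shorter prefix sorting before its subsections, so "6.2" < "6.10" and "6.10" < "6.10.1". A minimal sketch of that comparison (error handling omitted, names invented):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// sectionLess compares dotted section numbers numerically per component.
func sectionLess(a, b string) bool {
	as, bs := strings.Split(a, "."), strings.Split(b, ".")
	for i := 0; i < len(as) && i < len(bs); i++ {
		x, _ := strconv.Atoi(as[i])
		y, _ := strconv.Atoi(bs[i])
		if x != y {
			return x < y
		}
	}
	return len(as) < len(bs) // equal prefix: the shorter section sorts first
}

func main() {
	fmt.Println(sectionLess("6.2", "6.10"))    // true: 2 < 10 numerically
	fmt.Println(sectionLess("6.10", "6.10.1")) // true: prefix sorts first
	fmt.Println(sectionLess("6.2.1", "6.2"))   // false
}
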
diff --git a/vendor/manifest b/vendor/manifest
index 419d6291..e2f73360 100644
--- a/vendor/manifest
+++ b/vendor/manifest
@@ -247,13 +247,6 @@
"branch": "master",
"path": "/ssh/terminal"
},
- {
- "importpath": "golang.org/x/net/http2",
- "repository": "https://go.googlesource.com/net",
- "revision": "6acef71eb69611914f7a30939ea9f6e194c78172",
- "branch": "master",
- "path": "/http2"
- },
{
"importpath": "golang.org/x/text/transform",
"repository": "https://go.googlesource.com/text",