Merge pull request #1203 from hashicorp/f-gzip
Compress HTTP API responses
commit a4bb011a43

api/api.go (45 additions)

@@ -2,6 +2,7 @@ package api

 import (
     "bytes"
+    "compress/gzip"
     "encoding/json"
     "fmt"
     "io"

@@ -194,6 +195,7 @@ func (r *request) toHTTP() (*http.Request, error) {
         return nil, err
     }

+    req.Header.Add("Accept-Encoding", "gzip")
     req.URL.Host = r.url.Host
     req.URL.Scheme = r.url.Scheme
     req.Host = r.url.Host

@@ -231,6 +233,26 @@ func (c *Client) newRequest(method, path string) *request {
     return r
 }

+// multiCloser is to wrap a ReadCloser such that when close is called, multiple
+// Closes occur.
+type multiCloser struct {
+    reader       io.Reader
+    inorderClose []io.Closer
+}
+
+func (m *multiCloser) Close() error {
+    for _, c := range m.inorderClose {
+        if err := c.Close(); err != nil {
+            return err
+        }
+    }
+    return nil
+}
+
+func (m *multiCloser) Read(p []byte) (int, error) {
+    return m.reader.Read(p)
+}
+
 // doRequest runs a request with our client
 func (c *Client) doRequest(r *request) (time.Duration, *http.Response, error) {
     req, err := r.toHTTP()

@@ -240,6 +262,29 @@ func (c *Client) doRequest(r *request) (time.Duration, *http.Response, error) {
     start := time.Now()
     resp, err := c.config.HttpClient.Do(req)
     diff := time.Now().Sub(start)
+
+    // If the response is compressed, we swap the body's reader.
+    if resp != nil && resp.Header != nil {
+        var reader io.ReadCloser
+        switch resp.Header.Get("Content-Encoding") {
+        case "gzip":
+            greader, err := gzip.NewReader(resp.Body)
+            if err != nil {
+                return 0, nil, err
+            }
+
+            // The gzip reader doesn't close the wrapped reader so we use
+            // multiCloser.
+            reader = &multiCloser{
+                reader:       greader,
+                inorderClose: []io.Closer{greader, resp.Body},
+            }
+        default:
+            reader = resp.Body
+        }
+        resp.Body = reader
+    }
+
     return diff, resp, err
 }
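The client-side decompression above hinges on one detail: a `gzip.Reader`'s `Close` does not close the reader it wraps, which is why `multiCloser` closes the gzip reader first and then the original `resp.Body`. Below is a minimal standalone sketch of that same pattern, not part of the diff; the helper names (`readBody`, `closeAll`) and the local agent URL are purely illustrative:

```go
package main

import (
    "compress/gzip"
    "fmt"
    "io"
    "io/ioutil"
    "net/http"
)

// closeAll mirrors the multiCloser idea: close several Closers in order,
// returning the first error encountered.
func closeAll(closers ...io.Closer) error {
    for _, c := range closers {
        if err := c.Close(); err != nil {
            return err
        }
    }
    return nil
}

// readBody returns the decompressed body of resp, handling the case where
// the server answered with Content-Encoding: gzip.
func readBody(resp *http.Response) ([]byte, error) {
    var reader io.Reader = resp.Body
    closers := []io.Closer{resp.Body}

    if resp.Header.Get("Content-Encoding") == "gzip" {
        gz, err := gzip.NewReader(resp.Body)
        if err != nil {
            return nil, err
        }
        // Close the gzip reader first, then the underlying body.
        reader = gz
        closers = []io.Closer{gz, resp.Body}
    }

    defer closeAll(closers...)
    return ioutil.ReadAll(reader)
}

func main() {
    // Illustrative target: a local agent on the default HTTP address.
    req, _ := http.NewRequest("GET", "http://127.0.0.1:4646/v1/agent/self", nil)
    req.Header.Add("Accept-Encoding", "gzip") // opt in, as the api client now does
    resp, err := http.DefaultClient.Do(req)
    if err != nil {
        fmt.Println("request failed:", err)
        return
    }
    body, err := readBody(resp)
    fmt.Println(len(body), err)
}
```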

@@ -12,6 +12,7 @@ import (
     "strconv"
     "time"

+    "github.com/NYTimes/gziphandler"
     "github.com/hashicorp/nomad/nomad/structs"
     "github.com/ugorji/go/codec"
 )

@@ -65,7 +66,7 @@ func NewHTTPServer(agent *Agent, config *Config, logOutput io.Writer) (*HTTPServ
     srv.registerHandlers(config.EnableDebug)

     // Start the server
-    go http.Serve(ln, mux)
+    go http.Serve(ln, gziphandler.GzipHandler(mux))
     return srv, nil
 }

@@ -86,7 +87,7 @@ func newScadaHttp(agent *Agent, list net.Listener) *HTTPServer {
     srv.registerHandlers(false) // Never allow debug for SCADA

     // Start the server
-    go http.Serve(list, mux)
+    go http.Serve(list, gziphandler.GzipHandler(mux))
     return srv
 }
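On the server side, the only change is wrapping the agent's mux with `gziphandler.GzipHandler`, so compression is negotiated per request. A rough sketch of the observable effect, assuming the `github.com/NYTimes/gziphandler` package vendored below is importable; the handler body here is illustrative, not taken from the agent:

```go
package main

import (
    "fmt"
    "io"
    "net/http"
    "net/http/httptest"

    "github.com/NYTimes/gziphandler"
)

func main() {
    // A plain handler, analogous to the agent's mux.
    mux := http.NewServeMux()
    mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
        io.WriteString(w, `{"ok":true}`)
    })

    // Wrap it the same way NewHTTPServer now does.
    srv := httptest.NewServer(gziphandler.GzipHandler(mux))
    defer srv.Close()

    for _, accept := range []string{"", "gzip"} {
        req, _ := http.NewRequest("GET", srv.URL, nil)
        if accept != "" {
            req.Header.Set("Accept-Encoding", accept)
        }
        // Disable the transport's own compression handling so the header we
        // set (or omit) is exactly what goes over the wire.
        client := &http.Client{Transport: &http.Transport{DisableCompression: true}}
        resp, err := client.Do(req)
        if err != nil {
            fmt.Println("request failed:", err)
            continue
        }
        fmt.Printf("Accept-Encoding=%q -> Content-Encoding=%q\n",
            accept, resp.Header.Get("Content-Encoding"))
        resp.Body.Close()
    }
}
```

With `DisableCompression` set, Go's transport neither adds `Accept-Encoding: gzip` on its own nor strips the `Content-Encoding` header, so the printed values reflect what the wrapped handler actually did.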

@@ -0,0 +1,13 @@
Copyright (c) 2015 The New York Times Company

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this library except in compliance with the License.
You may obtain a copy of the License at

   http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

@@ -0,0 +1,52 @@
Gzip Handler
============

This is a tiny Go package which wraps HTTP handlers to transparently gzip the
response body, for clients which support it. Although it's usually simpler to
leave that to a reverse proxy (like nginx or Varnish), this package is useful
when that's undesirable.


## Usage

Call `GzipHandler` with any handler (an object which implements the
`http.Handler` interface), and it'll return a new handler which gzips the
response. For example:

```go
package main

import (
    "io"
    "net/http"
    "github.com/NYTimes/gziphandler"
)

func main() {
    withoutGz := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        w.Header().Set("Content-Type", "text/plain")
        io.WriteString(w, "Hello, World")
    })

    withGz := gziphandler.GzipHandler(withoutGz)

    http.Handle("/", withGz)
    http.ListenAndServe("0.0.0.0:8000", nil)
}
```


## Documentation

The docs can be found at [godoc.org] [docs], as usual.


## License

[Apache 2.0] [license].


[docs]: https://godoc.org/github.com/nytimes/gziphandler
[license]: https://github.com/nytimes/gziphandler/blob/master/LICENSE.md

@@ -0,0 +1,144 @@
package gziphandler

import (
    "compress/gzip"
    "fmt"
    "net/http"
    "strconv"
    "strings"
    "sync"
)

const (
    vary            = "Vary"
    acceptEncoding  = "Accept-Encoding"
    contentEncoding = "Content-Encoding"
)

type codings map[string]float64

// The default qvalue to assign to an encoding if no explicit qvalue is set.
// This is actually kind of ambiguous in RFC 2616, so hopefully it's correct.
// The examples seem to indicate that it is.
const DEFAULT_QVALUE = 1.0

var gzipWriterPool = sync.Pool{
    New: func() interface{} { return gzip.NewWriter(nil) },
}

// GzipResponseWriter provides an http.ResponseWriter interface, which gzips
// bytes before writing them to the underlying response. This doesn't set the
// Content-Encoding header, nor close the writers, so don't forget to do that.
type GzipResponseWriter struct {
    gw *gzip.Writer
    http.ResponseWriter
}

// Write appends data to the gzip writer.
func (w GzipResponseWriter) Write(b []byte) (int, error) {
    if _, ok := w.Header()["Content-Type"]; !ok {
        // If content type is not set, infer it from the uncompressed body.
        w.Header().Set("Content-Type", http.DetectContentType(b))
    }
    return w.gw.Write(b)
}

// Flush flushes the underlying *gzip.Writer and then the underlying
// http.ResponseWriter if it is an http.Flusher. This makes GzipResponseWriter
// an http.Flusher.
func (w GzipResponseWriter) Flush() {
    w.gw.Flush()
    if fw, ok := w.ResponseWriter.(http.Flusher); ok {
        fw.Flush()
    }
}

// GzipHandler wraps an HTTP handler, to transparently gzip the response body if
// the client supports it (via the Accept-Encoding header).
func GzipHandler(h http.Handler) http.Handler {
    return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        w.Header().Add(vary, acceptEncoding)

        if acceptsGzip(r) {
            // Bytes written during ServeHTTP are redirected to this gzip writer
            // before being written to the underlying response.
            gzw := gzipWriterPool.Get().(*gzip.Writer)
            defer gzipWriterPool.Put(gzw)
            gzw.Reset(w)
            defer gzw.Close()

            w.Header().Set(contentEncoding, "gzip")
            h.ServeHTTP(GzipResponseWriter{gzw, w}, r)
        } else {
            h.ServeHTTP(w, r)
        }
    })
}

// acceptsGzip returns true if the given HTTP request indicates that it will
// accept a gzipped response.
func acceptsGzip(r *http.Request) bool {
    acceptedEncodings, _ := parseEncodings(r.Header.Get(acceptEncoding))
    return acceptedEncodings["gzip"] > 0.0
}

// parseEncodings attempts to parse a list of codings, per RFC 2616, as might
// appear in an Accept-Encoding header. It returns a map of content-codings to
// quality values, and an error containing the errors encountered. It's probably
// safe to ignore those, because silently ignoring errors is how the internet
// works.
//
// See: http://tools.ietf.org/html/rfc2616#section-14.3
func parseEncodings(s string) (codings, error) {
    c := make(codings)
    e := make([]string, 0)

    for _, ss := range strings.Split(s, ",") {
        coding, qvalue, err := parseCoding(ss)

        if err != nil {
            e = append(e, err.Error())

        } else {
            c[coding] = qvalue
        }
    }

    // TODO (adammck): Use a proper multi-error struct, so the individual errors
    // can be extracted if anyone cares.
    if len(e) > 0 {
        return c, fmt.Errorf("errors while parsing encodings: %s", strings.Join(e, ", "))
    }

    return c, nil
}

// parseCoding parses a single coding (content-coding with an optional qvalue),
// as might appear in an Accept-Encoding header. It attempts to forgive minor
// formatting errors.
func parseCoding(s string) (coding string, qvalue float64, err error) {
    for n, part := range strings.Split(s, ";") {
        part = strings.TrimSpace(part)
        qvalue = DEFAULT_QVALUE

        if n == 0 {
            coding = strings.ToLower(part)

        } else if strings.HasPrefix(part, "q=") {
            qvalue, err = strconv.ParseFloat(strings.TrimPrefix(part, "q="), 64)

            if qvalue < 0.0 {
                qvalue = 0.0

            } else if qvalue > 1.0 {
                qvalue = 1.0
            }
        }
    }

    if coding == "" {
        err = fmt.Errorf("empty content-coding")
    }

    return
}
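The qvalue parsing above is what lets a client opt out with `Accept-Encoding: gzip;q=0`: `GzipHandler` compresses only when the parsed quality for `gzip` is greater than zero, and a bare `gzip` defaults to 1.0. Since `parseEncodings` and `parseCoding` are unexported, the sketch below mirrors (rather than calls) their rules with a hypothetical `gzipQValue` helper, just to show how a few headers evaluate:

```go
package main

import (
    "fmt"
    "strconv"
    "strings"
)

// gzipQValue reports the quality value the gzip coding would receive for a
// given Accept-Encoding header, following the same rules as the vendored
// parser: a missing q defaults to 1.0 and values are clamped to [0, 1].
func gzipQValue(header string) float64 {
    for _, entry := range strings.Split(header, ",") {
        q := 1.0
        coding := ""
        for i, part := range strings.Split(entry, ";") {
            part = strings.TrimSpace(part)
            if i == 0 {
                coding = strings.ToLower(part)
            } else if strings.HasPrefix(part, "q=") {
                if v, err := strconv.ParseFloat(strings.TrimPrefix(part, "q="), 64); err == nil {
                    q = v
                }
            }
        }
        if coding == "gzip" {
            if q < 0 {
                q = 0
            } else if q > 1 {
                q = 1
            }
            return q
        }
    }
    return 0
}

func main() {
    for _, h := range []string{"gzip", "gzip;q=0.5, deflate", "gzip;q=0", "identity"} {
        // The handler compresses only when the resulting qvalue is > 0.
        fmt.Printf("%-22q -> q=%.2f compress=%v\n", h, gzipQValue(h), gzipQValue(h) > 0)
    }
}
```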

@@ -2,6 +2,12 @@
     "comment": "",
     "ignore": "test",
     "package": [
+        {
+            "checksumSHA1": "XeG94RjA9o/0wo9Fuw6NSRGYnjk=",
+            "path": "github.com/NYTimes/gziphandler",
+            "revision": "63027b26b87e2ae2ce3810393d4b81021cfd3a35",
+            "revisionTime": "2016-04-19T20:25:41Z"
+        },
         {
             "comment": "v0.8.7-87-g4b6ea73",
             "path": "github.com/Sirupsen/logrus",

@@ -87,6 +87,11 @@ servicing the request. A target region can be explicitly specified with the `reg
 parameter. The request will be transparently forwarded and serviced by a server in the
 appropriate region.

+## Compressed Responses
+
+The HTTP API will gzip the response if the HTTP request denotes that the client accepts
+gzip compression. This is achieved via the standard `Accept-Encoding: gzip` header.
+
 ## Formatted JSON Output

 By default, the output of all HTTP API requests is minimized JSON. If the client passes `pretty`
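For Go consumers of the API this negotiation is transparent when going through the `api` package, since the client now sends `Accept-Encoding: gzip` and unwraps compressed bodies in `doRequest`. A minimal sketch, assuming a Nomad agent is reachable at the default address (or via `NOMAD_ADDR`):

```go
package main

import (
    "fmt"

    "github.com/hashicorp/nomad/api"
)

func main() {
    // DefaultConfig points at http://127.0.0.1:4646 unless NOMAD_ADDR is set.
    client, err := api.NewClient(api.DefaultConfig())
    if err != nil {
        fmt.Println("client setup failed:", err)
        return
    }

    // The request carries Accept-Encoding: gzip, and the response body is
    // decompressed by doRequest before the result is decoded.
    leader, err := client.Status().Leader()
    fmt.Println("leader:", leader, "err:", err)
}
```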