open-nomad/vendor/github.com/NYTimes/gziphandler/gzip.go

package gziphandler
import (
"bufio"
"compress/gzip"
"fmt"
"io"
"net"
"net/http"
"strconv"
"strings"
"sync"
)
const (
vary = "Vary"
acceptEncoding = "Accept-Encoding"
contentEncoding = "Content-Encoding"
contentType = "Content-Type"
contentLength = "Content-Length"
)
type codings map[string]float64
const (
// DefaultQValue is the default qvalue to assign to an encoding if no explicit qvalue is set.
// This is actually kind of ambiguous in RFC 2616, so hopefully it's correct.
// The examples seem to indicate that it is.
DefaultQValue = 1.0
// DefaultMinSize defines the minimum size to reach to enable compression.
// It's 512 bytes.
DefaultMinSize = 512
)
// gzipWriterPools stores a sync.Pool for each compression level for reuse of
// gzip.Writers. Use poolIndex to convert a compression level to an index into
// gzipWriterPools.
var gzipWriterPools [gzip.BestCompression - gzip.BestSpeed + 2]*sync.Pool
func init() {
for i := gzip.BestSpeed; i <= gzip.BestCompression; i++ {
addLevelPool(i)
}
addLevelPool(gzip.DefaultCompression)
}
// poolIndex maps a compression level to its index into gzipWriterPools. It
// assumes that level is a valid gzip compression level.
func poolIndex(level int) int {
// gzip.DefaultCompression == -1, so we need to treat it special.
if level == gzip.DefaultCompression {
return gzip.BestCompression - gzip.BestSpeed + 1
}
return level - gzip.BestSpeed
}
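// A worked illustration, using the constants from compress/gzip
// (BestSpeed == 1, BestCompression == 9, DefaultCompression == -1):
//
//	poolIndex(gzip.BestSpeed)          == 0
//	poolIndex(gzip.BestCompression)    == 8
//	poolIndex(gzip.DefaultCompression) == 9 // the extra slot at the end of gzipWriterPools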
func addLevelPool(level int) {
gzipWriterPools[poolIndex(level)] = &sync.Pool{
New: func() interface{} {
// NewWriterLevel only returns an error for an invalid compression level.
// We guarantee that the level is valid, so it is safe to ignore the
// returned error.
w, _ := gzip.NewWriterLevel(nil, level)
return w
},
}
}
// GzipResponseWriter provides an http.ResponseWriter interface, which gzips
// bytes before writing them to the underlying response. This doesn't close the
// writers, so don't forget to do that.
// It can be configured to skip responses smaller than minSize.
type GzipResponseWriter struct {
http.ResponseWriter
index int // Index for gzipWriterPools.
gw *gzip.Writer
code int // Saves the WriteHeader value.
minSize int // Specifies the minimum response size to gzip. If the response length is bigger than this value, it is compressed.
buf []byte // Holds the first part of the write before reaching the minSize or the end of the write.
contentTypes []string // Only compress if the response is one of these content-types. All are accepted if empty.
}
// Write appends data to the gzip writer.
func (w *GzipResponseWriter) Write(b []byte) (int, error) {
// If the Content-Type is not set, infer it from the uncompressed body.
if _, ok := w.Header()[contentType]; !ok {
w.Header().Set(contentType, http.DetectContentType(b))
}
// The GZIP responseWriter is already initialized; use it.
if w.gw != nil {
n, err := w.gw.Write(b)
return n, err
}
// Save the write into a buffer for later use by the GZIP responseWriter (if the content is long enough) or by the regular responseWriter at close.
// On the first write, w.buf changes from nil to a valid slice
w.buf = append(w.buf, b...)
// If the accumulated writes have reached minSize, the response has a content
// type we want to handle, and no Content-Encoding is set yet, enable
// compression.
if len(w.buf) >= w.minSize && handleContentType(w.contentTypes, w) && w.Header().Get(contentEncoding) == "" {
err := w.startGzip()
if err != nil {
return 0, err
}
}
return len(b), nil
}
// startGzip initializes any GZIP-specific information.
func (w *GzipResponseWriter) startGzip() error {
// Set the GZIP header.
w.Header().Set(contentEncoding, "gzip")
// If the Content-Length is already set, then calls to Write on gzip
// will fail to set the Content-Length header since it's already set.
// See: https://github.com/golang/go/issues/14975.
w.Header().Del(contentLength)
// Write the header to gzip response.
if w.code != 0 {
w.ResponseWriter.WriteHeader(w.code)
}
// Initialize the GZIP response.
w.init()
// Flush the buffer into the gzip response.
n, err := w.gw.Write(w.buf)
// This should never happen (per io.Writer docs), but if the write didn't
// accept the entire buffer but returned no specific error, we have no clue
// what's going on, so abort just to be safe.
if err == nil && n < len(w.buf) {
return io.ErrShortWrite
}
w.buf = nil
return err
}
// WriteHeader just saves the response code until Close or the first GZIP write.
func (w *GzipResponseWriter) WriteHeader(code int) {
w.code = code
}
// init grabs a new gzip writer from the gzipWriterPool and resets it to write
// to the underlying ResponseWriter.
func (w *GzipResponseWriter) init() {
// Bytes written during ServeHTTP are redirected to this gzip writer
// before being written to the underlying response.
gzw := gzipWriterPools[w.index].Get().(*gzip.Writer)
gzw.Reset(w.ResponseWriter)
w.gw = gzw
}
// Close will close the gzip.Writer and will put it back in the gzipWriterPool.
func (w *GzipResponseWriter) Close() error {
if w.gw == nil {
// Gzip not triggered yet, write out regular response.
if w.code != 0 {
w.ResponseWriter.WriteHeader(w.code)
}
if w.buf != nil {
_, writeErr := w.ResponseWriter.Write(w.buf)
// Return any error raised by the write.
if writeErr != nil {
return fmt.Errorf("gziphandler: write to regular responseWriter at close gets error: %q", writeErr.Error())
}
}
return nil
}
err := w.gw.Close()
gzipWriterPools[w.index].Put(w.gw)
w.gw = nil
return err
}
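// A simplified sketch of the writer's lifecycle as driven by the middleware
// returned from GzipHandlerWithOpts below (illustrative only, not extra API):
//
//	gw := &GzipResponseWriter{
//		ResponseWriter: w,
//		index:          poolIndex(gzip.DefaultCompression),
//		minSize:        DefaultMinSize,
//	}
//	defer gw.Close()         // emits either the gzip stream or the small buffered response
//	handler.ServeHTTP(gw, r) // writes are buffered until minSize, then startGzip takes over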
// Flush flushes the underlying *gzip.Writer and then the underlying
// http.ResponseWriter if it is an http.Flusher. This makes GzipResponseWriter
// an http.Flusher.
func (w *GzipResponseWriter) Flush() {
if w.gw != nil {
w.gw.Flush()
}
if fw, ok := w.ResponseWriter.(http.Flusher); ok {
fw.Flush()
}
}
// Hijack implements http.Hijacker. If the underlying ResponseWriter is a
// Hijacker, its Hijack method is called. Otherwise an error is returned.
func (w *GzipResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
if hj, ok := w.ResponseWriter.(http.Hijacker); ok {
return hj.Hijack()
}
return nil, nil, fmt.Errorf("http.Hijacker interface is not supported")
}
// verify Hijacker interface implementation
var _ http.Hijacker = &GzipResponseWriter{}
// MustNewGzipLevelHandler behaves just like NewGzipLevelHandler except that in
// an error case it panics rather than returning an error.
func MustNewGzipLevelHandler(level int) func(http.Handler) http.Handler {
wrap, err := NewGzipLevelHandler(level)
if err != nil {
panic(err)
}
return wrap
}
// NewGzipLevelHandler returns a wrapper function (often known as middleware)
// which can be used to wrap an HTTP handler to transparently gzip the response
// body if the client supports it (via the Accept-Encoding header). Responses will
// be encoded at the given gzip compression level. An error will be returned only
// if an invalid gzip compression level is given, so if one can ensure the level
// is valid, the returned error can be safely ignored.
func NewGzipLevelHandler(level int) (func(http.Handler) http.Handler, error) {
return NewGzipLevelAndMinSize(level, DefaultMinSize)
}
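// A minimal usage sketch (staticHandler is a placeholder, not part of this
// package):
//
//	wrap, err := NewGzipLevelHandler(gzip.BestSpeed)
//	if err != nil {
//		log.Fatal(err)
//	}
//	http.Handle("/static/", wrap(staticHandler))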
// NewGzipLevelAndMinSize behaves like NewGzipLevelHandler except that it lets
// the caller specify the minimum size before compression.
func NewGzipLevelAndMinSize(level, minSize int) (func(http.Handler) http.Handler, error) {
return GzipHandlerWithOpts(CompressionLevel(level), MinSize(minSize))
}
// GzipHandlerWithOpts builds the same middleware from functional options,
// defaulting to gzip.DefaultCompression and DefaultMinSize when no option
// overrides them.
func GzipHandlerWithOpts(opts ...option) (func(http.Handler) http.Handler, error) {
c := &config{
level: gzip.DefaultCompression,
minSize: DefaultMinSize,
}
for _, o := range opts {
o(c)
}
if err := c.validate(); err != nil {
return nil, err
}
return func(h http.Handler) http.Handler {
index := poolIndex(c.level)
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Add(vary, acceptEncoding)
if acceptsGzip(r) {
gw := &GzipResponseWriter{
ResponseWriter: w,
index: index,
minSize: c.minSize,
contentTypes: c.contentTypes,
}
defer gw.Close()
h.ServeHTTP(gw, r)
} else {
h.ServeHTTP(w, r)
}
})
}, nil
}
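// A sketch of the functional-options form; the option values and apiHandler
// are illustrative:
//
//	wrap, err := GzipHandlerWithOpts(
//		CompressionLevel(gzip.BestCompression),
//		MinSize(1024),
//		ContentTypes([]string{"application/json", "text/html"}),
//	)
//	if err != nil {
//		log.Fatal(err)
//	}
//	http.Handle("/api/", wrap(apiHandler))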
// Used for functional configuration.
type config struct {
minSize int
level int
contentTypes []string
}
func (c *config) validate() error {
if c.level != gzip.DefaultCompression && (c.level < gzip.BestSpeed || c.level > gzip.BestCompression) {
return fmt.Errorf("invalid compression level requested: %d", c.level)
}
if c.minSize < 0 {
return fmt.Errorf("minimum size must be more than zero")
}
return nil
}
type option func(c *config)
// MinSize is an option that sets the minimum number of bytes a response must
// reach before it is compressed.
func MinSize(size int) option {
return func(c *config) {
c.minSize = size
}
}
// CompressionLevel is an option that sets the gzip compression level to use.
func CompressionLevel(level int) option {
return func(c *config) {
c.level = level
}
}
// ContentTypes is an option that restricts compression to responses whose
// Content-Type exactly equals one of the given types (compared
// case-insensitively). An empty list means every content type is compressed.
func ContentTypes(types []string) option {
return func(c *config) {
c.contentTypes = []string{}
for _, v := range types {
c.contentTypes = append(c.contentTypes, strings.ToLower(v))
}
}
}
// GzipHandler wraps an HTTP handler, to transparently gzip the response body if
// the client supports it (via the Accept-Encoding header). This will compress at
// the default compression level.
func GzipHandler(h http.Handler) http.Handler {
wrapper, _ := NewGzipLevelHandler(gzip.DefaultCompression)
return wrapper(h)
}
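// A complete, minimal server using the default wrapper (the address and the
// handler body are placeholders):
//
//	mux := http.NewServeMux()
//	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
//		io.WriteString(w, strings.Repeat("hello, gzip! ", 100)) // well above DefaultMinSize
//	})
//	log.Fatal(http.ListenAndServe(":8080", GzipHandler(mux)))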
// acceptsGzip returns true if the given HTTP request indicates that it will
// accept a gzipped response.
func acceptsGzip(r *http.Request) bool {
acceptedEncodings, _ := parseEncodings(r.Header.Get(acceptEncoding))
return acceptedEncodings["gzip"] > 0.0
}
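// For example (header values are illustrative):
//
//	Accept-Encoding: gzip, deflate  -> true  (gzip gets the default qvalue of 1.0)
//	Accept-Encoding: gzip;q=0       -> false (an explicit qvalue of 0 opts out)
//	Accept-Encoding: identity       -> false (gzip is absent, so its qvalue is the zero value)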
// handleContentType returns true if we've been configured to compress the specific content type.
func handleContentType(contentTypes []string, w http.ResponseWriter) bool {
// If contentTypes is empty we handle all content types.
if len(contentTypes) == 0 {
return true
}
ct := strings.ToLower(w.Header().Get(contentType))
for _, c := range contentTypes {
if c == ct {
return true
}
}
return false
}
// parseEncodings attempts to parse a list of codings, per RFC 2616, as might
// appear in an Accept-Encoding header. It returns a map of content-codings to
// quality values, and an error containing the errors encountered. It's probably
// safe to ignore those, because silently ignoring errors is how the internet
// works.
//
// See: http://tools.ietf.org/html/rfc2616#section-14.3.
func parseEncodings(s string) (codings, error) {
c := make(codings)
var e []string
for _, ss := range strings.Split(s, ",") {
coding, qvalue, err := parseCoding(ss)
if err != nil {
e = append(e, err.Error())
} else {
c[coding] = qvalue
}
}
// TODO (adammck): Use a proper multi-error struct, so the individual errors
// can be extracted if anyone cares.
if len(e) > 0 {
return c, fmt.Errorf("errors while parsing encodings: %s", strings.Join(e, ", "))
}
return c, nil
}
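// For example, a typical Accept-Encoding value parses as (a worked example,
// not a test fixture):
//
//	parseEncodings("gzip;q=1.0, identity; q=0.5, *;q=0")
//	// -> codings{"gzip": 1.0, "identity": 0.5, "*": 0}, nil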
// parseCoding parses a single coding (content-coding with an optional qvalue),
// as might appear in an Accept-Encoding header. It attempts to forgive minor
// formatting errors.
func parseCoding(s string) (coding string, qvalue float64, err error) {
for n, part := range strings.Split(s, ";") {
part = strings.TrimSpace(part)
qvalue = DefaultQValue
if n == 0 {
coding = strings.ToLower(part)
} else if strings.HasPrefix(part, "q=") {
qvalue, err = strconv.ParseFloat(strings.TrimPrefix(part, "q="), 64)
if qvalue < 0.0 {
qvalue = 0.0
} else if qvalue > 1.0 {
qvalue = 1.0
}
}
}
if coding == "" {
err = fmt.Errorf("empty content-coding")
}
return
}
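// Some worked examples of the forgiving parsing above (illustrative only):
//
//	parseCoding("GZIP ; q=0.8") // -> "gzip", 0.8, nil
//	parseCoding("br")           // -> "br", 1.0, nil
//	parseCoding("")             // -> "", 1.0, an "empty content-coding" error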