diff --git a/go.mod b/go.mod index d4b28e3a2..82dfdcede 100644 --- a/go.mod +++ b/go.mod @@ -29,7 +29,6 @@ require ( github.com/cockroachdb/apd v1.1.0 // indirect github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c github.com/coreos/go-semver v0.2.0 - github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d // indirect github.com/denisenkom/go-mssqldb v0.0.0-20190412130859-3b1d194e553a github.com/dnaeon/go-vcr v1.0.1 // indirect github.com/dsnet/compress v0.0.1 // indirect @@ -48,7 +47,6 @@ require ( github.com/golang/protobuf v1.3.2 github.com/google/go-github v17.0.0+incompatible github.com/google/go-metrics-stackdriver v0.0.0-20190816035513-b52628e82e2a - github.com/google/go-querystring v1.0.0 // indirect github.com/hashicorp/consul-template v0.22.0 github.com/hashicorp/consul/api v1.1.0 github.com/hashicorp/errwrap v1.0.0 @@ -93,7 +91,6 @@ require ( github.com/joyent/triton-go v0.0.0-20190112182421-51ffac552869 github.com/keybase/go-crypto v0.0.0-20190403132359-d65b6b94177f github.com/kr/pretty v0.1.0 - github.com/kr/pty v1.1.3 // indirect github.com/kr/text v0.1.0 github.com/lib/pq v1.2.0 github.com/mattn/go-colorable v0.1.2 @@ -108,7 +105,6 @@ require ( github.com/ncw/swift v1.0.47 github.com/nwaples/rardecode v1.0.0 // indirect github.com/oklog/run v1.0.0 - github.com/onsi/ginkgo v1.7.0 // indirect github.com/oracle/oci-go-sdk v7.0.0+incompatible github.com/ory/dockertest v3.3.4+incompatible github.com/patrickmn/go-cache v2.1.0+incompatible diff --git a/go.sum b/go.sum index 8bdced0f1..c401c5a01 100644 --- a/go.sum +++ b/go.sum @@ -236,6 +236,7 @@ github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXi github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/uuid v1.0.0 h1:b4Gk+7WdP/d3HZH8EJsZpvV7EtDOgaZLtnaNGIu1adA= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go 
v2.0.0+incompatible h1:j0GKcs05QVmm7yesiZq2+9cxHkNK9YM6zKx4D2qucQU= github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= github.com/googleapis/gax-go/v2 v2.0.4 h1:hU4mGcQI4DaAYW+IbTun+2qEZVFxK0ySjQLTbS0VQKc= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= diff --git a/vendor/github.com/BurntSushi/toml/.gitignore b/vendor/github.com/BurntSushi/toml/.gitignore new file mode 100644 index 000000000..0cd380037 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/.gitignore @@ -0,0 +1,5 @@ +TAGS +tags +.*.swp +tomlcheck/tomlcheck +toml.test diff --git a/vendor/github.com/BurntSushi/toml/.travis.yml b/vendor/github.com/BurntSushi/toml/.travis.yml new file mode 100644 index 000000000..8b8afc4f0 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/.travis.yml @@ -0,0 +1,15 @@ +language: go +go: + - 1.1 + - 1.2 + - 1.3 + - 1.4 + - 1.5 + - 1.6 + - tip +install: + - go install ./... + - go get github.com/BurntSushi/toml-test +script: + - export PATH="$PATH:$HOME/gopath/bin" + - make test diff --git a/vendor/github.com/BurntSushi/toml/COMPATIBLE b/vendor/github.com/BurntSushi/toml/COMPATIBLE new file mode 100644 index 000000000..6efcfd0ce --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/COMPATIBLE @@ -0,0 +1,3 @@ +Compatible with TOML version +[v0.4.0](https://github.com/toml-lang/toml/blob/v0.4.0/versions/en/toml-v0.4.0.md) + diff --git a/vendor/github.com/BurntSushi/toml/COPYING b/vendor/github.com/BurntSushi/toml/COPYING new file mode 100644 index 000000000..01b574320 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/COPYING @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 TOML authors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, 
distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/BurntSushi/toml/Makefile b/vendor/github.com/BurntSushi/toml/Makefile new file mode 100644 index 000000000..3600848d3 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/Makefile @@ -0,0 +1,19 @@ +install: + go install ./... + +test: install + go test -v + toml-test toml-test-decoder + toml-test -encoder toml-test-encoder + +fmt: + gofmt -w *.go */*.go + colcheck *.go */*.go + +tags: + find ./ -name '*.go' -print0 | xargs -0 gotags > TAGS + +push: + git push origin master + git push github master + diff --git a/vendor/github.com/BurntSushi/toml/README.md b/vendor/github.com/BurntSushi/toml/README.md new file mode 100644 index 000000000..7c1b37ecc --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/README.md @@ -0,0 +1,218 @@ +## TOML parser and encoder for Go with reflection + +TOML stands for Tom's Obvious, Minimal Language. This Go package provides a +reflection interface similar to Go's standard library `json` and `xml` +packages. This package also supports the `encoding.TextUnmarshaler` and +`encoding.TextMarshaler` interfaces so that you can define custom data +representations. (There is an example of this below.) 
+ +Spec: https://github.com/toml-lang/toml + +Compatible with TOML version +[v0.4.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md) + +Documentation: https://godoc.org/github.com/BurntSushi/toml + +Installation: + +```bash +go get github.com/BurntSushi/toml +``` + +Try the toml validator: + +```bash +go get github.com/BurntSushi/toml/cmd/tomlv +tomlv some-toml-file.toml +``` + +[![Build Status](https://travis-ci.org/BurntSushi/toml.svg?branch=master)](https://travis-ci.org/BurntSushi/toml) [![GoDoc](https://godoc.org/github.com/BurntSushi/toml?status.svg)](https://godoc.org/github.com/BurntSushi/toml) + +### Testing + +This package passes all tests in +[toml-test](https://github.com/BurntSushi/toml-test) for both the decoder +and the encoder. + +### Examples + +This package works similarly to how the Go standard library handles `XML` +and `JSON`. Namely, data is loaded into Go values via reflection. + +For the simplest example, consider some TOML file as just a list of keys +and values: + +```toml +Age = 25 +Cats = [ "Cauchy", "Plato" ] +Pi = 3.14 +Perfection = [ 6, 28, 496, 8128 ] +DOB = 1987-07-05T05:45:00Z +``` + +Which could be defined in Go as: + +```go +type Config struct { + Age int + Cats []string + Pi float64 + Perfection []int + DOB time.Time // requires `import time` +} +``` + +And then decoded with: + +```go +var conf Config +if _, err := toml.Decode(tomlData, &conf); err != nil { + // handle error +} +``` + +You can also use struct tags if your struct field name doesn't map to a TOML +key value directly: + +```toml +some_key_NAME = "wat" +``` + +```go +type TOML struct { + ObscureKey string `toml:"some_key_NAME"` +} +``` + +### Using the `encoding.TextUnmarshaler` interface + +Here's an example that automatically parses duration strings into +`time.Duration` values: + +```toml +[[song]] +name = "Thunder Road" +duration = "4m49s" + +[[song]] +name = "Stairway to Heaven" +duration = "8m03s" +``` + +Which can be decoded with: + 
+```go +type song struct { + Name string + Duration duration +} +type songs struct { + Song []song +} +var favorites songs +if _, err := toml.Decode(blob, &favorites); err != nil { + log.Fatal(err) +} + +for _, s := range favorites.Song { + fmt.Printf("%s (%s)\n", s.Name, s.Duration) +} +``` + +And you'll also need a `duration` type that satisfies the +`encoding.TextUnmarshaler` interface: + +```go +type duration struct { + time.Duration +} + +func (d *duration) UnmarshalText(text []byte) error { + var err error + d.Duration, err = time.ParseDuration(string(text)) + return err +} +``` + +### More complex usage + +Here's an example of how to load the example from the official spec page: + +```toml +# This is a TOML document. Boom. + +title = "TOML Example" + +[owner] +name = "Tom Preston-Werner" +organization = "GitHub" +bio = "GitHub Cofounder & CEO\nLikes tater tots and beer." +dob = 1979-05-27T07:32:00Z # First class dates? Why not? + +[database] +server = "192.168.1.1" +ports = [ 8001, 8001, 8002 ] +connection_max = 5000 +enabled = true + +[servers] + + # You can indent as you please. Tabs or spaces. TOML don't care. 
+ [servers.alpha] + ip = "10.0.0.1" + dc = "eqdc10" + + [servers.beta] + ip = "10.0.0.2" + dc = "eqdc10" + +[clients] +data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it + +# Line breaks are OK when inside arrays +hosts = [ + "alpha", + "omega" +] +``` + +And the corresponding Go types are: + +```go +type tomlConfig struct { + Title string + Owner ownerInfo + DB database `toml:"database"` + Servers map[string]server + Clients clients +} + +type ownerInfo struct { + Name string + Org string `toml:"organization"` + Bio string + DOB time.Time +} + +type database struct { + Server string + Ports []int + ConnMax int `toml:"connection_max"` + Enabled bool +} + +type server struct { + IP string + DC string +} + +type clients struct { + Data [][]interface{} + Hosts []string +} +``` + +Note that a case insensitive match will be tried if an exact match can't be +found. + +A working example of the above can be found in `_examples/example.{go,toml}`. diff --git a/vendor/github.com/BurntSushi/toml/decode.go b/vendor/github.com/BurntSushi/toml/decode.go new file mode 100644 index 000000000..b0fd51d5b --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/decode.go @@ -0,0 +1,509 @@ +package toml + +import ( + "fmt" + "io" + "io/ioutil" + "math" + "reflect" + "strings" + "time" +) + +func e(format string, args ...interface{}) error { + return fmt.Errorf("toml: "+format, args...) +} + +// Unmarshaler is the interface implemented by objects that can unmarshal a +// TOML description of themselves. +type Unmarshaler interface { + UnmarshalTOML(interface{}) error +} + +// Unmarshal decodes the contents of `p` in TOML format into a pointer `v`. +func Unmarshal(p []byte, v interface{}) error { + _, err := Decode(string(p), v) + return err +} + +// Primitive is a TOML value that hasn't been decoded into a Go value. +// When using the various `Decode*` functions, the type `Primitive` may +// be given to any value, and its decoding will be delayed. 
+// +// A `Primitive` value can be decoded using the `PrimitiveDecode` function. +// +// The underlying representation of a `Primitive` value is subject to change. +// Do not rely on it. +// +// N.B. Primitive values are still parsed, so using them will only avoid +// the overhead of reflection. They can be useful when you don't know the +// exact type of TOML data until run time. +type Primitive struct { + undecoded interface{} + context Key +} + +// DEPRECATED! +// +// Use MetaData.PrimitiveDecode instead. +func PrimitiveDecode(primValue Primitive, v interface{}) error { + md := MetaData{decoded: make(map[string]bool)} + return md.unify(primValue.undecoded, rvalue(v)) +} + +// PrimitiveDecode is just like the other `Decode*` functions, except it +// decodes a TOML value that has already been parsed. Valid primitive values +// can *only* be obtained from values filled by the decoder functions, +// including this method. (i.e., `v` may contain more `Primitive` +// values.) +// +// Meta data for primitive values is included in the meta data returned by +// the `Decode*` functions with one exception: keys returned by the Undecoded +// method will only reflect keys that were decoded. Namely, any keys hidden +// behind a Primitive will be considered undecoded. Executing this method will +// update the undecoded keys in the meta data. (See the example.) +func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error { + md.context = primValue.context + defer func() { md.context = nil }() + return md.unify(primValue.undecoded, rvalue(v)) +} + +// Decode will decode the contents of `data` in TOML format into a pointer +// `v`. +// +// TOML hashes correspond to Go structs or maps. (Dealer's choice. They can be +// used interchangeably.) +// +// TOML arrays of tables correspond to either a slice of structs or a slice +// of maps. +// +// TOML datetimes correspond to Go `time.Time` values. 
+// +// All other TOML types (float, string, int, bool and array) correspond +// to the obvious Go types. +// +// An exception to the above rules is if a type implements the +// encoding.TextUnmarshaler interface. In this case, any primitive TOML value +// (floats, strings, integers, booleans and datetimes) will be converted to +// a byte string and given to the value's UnmarshalText method. See the +// Unmarshaler example for a demonstration with time duration strings. +// +// Key mapping +// +// TOML keys can map to either keys in a Go map or field names in a Go +// struct. The special `toml` struct tag may be used to map TOML keys to +// struct fields that don't match the key name exactly. (See the example.) +// A case insensitive match to struct names will be tried if an exact match +// can't be found. +// +// The mapping between TOML values and Go values is loose. That is, there +// may exist TOML values that cannot be placed into your representation, and +// there may be parts of your representation that do not correspond to +// TOML values. This loose mapping can be made stricter by using the IsDefined +// and/or Undecoded methods on the MetaData returned. +// +// This decoder will not handle cyclic types. If a cyclic type is passed, +// `Decode` will not terminate. +func Decode(data string, v interface{}) (MetaData, error) { + rv := reflect.ValueOf(v) + if rv.Kind() != reflect.Ptr { + return MetaData{}, e("Decode of non-pointer %s", reflect.TypeOf(v)) + } + if rv.IsNil() { + return MetaData{}, e("Decode of nil %s", reflect.TypeOf(v)) + } + p, err := parse(data) + if err != nil { + return MetaData{}, err + } + md := MetaData{ + p.mapping, p.types, p.ordered, + make(map[string]bool, len(p.ordered)), nil, + } + return md, md.unify(p.mapping, indirect(rv)) +} + +// DecodeFile is just like Decode, except it will automatically read the +// contents of the file at `fpath` and decode it for you. 
+func DecodeFile(fpath string, v interface{}) (MetaData, error) { + bs, err := ioutil.ReadFile(fpath) + if err != nil { + return MetaData{}, err + } + return Decode(string(bs), v) +} + +// DecodeReader is just like Decode, except it will consume all bytes +// from the reader and decode it for you. +func DecodeReader(r io.Reader, v interface{}) (MetaData, error) { + bs, err := ioutil.ReadAll(r) + if err != nil { + return MetaData{}, err + } + return Decode(string(bs), v) +} + +// unify performs a sort of type unification based on the structure of `rv`, +// which is the client representation. +// +// Any type mismatch produces an error. Finding a type that we don't know +// how to handle produces an unsupported type error. +func (md *MetaData) unify(data interface{}, rv reflect.Value) error { + + // Special case. Look for a `Primitive` value. + if rv.Type() == reflect.TypeOf((*Primitive)(nil)).Elem() { + // Save the undecoded data and the key context into the primitive + // value. + context := make(Key, len(md.context)) + copy(context, md.context) + rv.Set(reflect.ValueOf(Primitive{ + undecoded: data, + context: context, + })) + return nil + } + + // Special case. Unmarshaler Interface support. + if rv.CanAddr() { + if v, ok := rv.Addr().Interface().(Unmarshaler); ok { + return v.UnmarshalTOML(data) + } + } + + // Special case. Handle time.Time values specifically. + // TODO: Remove this code when we decide to drop support for Go 1.1. + // This isn't necessary in Go 1.2 because time.Time satisfies the encoding + // interfaces. + if rv.Type().AssignableTo(rvalue(time.Time{}).Type()) { + return md.unifyDatetime(data, rv) + } + + // Special case. Look for a value satisfying the TextUnmarshaler interface. + if v, ok := rv.Interface().(TextUnmarshaler); ok { + return md.unifyText(data, v) + } + // BUG(burntsushi) + // The behavior here is incorrect whenever a Go type satisfies the + // encoding.TextUnmarshaler interface but also corresponds to a TOML + // hash or array. 
In particular, the unmarshaler should only be applied + // to primitive TOML values. But at this point, it will be applied to + // all kinds of values and produce an incorrect error whenever those values + // are hashes or arrays (including arrays of tables). + + k := rv.Kind() + + // laziness + if k >= reflect.Int && k <= reflect.Uint64 { + return md.unifyInt(data, rv) + } + switch k { + case reflect.Ptr: + elem := reflect.New(rv.Type().Elem()) + err := md.unify(data, reflect.Indirect(elem)) + if err != nil { + return err + } + rv.Set(elem) + return nil + case reflect.Struct: + return md.unifyStruct(data, rv) + case reflect.Map: + return md.unifyMap(data, rv) + case reflect.Array: + return md.unifyArray(data, rv) + case reflect.Slice: + return md.unifySlice(data, rv) + case reflect.String: + return md.unifyString(data, rv) + case reflect.Bool: + return md.unifyBool(data, rv) + case reflect.Interface: + // we only support empty interfaces. + if rv.NumMethod() > 0 { + return e("unsupported type %s", rv.Type()) + } + return md.unifyAnything(data, rv) + case reflect.Float32: + fallthrough + case reflect.Float64: + return md.unifyFloat64(data, rv) + } + return e("unsupported type %s", rv.Kind()) +} + +func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error { + tmap, ok := mapping.(map[string]interface{}) + if !ok { + if mapping == nil { + return nil + } + return e("type mismatch for %s: expected table but found %T", + rv.Type().String(), mapping) + } + + for key, datum := range tmap { + var f *field + fields := cachedTypeFields(rv.Type()) + for i := range fields { + ff := &fields[i] + if ff.name == key { + f = ff + break + } + if f == nil && strings.EqualFold(ff.name, key) { + f = ff + } + } + if f != nil { + subv := rv + for _, i := range f.index { + subv = indirect(subv.Field(i)) + } + if isUnifiable(subv) { + md.decoded[md.context.add(key).String()] = true + md.context = append(md.context, key) + if err := md.unify(datum, subv); err != nil { + 
return err + } + md.context = md.context[0 : len(md.context)-1] + } else if f.name != "" { + // Bad user! No soup for you! + return e("cannot write unexported field %s.%s", + rv.Type().String(), f.name) + } + } + } + return nil +} + +func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error { + tmap, ok := mapping.(map[string]interface{}) + if !ok { + if tmap == nil { + return nil + } + return badtype("map", mapping) + } + if rv.IsNil() { + rv.Set(reflect.MakeMap(rv.Type())) + } + for k, v := range tmap { + md.decoded[md.context.add(k).String()] = true + md.context = append(md.context, k) + + rvkey := indirect(reflect.New(rv.Type().Key())) + rvval := reflect.Indirect(reflect.New(rv.Type().Elem())) + if err := md.unify(v, rvval); err != nil { + return err + } + md.context = md.context[0 : len(md.context)-1] + + rvkey.SetString(k) + rv.SetMapIndex(rvkey, rvval) + } + return nil +} + +func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error { + datav := reflect.ValueOf(data) + if datav.Kind() != reflect.Slice { + if !datav.IsValid() { + return nil + } + return badtype("slice", data) + } + sliceLen := datav.Len() + if sliceLen != rv.Len() { + return e("expected array length %d; got TOML array of length %d", + rv.Len(), sliceLen) + } + return md.unifySliceArray(datav, rv) +} + +func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error { + datav := reflect.ValueOf(data) + if datav.Kind() != reflect.Slice { + if !datav.IsValid() { + return nil + } + return badtype("slice", data) + } + n := datav.Len() + if rv.IsNil() || rv.Cap() < n { + rv.Set(reflect.MakeSlice(rv.Type(), n, n)) + } + rv.SetLen(n) + return md.unifySliceArray(datav, rv) +} + +func (md *MetaData) unifySliceArray(data, rv reflect.Value) error { + sliceLen := data.Len() + for i := 0; i < sliceLen; i++ { + v := data.Index(i).Interface() + sliceval := indirect(rv.Index(i)) + if err := md.unify(v, sliceval); err != nil { + return err + } + } + return nil +} + +func 
(md *MetaData) unifyDatetime(data interface{}, rv reflect.Value) error { + if _, ok := data.(time.Time); ok { + rv.Set(reflect.ValueOf(data)) + return nil + } + return badtype("time.Time", data) +} + +func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error { + if s, ok := data.(string); ok { + rv.SetString(s) + return nil + } + return badtype("string", data) +} + +func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error { + if num, ok := data.(float64); ok { + switch rv.Kind() { + case reflect.Float32: + fallthrough + case reflect.Float64: + rv.SetFloat(num) + default: + panic("bug") + } + return nil + } + return badtype("float", data) +} + +func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error { + if num, ok := data.(int64); ok { + if rv.Kind() >= reflect.Int && rv.Kind() <= reflect.Int64 { + switch rv.Kind() { + case reflect.Int, reflect.Int64: + // No bounds checking necessary. + case reflect.Int8: + if num < math.MinInt8 || num > math.MaxInt8 { + return e("value %d is out of range for int8", num) + } + case reflect.Int16: + if num < math.MinInt16 || num > math.MaxInt16 { + return e("value %d is out of range for int16", num) + } + case reflect.Int32: + if num < math.MinInt32 || num > math.MaxInt32 { + return e("value %d is out of range for int32", num) + } + } + rv.SetInt(num) + } else if rv.Kind() >= reflect.Uint && rv.Kind() <= reflect.Uint64 { + unum := uint64(num) + switch rv.Kind() { + case reflect.Uint, reflect.Uint64: + // No bounds checking necessary. 
+ case reflect.Uint8: + if num < 0 || unum > math.MaxUint8 { + return e("value %d is out of range for uint8", num) + } + case reflect.Uint16: + if num < 0 || unum > math.MaxUint16 { + return e("value %d is out of range for uint16", num) + } + case reflect.Uint32: + if num < 0 || unum > math.MaxUint32 { + return e("value %d is out of range for uint32", num) + } + } + rv.SetUint(unum) + } else { + panic("unreachable") + } + return nil + } + return badtype("integer", data) +} + +func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error { + if b, ok := data.(bool); ok { + rv.SetBool(b) + return nil + } + return badtype("boolean", data) +} + +func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error { + rv.Set(reflect.ValueOf(data)) + return nil +} + +func (md *MetaData) unifyText(data interface{}, v TextUnmarshaler) error { + var s string + switch sdata := data.(type) { + case TextMarshaler: + text, err := sdata.MarshalText() + if err != nil { + return err + } + s = string(text) + case fmt.Stringer: + s = sdata.String() + case string: + s = sdata + case bool: + s = fmt.Sprintf("%v", sdata) + case int64: + s = fmt.Sprintf("%d", sdata) + case float64: + s = fmt.Sprintf("%f", sdata) + default: + return badtype("primitive (string-like)", data) + } + if err := v.UnmarshalText([]byte(s)); err != nil { + return err + } + return nil +} + +// rvalue returns a reflect.Value of `v`. All pointers are resolved. +func rvalue(v interface{}) reflect.Value { + return indirect(reflect.ValueOf(v)) +} + +// indirect returns the value pointed to by a pointer. +// Pointers are followed until the value is not a pointer. +// New values are allocated for each nil pointer. +// +// An exception to this rule is if the value satisfies an interface of +// interest to us (like encoding.TextUnmarshaler). 
+func indirect(v reflect.Value) reflect.Value { + if v.Kind() != reflect.Ptr { + if v.CanSet() { + pv := v.Addr() + if _, ok := pv.Interface().(TextUnmarshaler); ok { + return pv + } + } + return v + } + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + return indirect(reflect.Indirect(v)) +} + +func isUnifiable(rv reflect.Value) bool { + if rv.CanSet() { + return true + } + if _, ok := rv.Interface().(TextUnmarshaler); ok { + return true + } + return false +} + +func badtype(expected string, data interface{}) error { + return e("cannot load TOML value of type %T into a Go %s", data, expected) +} diff --git a/vendor/github.com/BurntSushi/toml/decode_meta.go b/vendor/github.com/BurntSushi/toml/decode_meta.go new file mode 100644 index 000000000..b9914a679 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/decode_meta.go @@ -0,0 +1,121 @@ +package toml + +import "strings" + +// MetaData allows access to meta information about TOML data that may not +// be inferrable via reflection. In particular, whether a key has been defined +// and the TOML type of a key. +type MetaData struct { + mapping map[string]interface{} + types map[string]tomlType + keys []Key + decoded map[string]bool + context Key // Used only during decoding. +} + +// IsDefined returns true if the key given exists in the TOML data. The key +// should be specified hierarchially. e.g., +// +// // access the TOML key 'a.b.c' +// IsDefined("a", "b", "c") +// +// IsDefined will return false if an empty key given. Keys are case sensitive. +func (md *MetaData) IsDefined(key ...string) bool { + if len(key) == 0 { + return false + } + + var hash map[string]interface{} + var ok bool + var hashOrVal interface{} = md.mapping + for _, k := range key { + if hash, ok = hashOrVal.(map[string]interface{}); !ok { + return false + } + if hashOrVal, ok = hash[k]; !ok { + return false + } + } + return true +} + +// Type returns a string representation of the type of the key specified. 
+// +// Type will return the empty string if given an empty key or a key that +// does not exist. Keys are case sensitive. +func (md *MetaData) Type(key ...string) string { + fullkey := strings.Join(key, ".") + if typ, ok := md.types[fullkey]; ok { + return typ.typeString() + } + return "" +} + +// Key is the type of any TOML key, including key groups. Use (MetaData).Keys +// to get values of this type. +type Key []string + +func (k Key) String() string { + return strings.Join(k, ".") +} + +func (k Key) maybeQuotedAll() string { + var ss []string + for i := range k { + ss = append(ss, k.maybeQuoted(i)) + } + return strings.Join(ss, ".") +} + +func (k Key) maybeQuoted(i int) string { + quote := false + for _, c := range k[i] { + if !isBareKeyChar(c) { + quote = true + break + } + } + if quote { + return "\"" + strings.Replace(k[i], "\"", "\\\"", -1) + "\"" + } + return k[i] +} + +func (k Key) add(piece string) Key { + newKey := make(Key, len(k)+1) + copy(newKey, k) + newKey[len(k)] = piece + return newKey +} + +// Keys returns a slice of every key in the TOML data, including key groups. +// Each key is itself a slice, where the first element is the top of the +// hierarchy and the last is the most specific. +// +// The list will have the same order as the keys appeared in the TOML data. +// +// All keys returned are non-empty. +func (md *MetaData) Keys() []Key { + return md.keys +} + +// Undecoded returns all keys that have not been decoded in the order in which +// they appear in the original TOML document. +// +// This includes keys that haven't been decoded because of a Primitive value. +// Once the Primitive value is decoded, the keys will be considered decoded. +// +// Also note that decoding into an empty interface will result in no decoding, +// and so no keys will be considered decoded. +// +// In this sense, the Undecoded keys correspond to keys in the TOML document +// that do not have a concrete type in your representation. 
+func (md *MetaData) Undecoded() []Key { + undecoded := make([]Key, 0, len(md.keys)) + for _, key := range md.keys { + if !md.decoded[key.String()] { + undecoded = append(undecoded, key) + } + } + return undecoded +} diff --git a/vendor/github.com/BurntSushi/toml/doc.go b/vendor/github.com/BurntSushi/toml/doc.go new file mode 100644 index 000000000..b371f396e --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/doc.go @@ -0,0 +1,27 @@ +/* +Package toml provides facilities for decoding and encoding TOML configuration +files via reflection. There is also support for delaying decoding with +the Primitive type, and querying the set of keys in a TOML document with the +MetaData type. + +The specification implemented: https://github.com/toml-lang/toml + +The sub-command github.com/BurntSushi/toml/cmd/tomlv can be used to verify +whether a file is a valid TOML document. It can also be used to print the +type of each key in a TOML document. + +Testing + +There are two important types of tests used for this package. The first is +contained inside '*_test.go' files and uses the standard Go unit testing +framework. These tests are primarily devoted to holistically testing the +decoder and encoder. + +The second type of testing is used to verify the implementation's adherence +to the TOML specification. These tests have been factored into their own +project: https://github.com/BurntSushi/toml-test + +The reason the tests are in a separate project is so that they can be used by +any implementation of TOML. Namely, it is language agnostic. 
+*/ +package toml diff --git a/vendor/github.com/BurntSushi/toml/encode.go b/vendor/github.com/BurntSushi/toml/encode.go new file mode 100644 index 000000000..d905c21a2 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/encode.go @@ -0,0 +1,568 @@ +package toml + +import ( + "bufio" + "errors" + "fmt" + "io" + "reflect" + "sort" + "strconv" + "strings" + "time" +) + +type tomlEncodeError struct{ error } + +var ( + errArrayMixedElementTypes = errors.New( + "toml: cannot encode array with mixed element types") + errArrayNilElement = errors.New( + "toml: cannot encode array with nil element") + errNonString = errors.New( + "toml: cannot encode a map with non-string key type") + errAnonNonStruct = errors.New( + "toml: cannot encode an anonymous field that is not a struct") + errArrayNoTable = errors.New( + "toml: TOML array element cannot contain a table") + errNoKey = errors.New( + "toml: top-level values must be Go maps or structs") + errAnything = errors.New("") // used in testing +) + +var quotedReplacer = strings.NewReplacer( + "\t", "\\t", + "\n", "\\n", + "\r", "\\r", + "\"", "\\\"", + "\\", "\\\\", +) + +// Encoder controls the encoding of Go values to a TOML document to some +// io.Writer. +// +// The indentation level can be controlled with the Indent field. +type Encoder struct { + // A single indentation level. By default it is two spaces. + Indent string + + // hasWritten is whether we have written any output to w yet. + hasWritten bool + w *bufio.Writer +} + +// NewEncoder returns a TOML encoder that encodes Go values to the io.Writer +// given. By default, a single indentation level is 2 spaces. +func NewEncoder(w io.Writer) *Encoder { + return &Encoder{ + w: bufio.NewWriter(w), + Indent: " ", + } +} + +// Encode writes a TOML representation of the Go value to the underlying +// io.Writer. If the value given cannot be encoded to a valid TOML document, +// then an error is returned. 
+// +// The mapping between Go values and TOML values should be precisely the same +// as for the Decode* functions. Similarly, the TextMarshaler interface is +// supported by encoding the resulting bytes as strings. (If you want to write +// arbitrary binary data then you will need to use something like base64 since +// TOML does not have any binary types.) +// +// When encoding TOML hashes (i.e., Go maps or structs), keys without any +// sub-hashes are encoded first. +// +// If a Go map is encoded, then its keys are sorted alphabetically for +// deterministic output. More control over this behavior may be provided if +// there is demand for it. +// +// Encoding Go values without a corresponding TOML representation---like map +// types with non-string keys---will cause an error to be returned. Similarly +// for mixed arrays/slices, arrays/slices with nil elements, embedded +// non-struct types and nested slices containing maps or structs. +// (e.g., [][]map[string]string is not allowed but []map[string]string is OK +// and so is []map[string][]string.) +func (enc *Encoder) Encode(v interface{}) error { + rv := eindirect(reflect.ValueOf(v)) + if err := enc.safeEncode(Key([]string{}), rv); err != nil { + return err + } + return enc.w.Flush() +} + +func (enc *Encoder) safeEncode(key Key, rv reflect.Value) (err error) { + defer func() { + if r := recover(); r != nil { + if terr, ok := r.(tomlEncodeError); ok { + err = terr.error + return + } + panic(r) + } + }() + enc.encode(key, rv) + return nil +} + +func (enc *Encoder) encode(key Key, rv reflect.Value) { + // Special case. Time needs to be in ISO8601 format. + // Special case. If we can marshal the type to text, then we used that. + // Basically, this prevents the encoder for handling these types as + // generic structs (or whatever the underlying type of a TextMarshaler is). 
+ switch rv.Interface().(type) { + case time.Time, TextMarshaler: + enc.keyEqElement(key, rv) + return + } + + k := rv.Kind() + switch k { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, + reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, + reflect.Uint64, + reflect.Float32, reflect.Float64, reflect.String, reflect.Bool: + enc.keyEqElement(key, rv) + case reflect.Array, reflect.Slice: + if typeEqual(tomlArrayHash, tomlTypeOfGo(rv)) { + enc.eArrayOfTables(key, rv) + } else { + enc.keyEqElement(key, rv) + } + case reflect.Interface: + if rv.IsNil() { + return + } + enc.encode(key, rv.Elem()) + case reflect.Map: + if rv.IsNil() { + return + } + enc.eTable(key, rv) + case reflect.Ptr: + if rv.IsNil() { + return + } + enc.encode(key, rv.Elem()) + case reflect.Struct: + enc.eTable(key, rv) + default: + panic(e("unsupported type for key '%s': %s", key, k)) + } +} + +// eElement encodes any value that can be an array element (primitives and +// arrays). +func (enc *Encoder) eElement(rv reflect.Value) { + switch v := rv.Interface().(type) { + case time.Time: + // Special case time.Time as a primitive. Has to come before + // TextMarshaler below because time.Time implements + // encoding.TextMarshaler, but we need to always use UTC. + enc.wf(v.UTC().Format("2006-01-02T15:04:05Z")) + return + case TextMarshaler: + // Special case. Use text marshaler if it's available for this value. 
+ if s, err := v.MarshalText(); err != nil { + encPanic(err) + } else { + enc.writeQuoted(string(s)) + } + return + } + switch rv.Kind() { + case reflect.Bool: + enc.wf(strconv.FormatBool(rv.Bool())) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, + reflect.Int64: + enc.wf(strconv.FormatInt(rv.Int(), 10)) + case reflect.Uint, reflect.Uint8, reflect.Uint16, + reflect.Uint32, reflect.Uint64: + enc.wf(strconv.FormatUint(rv.Uint(), 10)) + case reflect.Float32: + enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 32))) + case reflect.Float64: + enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 64))) + case reflect.Array, reflect.Slice: + enc.eArrayOrSliceElement(rv) + case reflect.Interface: + enc.eElement(rv.Elem()) + case reflect.String: + enc.writeQuoted(rv.String()) + default: + panic(e("unexpected primitive type: %s", rv.Kind())) + } +} + +// By the TOML spec, all floats must have a decimal with at least one +// number on either side. +func floatAddDecimal(fstr string) string { + if !strings.Contains(fstr, ".") { + return fstr + ".0" + } + return fstr +} + +func (enc *Encoder) writeQuoted(s string) { + enc.wf("\"%s\"", quotedReplacer.Replace(s)) +} + +func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) { + length := rv.Len() + enc.wf("[") + for i := 0; i < length; i++ { + elem := rv.Index(i) + enc.eElement(elem) + if i != length-1 { + enc.wf(", ") + } + } + enc.wf("]") +} + +func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) { + if len(key) == 0 { + encPanic(errNoKey) + } + for i := 0; i < rv.Len(); i++ { + trv := rv.Index(i) + if isNil(trv) { + continue + } + panicIfInvalidKey(key) + enc.newline() + enc.wf("%s[[%s]]", enc.indentStr(key), key.maybeQuotedAll()) + enc.newline() + enc.eMapOrStruct(key, trv) + } +} + +func (enc *Encoder) eTable(key Key, rv reflect.Value) { + panicIfInvalidKey(key) + if len(key) == 1 { + // Output an extra newline between top-level tables. 
+ // (The newline isn't written if nothing else has been written though.) + enc.newline() + } + if len(key) > 0 { + enc.wf("%s[%s]", enc.indentStr(key), key.maybeQuotedAll()) + enc.newline() + } + enc.eMapOrStruct(key, rv) +} + +func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value) { + switch rv := eindirect(rv); rv.Kind() { + case reflect.Map: + enc.eMap(key, rv) + case reflect.Struct: + enc.eStruct(key, rv) + default: + panic("eTable: unhandled reflect.Value Kind: " + rv.Kind().String()) + } +} + +func (enc *Encoder) eMap(key Key, rv reflect.Value) { + rt := rv.Type() + if rt.Key().Kind() != reflect.String { + encPanic(errNonString) + } + + // Sort keys so that we have deterministic output. And write keys directly + // underneath this key first, before writing sub-structs or sub-maps. + var mapKeysDirect, mapKeysSub []string + for _, mapKey := range rv.MapKeys() { + k := mapKey.String() + if typeIsHash(tomlTypeOfGo(rv.MapIndex(mapKey))) { + mapKeysSub = append(mapKeysSub, k) + } else { + mapKeysDirect = append(mapKeysDirect, k) + } + } + + var writeMapKeys = func(mapKeys []string) { + sort.Strings(mapKeys) + for _, mapKey := range mapKeys { + mrv := rv.MapIndex(reflect.ValueOf(mapKey)) + if isNil(mrv) { + // Don't write anything for nil fields. + continue + } + enc.encode(key.add(mapKey), mrv) + } + } + writeMapKeys(mapKeysDirect) + writeMapKeys(mapKeysSub) +} + +func (enc *Encoder) eStruct(key Key, rv reflect.Value) { + // Write keys for fields directly under this key first, because if we write + // a field that creates a new table, then all keys under it will be in that + // table (not the one we're writing here). 
+ rt := rv.Type() + var fieldsDirect, fieldsSub [][]int + var addFields func(rt reflect.Type, rv reflect.Value, start []int) + addFields = func(rt reflect.Type, rv reflect.Value, start []int) { + for i := 0; i < rt.NumField(); i++ { + f := rt.Field(i) + // skip unexported fields + if f.PkgPath != "" && !f.Anonymous { + continue + } + frv := rv.Field(i) + if f.Anonymous { + t := f.Type + switch t.Kind() { + case reflect.Struct: + // Treat anonymous struct fields with + // tag names as though they are not + // anonymous, like encoding/json does. + if getOptions(f.Tag).name == "" { + addFields(t, frv, f.Index) + continue + } + case reflect.Ptr: + if t.Elem().Kind() == reflect.Struct && + getOptions(f.Tag).name == "" { + if !frv.IsNil() { + addFields(t.Elem(), frv.Elem(), f.Index) + } + continue + } + // Fall through to the normal field encoding logic below + // for non-struct anonymous fields. + } + } + + if typeIsHash(tomlTypeOfGo(frv)) { + fieldsSub = append(fieldsSub, append(start, f.Index...)) + } else { + fieldsDirect = append(fieldsDirect, append(start, f.Index...)) + } + } + } + addFields(rt, rv, nil) + + var writeFields = func(fields [][]int) { + for _, fieldIndex := range fields { + sft := rt.FieldByIndex(fieldIndex) + sf := rv.FieldByIndex(fieldIndex) + if isNil(sf) { + // Don't write anything for nil fields. + continue + } + + opts := getOptions(sft.Tag) + if opts.skip { + continue + } + keyName := sft.Name + if opts.name != "" { + keyName = opts.name + } + if opts.omitempty && isEmpty(sf) { + continue + } + if opts.omitzero && isZero(sf) { + continue + } + + enc.encode(key.add(keyName), sf) + } + } + writeFields(fieldsDirect) + writeFields(fieldsSub) +} + +// tomlTypeName returns the TOML type name of the Go value's type. It is +// used to determine whether the types of array elements are mixed (which is +// forbidden). If the Go value is nil, then it is illegal for it to be an array +// element, and valueIsNil is returned as true. 
+ +// Returns the TOML type of a Go value. The type may be `nil`, which means +// no concrete TOML type could be found. +func tomlTypeOfGo(rv reflect.Value) tomlType { + if isNil(rv) || !rv.IsValid() { + return nil + } + switch rv.Kind() { + case reflect.Bool: + return tomlBool + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, + reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, + reflect.Uint64: + return tomlInteger + case reflect.Float32, reflect.Float64: + return tomlFloat + case reflect.Array, reflect.Slice: + if typeEqual(tomlHash, tomlArrayType(rv)) { + return tomlArrayHash + } + return tomlArray + case reflect.Ptr, reflect.Interface: + return tomlTypeOfGo(rv.Elem()) + case reflect.String: + return tomlString + case reflect.Map: + return tomlHash + case reflect.Struct: + switch rv.Interface().(type) { + case time.Time: + return tomlDatetime + case TextMarshaler: + return tomlString + default: + return tomlHash + } + default: + panic("unexpected reflect.Kind: " + rv.Kind().String()) + } +} + +// tomlArrayType returns the element type of a TOML array. The type returned +// may be nil if it cannot be determined (e.g., a nil slice or a zero length +// slize). This function may also panic if it finds a type that cannot be +// expressed in TOML (such as nil elements, heterogeneous arrays or directly +// nested arrays of tables). +func tomlArrayType(rv reflect.Value) tomlType { + if isNil(rv) || !rv.IsValid() || rv.Len() == 0 { + return nil + } + firstType := tomlTypeOfGo(rv.Index(0)) + if firstType == nil { + encPanic(errArrayNilElement) + } + + rvlen := rv.Len() + for i := 1; i < rvlen; i++ { + elem := rv.Index(i) + switch elemType := tomlTypeOfGo(elem); { + case elemType == nil: + encPanic(errArrayNilElement) + case !typeEqual(firstType, elemType): + encPanic(errArrayMixedElementTypes) + } + } + // If we have a nested array, then we must make sure that the nested + // array contains ONLY primitives. 
+ // This checks arbitrarily nested arrays. + if typeEqual(firstType, tomlArray) || typeEqual(firstType, tomlArrayHash) { + nest := tomlArrayType(eindirect(rv.Index(0))) + if typeEqual(nest, tomlHash) || typeEqual(nest, tomlArrayHash) { + encPanic(errArrayNoTable) + } + } + return firstType +} + +type tagOptions struct { + skip bool // "-" + name string + omitempty bool + omitzero bool +} + +func getOptions(tag reflect.StructTag) tagOptions { + t := tag.Get("toml") + if t == "-" { + return tagOptions{skip: true} + } + var opts tagOptions + parts := strings.Split(t, ",") + opts.name = parts[0] + for _, s := range parts[1:] { + switch s { + case "omitempty": + opts.omitempty = true + case "omitzero": + opts.omitzero = true + } + } + return opts +} + +func isZero(rv reflect.Value) bool { + switch rv.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return rv.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return rv.Uint() == 0 + case reflect.Float32, reflect.Float64: + return rv.Float() == 0.0 + } + return false +} + +func isEmpty(rv reflect.Value) bool { + switch rv.Kind() { + case reflect.Array, reflect.Slice, reflect.Map, reflect.String: + return rv.Len() == 0 + case reflect.Bool: + return !rv.Bool() + } + return false +} + +func (enc *Encoder) newline() { + if enc.hasWritten { + enc.wf("\n") + } +} + +func (enc *Encoder) keyEqElement(key Key, val reflect.Value) { + if len(key) == 0 { + encPanic(errNoKey) + } + panicIfInvalidKey(key) + enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1)) + enc.eElement(val) + enc.newline() +} + +func (enc *Encoder) wf(format string, v ...interface{}) { + if _, err := fmt.Fprintf(enc.w, format, v...); err != nil { + encPanic(err) + } + enc.hasWritten = true +} + +func (enc *Encoder) indentStr(key Key) string { + return strings.Repeat(enc.Indent, len(key)-1) +} + +func encPanic(err error) { + panic(tomlEncodeError{err}) +} + +func 
eindirect(v reflect.Value) reflect.Value { + switch v.Kind() { + case reflect.Ptr, reflect.Interface: + return eindirect(v.Elem()) + default: + return v + } +} + +func isNil(rv reflect.Value) bool { + switch rv.Kind() { + case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return rv.IsNil() + default: + return false + } +} + +func panicIfInvalidKey(key Key) { + for _, k := range key { + if len(k) == 0 { + encPanic(e("Key '%s' is not a valid table name. Key names "+ + "cannot be empty.", key.maybeQuotedAll())) + } + } +} + +func isValidKeyName(s string) bool { + return len(s) != 0 +} diff --git a/vendor/github.com/BurntSushi/toml/encoding_types.go b/vendor/github.com/BurntSushi/toml/encoding_types.go new file mode 100644 index 000000000..d36e1dd60 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/encoding_types.go @@ -0,0 +1,19 @@ +// +build go1.2 + +package toml + +// In order to support Go 1.1, we define our own TextMarshaler and +// TextUnmarshaler types. For Go 1.2+, we just alias them with the +// standard library interfaces. + +import ( + "encoding" +) + +// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here +// so that Go 1.1 can be supported. +type TextMarshaler encoding.TextMarshaler + +// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined +// here so that Go 1.1 can be supported. +type TextUnmarshaler encoding.TextUnmarshaler diff --git a/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go b/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go new file mode 100644 index 000000000..e8d503d04 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go @@ -0,0 +1,18 @@ +// +build !go1.2 + +package toml + +// These interfaces were introduced in Go 1.2, so we add them manually when +// compiling for Go 1.1. + +// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here +// so that Go 1.1 can be supported. 
+type TextMarshaler interface { + MarshalText() (text []byte, err error) +} + +// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined +// here so that Go 1.1 can be supported. +type TextUnmarshaler interface { + UnmarshalText(text []byte) error +} diff --git a/vendor/github.com/BurntSushi/toml/lex.go b/vendor/github.com/BurntSushi/toml/lex.go new file mode 100644 index 000000000..e0a742a88 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/lex.go @@ -0,0 +1,953 @@ +package toml + +import ( + "fmt" + "strings" + "unicode" + "unicode/utf8" +) + +type itemType int + +const ( + itemError itemType = iota + itemNIL // used in the parser to indicate no type + itemEOF + itemText + itemString + itemRawString + itemMultilineString + itemRawMultilineString + itemBool + itemInteger + itemFloat + itemDatetime + itemArray // the start of an array + itemArrayEnd + itemTableStart + itemTableEnd + itemArrayTableStart + itemArrayTableEnd + itemKeyStart + itemCommentStart + itemInlineTableStart + itemInlineTableEnd +) + +const ( + eof = 0 + comma = ',' + tableStart = '[' + tableEnd = ']' + arrayTableStart = '[' + arrayTableEnd = ']' + tableSep = '.' + keySep = '=' + arrayStart = '[' + arrayEnd = ']' + commentStart = '#' + stringStart = '"' + stringEnd = '"' + rawStringStart = '\'' + rawStringEnd = '\'' + inlineTableStart = '{' + inlineTableEnd = '}' +) + +type stateFn func(lx *lexer) stateFn + +type lexer struct { + input string + start int + pos int + line int + state stateFn + items chan item + + // Allow for backing up up to three runes. + // This is necessary because TOML contains 3-rune tokens (""" and '''). + prevWidths [3]int + nprev int // how many of prevWidths are in use + // If we emit an eof, we can still back up, but it is not OK to call + // next again. + atEOF bool + + // A stack of state functions used to maintain context. + // The idea is to reuse parts of the state machine in various places. 
+ // For example, values can appear at the top level or within arbitrarily + // nested arrays. The last state on the stack is used after a value has + // been lexed. Similarly for comments. + stack []stateFn +} + +type item struct { + typ itemType + val string + line int +} + +func (lx *lexer) nextItem() item { + for { + select { + case item := <-lx.items: + return item + default: + lx.state = lx.state(lx) + } + } +} + +func lex(input string) *lexer { + lx := &lexer{ + input: input, + state: lexTop, + line: 1, + items: make(chan item, 10), + stack: make([]stateFn, 0, 10), + } + return lx +} + +func (lx *lexer) push(state stateFn) { + lx.stack = append(lx.stack, state) +} + +func (lx *lexer) pop() stateFn { + if len(lx.stack) == 0 { + return lx.errorf("BUG in lexer: no states to pop") + } + last := lx.stack[len(lx.stack)-1] + lx.stack = lx.stack[0 : len(lx.stack)-1] + return last +} + +func (lx *lexer) current() string { + return lx.input[lx.start:lx.pos] +} + +func (lx *lexer) emit(typ itemType) { + lx.items <- item{typ, lx.current(), lx.line} + lx.start = lx.pos +} + +func (lx *lexer) emitTrim(typ itemType) { + lx.items <- item{typ, strings.TrimSpace(lx.current()), lx.line} + lx.start = lx.pos +} + +func (lx *lexer) next() (r rune) { + if lx.atEOF { + panic("next called after EOF") + } + if lx.pos >= len(lx.input) { + lx.atEOF = true + return eof + } + + if lx.input[lx.pos] == '\n' { + lx.line++ + } + lx.prevWidths[2] = lx.prevWidths[1] + lx.prevWidths[1] = lx.prevWidths[0] + if lx.nprev < 3 { + lx.nprev++ + } + r, w := utf8.DecodeRuneInString(lx.input[lx.pos:]) + lx.prevWidths[0] = w + lx.pos += w + return r +} + +// ignore skips over the pending input before this point. +func (lx *lexer) ignore() { + lx.start = lx.pos +} + +// backup steps back one rune. Can be called only twice between calls to next. 
+func (lx *lexer) backup() { + if lx.atEOF { + lx.atEOF = false + return + } + if lx.nprev < 1 { + panic("backed up too far") + } + w := lx.prevWidths[0] + lx.prevWidths[0] = lx.prevWidths[1] + lx.prevWidths[1] = lx.prevWidths[2] + lx.nprev-- + lx.pos -= w + if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' { + lx.line-- + } +} + +// accept consumes the next rune if it's equal to `valid`. +func (lx *lexer) accept(valid rune) bool { + if lx.next() == valid { + return true + } + lx.backup() + return false +} + +// peek returns but does not consume the next rune in the input. +func (lx *lexer) peek() rune { + r := lx.next() + lx.backup() + return r +} + +// skip ignores all input that matches the given predicate. +func (lx *lexer) skip(pred func(rune) bool) { + for { + r := lx.next() + if pred(r) { + continue + } + lx.backup() + lx.ignore() + return + } +} + +// errorf stops all lexing by emitting an error and returning `nil`. +// Note that any value that is a character is escaped if it's a special +// character (newlines, tabs, etc.). +func (lx *lexer) errorf(format string, values ...interface{}) stateFn { + lx.items <- item{ + itemError, + fmt.Sprintf(format, values...), + lx.line, + } + return nil +} + +// lexTop consumes elements at the top level of TOML data. +func lexTop(lx *lexer) stateFn { + r := lx.next() + if isWhitespace(r) || isNL(r) { + return lexSkip(lx, lexTop) + } + switch r { + case commentStart: + lx.push(lexTop) + return lexCommentStart + case tableStart: + return lexTableStart + case eof: + if lx.pos > lx.start { + return lx.errorf("unexpected EOF") + } + lx.emit(itemEOF) + return nil + } + + // At this point, the only valid item can be a key, so we back up + // and let the key lexer do the rest. + lx.backup() + lx.push(lexTopEnd) + return lexKeyStart +} + +// lexTopEnd is entered whenever a top-level item has been consumed. (A value +// or a table.) It must see only whitespace, and will turn back to lexTop +// upon a newline. 
If it sees EOF, it will quit the lexer successfully. +func lexTopEnd(lx *lexer) stateFn { + r := lx.next() + switch { + case r == commentStart: + // a comment will read to a newline for us. + lx.push(lexTop) + return lexCommentStart + case isWhitespace(r): + return lexTopEnd + case isNL(r): + lx.ignore() + return lexTop + case r == eof: + lx.emit(itemEOF) + return nil + } + return lx.errorf("expected a top-level item to end with a newline, "+ + "comment, or EOF, but got %q instead", r) +} + +// lexTable lexes the beginning of a table. Namely, it makes sure that +// it starts with a character other than '.' and ']'. +// It assumes that '[' has already been consumed. +// It also handles the case that this is an item in an array of tables. +// e.g., '[[name]]'. +func lexTableStart(lx *lexer) stateFn { + if lx.peek() == arrayTableStart { + lx.next() + lx.emit(itemArrayTableStart) + lx.push(lexArrayTableEnd) + } else { + lx.emit(itemTableStart) + lx.push(lexTableEnd) + } + return lexTableNameStart +} + +func lexTableEnd(lx *lexer) stateFn { + lx.emit(itemTableEnd) + return lexTopEnd +} + +func lexArrayTableEnd(lx *lexer) stateFn { + if r := lx.next(); r != arrayTableEnd { + return lx.errorf("expected end of table array name delimiter %q, "+ + "but got %q instead", arrayTableEnd, r) + } + lx.emit(itemArrayTableEnd) + return lexTopEnd +} + +func lexTableNameStart(lx *lexer) stateFn { + lx.skip(isWhitespace) + switch r := lx.peek(); { + case r == tableEnd || r == eof: + return lx.errorf("unexpected end of table name " + + "(table names cannot be empty)") + case r == tableSep: + return lx.errorf("unexpected table separator " + + "(table names cannot be empty)") + case r == stringStart || r == rawStringStart: + lx.ignore() + lx.push(lexTableNameEnd) + return lexValue // reuse string lexing + default: + return lexBareTableName + } +} + +// lexBareTableName lexes the name of a table. It assumes that at least one +// valid character for the table has already been read. 
+func lexBareTableName(lx *lexer) stateFn { + r := lx.next() + if isBareKeyChar(r) { + return lexBareTableName + } + lx.backup() + lx.emit(itemText) + return lexTableNameEnd +} + +// lexTableNameEnd reads the end of a piece of a table name, optionally +// consuming whitespace. +func lexTableNameEnd(lx *lexer) stateFn { + lx.skip(isWhitespace) + switch r := lx.next(); { + case isWhitespace(r): + return lexTableNameEnd + case r == tableSep: + lx.ignore() + return lexTableNameStart + case r == tableEnd: + return lx.pop() + default: + return lx.errorf("expected '.' or ']' to end table name, "+ + "but got %q instead", r) + } +} + +// lexKeyStart consumes a key name up until the first non-whitespace character. +// lexKeyStart will ignore whitespace. +func lexKeyStart(lx *lexer) stateFn { + r := lx.peek() + switch { + case r == keySep: + return lx.errorf("unexpected key separator %q", keySep) + case isWhitespace(r) || isNL(r): + lx.next() + return lexSkip(lx, lexKeyStart) + case r == stringStart || r == rawStringStart: + lx.ignore() + lx.emit(itemKeyStart) + lx.push(lexKeyEnd) + return lexValue // reuse string lexing + default: + lx.ignore() + lx.emit(itemKeyStart) + return lexBareKey + } +} + +// lexBareKey consumes the text of a bare key. Assumes that the first character +// (which is not whitespace) has not yet been consumed. +func lexBareKey(lx *lexer) stateFn { + switch r := lx.next(); { + case isBareKeyChar(r): + return lexBareKey + case isWhitespace(r): + lx.backup() + lx.emit(itemText) + return lexKeyEnd + case r == keySep: + lx.backup() + lx.emit(itemText) + return lexKeyEnd + default: + return lx.errorf("bare keys cannot contain %q", r) + } +} + +// lexKeyEnd consumes the end of a key and trims whitespace (up to the key +// separator). 
+func lexKeyEnd(lx *lexer) stateFn { + switch r := lx.next(); { + case r == keySep: + return lexSkip(lx, lexValue) + case isWhitespace(r): + return lexSkip(lx, lexKeyEnd) + default: + return lx.errorf("expected key separator %q, but got %q instead", + keySep, r) + } +} + +// lexValue starts the consumption of a value anywhere a value is expected. +// lexValue will ignore whitespace. +// After a value is lexed, the last state on the next is popped and returned. +func lexValue(lx *lexer) stateFn { + // We allow whitespace to precede a value, but NOT newlines. + // In array syntax, the array states are responsible for ignoring newlines. + r := lx.next() + switch { + case isWhitespace(r): + return lexSkip(lx, lexValue) + case isDigit(r): + lx.backup() // avoid an extra state and use the same as above + return lexNumberOrDateStart + } + switch r { + case arrayStart: + lx.ignore() + lx.emit(itemArray) + return lexArrayValue + case inlineTableStart: + lx.ignore() + lx.emit(itemInlineTableStart) + return lexInlineTableValue + case stringStart: + if lx.accept(stringStart) { + if lx.accept(stringStart) { + lx.ignore() // Ignore """ + return lexMultilineString + } + lx.backup() + } + lx.ignore() // ignore the '"' + return lexString + case rawStringStart: + if lx.accept(rawStringStart) { + if lx.accept(rawStringStart) { + lx.ignore() // Ignore """ + return lexMultilineRawString + } + lx.backup() + } + lx.ignore() // ignore the "'" + return lexRawString + case '+', '-': + return lexNumberStart + case '.': // special error case, be kind to users + return lx.errorf("floats must start with a digit, not '.'") + } + if unicode.IsLetter(r) { + // Be permissive here; lexBool will give a nice error if the + // user wrote something like + // x = foo + // (i.e. not 'true' or 'false' but is something else word-like.) + lx.backup() + return lexBool + } + return lx.errorf("expected value but found %q instead", r) +} + +// lexArrayValue consumes one value in an array. 
It assumes that '[' or ',' +// have already been consumed. All whitespace and newlines are ignored. +func lexArrayValue(lx *lexer) stateFn { + r := lx.next() + switch { + case isWhitespace(r) || isNL(r): + return lexSkip(lx, lexArrayValue) + case r == commentStart: + lx.push(lexArrayValue) + return lexCommentStart + case r == comma: + return lx.errorf("unexpected comma") + case r == arrayEnd: + // NOTE(caleb): The spec isn't clear about whether you can have + // a trailing comma or not, so we'll allow it. + return lexArrayEnd + } + + lx.backup() + lx.push(lexArrayValueEnd) + return lexValue +} + +// lexArrayValueEnd consumes everything between the end of an array value and +// the next value (or the end of the array): it ignores whitespace and newlines +// and expects either a ',' or a ']'. +func lexArrayValueEnd(lx *lexer) stateFn { + r := lx.next() + switch { + case isWhitespace(r) || isNL(r): + return lexSkip(lx, lexArrayValueEnd) + case r == commentStart: + lx.push(lexArrayValueEnd) + return lexCommentStart + case r == comma: + lx.ignore() + return lexArrayValue // move on to the next value + case r == arrayEnd: + return lexArrayEnd + } + return lx.errorf( + "expected a comma or array terminator %q, but got %q instead", + arrayEnd, r, + ) +} + +// lexArrayEnd finishes the lexing of an array. +// It assumes that a ']' has just been consumed. +func lexArrayEnd(lx *lexer) stateFn { + lx.ignore() + lx.emit(itemArrayEnd) + return lx.pop() +} + +// lexInlineTableValue consumes one key/value pair in an inline table. +// It assumes that '{' or ',' have already been consumed. Whitespace is ignored. 
+func lexInlineTableValue(lx *lexer) stateFn { + r := lx.next() + switch { + case isWhitespace(r): + return lexSkip(lx, lexInlineTableValue) + case isNL(r): + return lx.errorf("newlines not allowed within inline tables") + case r == commentStart: + lx.push(lexInlineTableValue) + return lexCommentStart + case r == comma: + return lx.errorf("unexpected comma") + case r == inlineTableEnd: + return lexInlineTableEnd + } + lx.backup() + lx.push(lexInlineTableValueEnd) + return lexKeyStart +} + +// lexInlineTableValueEnd consumes everything between the end of an inline table +// key/value pair and the next pair (or the end of the table): +// it ignores whitespace and expects either a ',' or a '}'. +func lexInlineTableValueEnd(lx *lexer) stateFn { + r := lx.next() + switch { + case isWhitespace(r): + return lexSkip(lx, lexInlineTableValueEnd) + case isNL(r): + return lx.errorf("newlines not allowed within inline tables") + case r == commentStart: + lx.push(lexInlineTableValueEnd) + return lexCommentStart + case r == comma: + lx.ignore() + return lexInlineTableValue + case r == inlineTableEnd: + return lexInlineTableEnd + } + return lx.errorf("expected a comma or an inline table terminator %q, "+ + "but got %q instead", inlineTableEnd, r) +} + +// lexInlineTableEnd finishes the lexing of an inline table. +// It assumes that a '}' has just been consumed. +func lexInlineTableEnd(lx *lexer) stateFn { + lx.ignore() + lx.emit(itemInlineTableEnd) + return lx.pop() +} + +// lexString consumes the inner contents of a string. It assumes that the +// beginning '"' has already been consumed and ignored. 
+func lexString(lx *lexer) stateFn { + r := lx.next() + switch { + case r == eof: + return lx.errorf("unexpected EOF") + case isNL(r): + return lx.errorf("strings cannot contain newlines") + case r == '\\': + lx.push(lexString) + return lexStringEscape + case r == stringEnd: + lx.backup() + lx.emit(itemString) + lx.next() + lx.ignore() + return lx.pop() + } + return lexString +} + +// lexMultilineString consumes the inner contents of a string. It assumes that +// the beginning '"""' has already been consumed and ignored. +func lexMultilineString(lx *lexer) stateFn { + switch lx.next() { + case eof: + return lx.errorf("unexpected EOF") + case '\\': + return lexMultilineStringEscape + case stringEnd: + if lx.accept(stringEnd) { + if lx.accept(stringEnd) { + lx.backup() + lx.backup() + lx.backup() + lx.emit(itemMultilineString) + lx.next() + lx.next() + lx.next() + lx.ignore() + return lx.pop() + } + lx.backup() + } + } + return lexMultilineString +} + +// lexRawString consumes a raw string. Nothing can be escaped in such a string. +// It assumes that the beginning "'" has already been consumed and ignored. +func lexRawString(lx *lexer) stateFn { + r := lx.next() + switch { + case r == eof: + return lx.errorf("unexpected EOF") + case isNL(r): + return lx.errorf("strings cannot contain newlines") + case r == rawStringEnd: + lx.backup() + lx.emit(itemRawString) + lx.next() + lx.ignore() + return lx.pop() + } + return lexRawString +} + +// lexMultilineRawString consumes a raw string. Nothing can be escaped in such +// a string. It assumes that the beginning "'''" has already been consumed and +// ignored. 
+func lexMultilineRawString(lx *lexer) stateFn { + switch lx.next() { + case eof: + return lx.errorf("unexpected EOF") + case rawStringEnd: + if lx.accept(rawStringEnd) { + if lx.accept(rawStringEnd) { + lx.backup() + lx.backup() + lx.backup() + lx.emit(itemRawMultilineString) + lx.next() + lx.next() + lx.next() + lx.ignore() + return lx.pop() + } + lx.backup() + } + } + return lexMultilineRawString +} + +// lexMultilineStringEscape consumes an escaped character. It assumes that the +// preceding '\\' has already been consumed. +func lexMultilineStringEscape(lx *lexer) stateFn { + // Handle the special case first: + if isNL(lx.next()) { + return lexMultilineString + } + lx.backup() + lx.push(lexMultilineString) + return lexStringEscape(lx) +} + +func lexStringEscape(lx *lexer) stateFn { + r := lx.next() + switch r { + case 'b': + fallthrough + case 't': + fallthrough + case 'n': + fallthrough + case 'f': + fallthrough + case 'r': + fallthrough + case '"': + fallthrough + case '\\': + return lx.pop() + case 'u': + return lexShortUnicodeEscape + case 'U': + return lexLongUnicodeEscape + } + return lx.errorf("invalid escape character %q; only the following "+ + "escape characters are allowed: "+ + `\b, \t, \n, \f, \r, \", \\, \uXXXX, and \UXXXXXXXX`, r) +} + +func lexShortUnicodeEscape(lx *lexer) stateFn { + var r rune + for i := 0; i < 4; i++ { + r = lx.next() + if !isHexadecimal(r) { + return lx.errorf(`expected four hexadecimal digits after '\u', `+ + "but got %q instead", lx.current()) + } + } + return lx.pop() +} + +func lexLongUnicodeEscape(lx *lexer) stateFn { + var r rune + for i := 0; i < 8; i++ { + r = lx.next() + if !isHexadecimal(r) { + return lx.errorf(`expected eight hexadecimal digits after '\U', `+ + "but got %q instead", lx.current()) + } + } + return lx.pop() +} + +// lexNumberOrDateStart consumes either an integer, a float, or datetime. 
+func lexNumberOrDateStart(lx *lexer) stateFn { + r := lx.next() + if isDigit(r) { + return lexNumberOrDate + } + switch r { + case '_': + return lexNumber + case 'e', 'E': + return lexFloat + case '.': + return lx.errorf("floats must start with a digit, not '.'") + } + return lx.errorf("expected a digit but got %q", r) +} + +// lexNumberOrDate consumes either an integer, float or datetime. +func lexNumberOrDate(lx *lexer) stateFn { + r := lx.next() + if isDigit(r) { + return lexNumberOrDate + } + switch r { + case '-': + return lexDatetime + case '_': + return lexNumber + case '.', 'e', 'E': + return lexFloat + } + + lx.backup() + lx.emit(itemInteger) + return lx.pop() +} + +// lexDatetime consumes a Datetime, to a first approximation. +// The parser validates that it matches one of the accepted formats. +func lexDatetime(lx *lexer) stateFn { + r := lx.next() + if isDigit(r) { + return lexDatetime + } + switch r { + case '-', 'T', ':', '.', 'Z', '+': + return lexDatetime + } + + lx.backup() + lx.emit(itemDatetime) + return lx.pop() +} + +// lexNumberStart consumes either an integer or a float. It assumes that a sign +// has already been read, but that *no* digits have been consumed. +// lexNumberStart will move to the appropriate integer or float states. +func lexNumberStart(lx *lexer) stateFn { + // We MUST see a digit. Even floats have to start with a digit. + r := lx.next() + if !isDigit(r) { + if r == '.' { + return lx.errorf("floats must start with a digit, not '.'") + } + return lx.errorf("expected a digit but got %q", r) + } + return lexNumber +} + +// lexNumber consumes an integer or a float after seeing the first digit. +func lexNumber(lx *lexer) stateFn { + r := lx.next() + if isDigit(r) { + return lexNumber + } + switch r { + case '_': + return lexNumber + case '.', 'e', 'E': + return lexFloat + } + + lx.backup() + lx.emit(itemInteger) + return lx.pop() +} + +// lexFloat consumes the elements of a float. 
It allows any sequence of +// float-like characters, so floats emitted by the lexer are only a first +// approximation and must be validated by the parser. +func lexFloat(lx *lexer) stateFn { + r := lx.next() + if isDigit(r) { + return lexFloat + } + switch r { + case '_', '.', '-', '+', 'e', 'E': + return lexFloat + } + + lx.backup() + lx.emit(itemFloat) + return lx.pop() +} + +// lexBool consumes a bool string: 'true' or 'false. +func lexBool(lx *lexer) stateFn { + var rs []rune + for { + r := lx.next() + if !unicode.IsLetter(r) { + lx.backup() + break + } + rs = append(rs, r) + } + s := string(rs) + switch s { + case "true", "false": + lx.emit(itemBool) + return lx.pop() + } + return lx.errorf("expected value but found %q instead", s) +} + +// lexCommentStart begins the lexing of a comment. It will emit +// itemCommentStart and consume no characters, passing control to lexComment. +func lexCommentStart(lx *lexer) stateFn { + lx.ignore() + lx.emit(itemCommentStart) + return lexComment +} + +// lexComment lexes an entire comment. It assumes that '#' has been consumed. +// It will consume *up to* the first newline character, and pass control +// back to the last state on the stack. +func lexComment(lx *lexer) stateFn { + r := lx.peek() + if isNL(r) || r == eof { + lx.emit(itemText) + return lx.pop() + } + lx.next() + return lexComment +} + +// lexSkip ignores all slurped input and moves on to the next state. +func lexSkip(lx *lexer, nextState stateFn) stateFn { + return func(lx *lexer) stateFn { + lx.ignore() + return nextState + } +} + +// isWhitespace returns true if `r` is a whitespace character according +// to the spec. 
+func isWhitespace(r rune) bool { + return r == '\t' || r == ' ' +} + +func isNL(r rune) bool { + return r == '\n' || r == '\r' +} + +func isDigit(r rune) bool { + return r >= '0' && r <= '9' +} + +func isHexadecimal(r rune) bool { + return (r >= '0' && r <= '9') || + (r >= 'a' && r <= 'f') || + (r >= 'A' && r <= 'F') +} + +func isBareKeyChar(r rune) bool { + return (r >= 'A' && r <= 'Z') || + (r >= 'a' && r <= 'z') || + (r >= '0' && r <= '9') || + r == '_' || + r == '-' +} + +func (itype itemType) String() string { + switch itype { + case itemError: + return "Error" + case itemNIL: + return "NIL" + case itemEOF: + return "EOF" + case itemText: + return "Text" + case itemString, itemRawString, itemMultilineString, itemRawMultilineString: + return "String" + case itemBool: + return "Bool" + case itemInteger: + return "Integer" + case itemFloat: + return "Float" + case itemDatetime: + return "DateTime" + case itemTableStart: + return "TableStart" + case itemTableEnd: + return "TableEnd" + case itemKeyStart: + return "KeyStart" + case itemArray: + return "Array" + case itemArrayEnd: + return "ArrayEnd" + case itemCommentStart: + return "CommentStart" + } + panic(fmt.Sprintf("BUG: Unknown type '%d'.", int(itype))) +} + +func (item item) String() string { + return fmt.Sprintf("(%s, %s)", item.typ.String(), item.val) +} diff --git a/vendor/github.com/BurntSushi/toml/parse.go b/vendor/github.com/BurntSushi/toml/parse.go new file mode 100644 index 000000000..50869ef92 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/parse.go @@ -0,0 +1,592 @@ +package toml + +import ( + "fmt" + "strconv" + "strings" + "time" + "unicode" + "unicode/utf8" +) + +type parser struct { + mapping map[string]interface{} + types map[string]tomlType + lx *lexer + + // A list of keys in the order that they appear in the TOML data. 
+ ordered []Key + + // the full key for the current hash in scope + context Key + + // the base key name for everything except hashes + currentKey string + + // rough approximation of line number + approxLine int + + // A map of 'key.group.names' to whether they were created implicitly. + implicits map[string]bool +} + +type parseError string + +func (pe parseError) Error() string { + return string(pe) +} + +func parse(data string) (p *parser, err error) { + defer func() { + if r := recover(); r != nil { + var ok bool + if err, ok = r.(parseError); ok { + return + } + panic(r) + } + }() + + p = &parser{ + mapping: make(map[string]interface{}), + types: make(map[string]tomlType), + lx: lex(data), + ordered: make([]Key, 0), + implicits: make(map[string]bool), + } + for { + item := p.next() + if item.typ == itemEOF { + break + } + p.topLevel(item) + } + + return p, nil +} + +func (p *parser) panicf(format string, v ...interface{}) { + msg := fmt.Sprintf("Near line %d (last key parsed '%s'): %s", + p.approxLine, p.current(), fmt.Sprintf(format, v...)) + panic(parseError(msg)) +} + +func (p *parser) next() item { + it := p.lx.nextItem() + if it.typ == itemError { + p.panicf("%s", it.val) + } + return it +} + +func (p *parser) bug(format string, v ...interface{}) { + panic(fmt.Sprintf("BUG: "+format+"\n\n", v...)) +} + +func (p *parser) expect(typ itemType) item { + it := p.next() + p.assertEqual(typ, it.typ) + return it +} + +func (p *parser) assertEqual(expected, got itemType) { + if expected != got { + p.bug("Expected '%s' but got '%s'.", expected, got) + } +} + +func (p *parser) topLevel(item item) { + switch item.typ { + case itemCommentStart: + p.approxLine = item.line + p.expect(itemText) + case itemTableStart: + kg := p.next() + p.approxLine = kg.line + + var key Key + for ; kg.typ != itemTableEnd && kg.typ != itemEOF; kg = p.next() { + key = append(key, p.keyString(kg)) + } + p.assertEqual(itemTableEnd, kg.typ) + + p.establishContext(key, false) + p.setType("", 
tomlHash) + p.ordered = append(p.ordered, key) + case itemArrayTableStart: + kg := p.next() + p.approxLine = kg.line + + var key Key + for ; kg.typ != itemArrayTableEnd && kg.typ != itemEOF; kg = p.next() { + key = append(key, p.keyString(kg)) + } + p.assertEqual(itemArrayTableEnd, kg.typ) + + p.establishContext(key, true) + p.setType("", tomlArrayHash) + p.ordered = append(p.ordered, key) + case itemKeyStart: + kname := p.next() + p.approxLine = kname.line + p.currentKey = p.keyString(kname) + + val, typ := p.value(p.next()) + p.setValue(p.currentKey, val) + p.setType(p.currentKey, typ) + p.ordered = append(p.ordered, p.context.add(p.currentKey)) + p.currentKey = "" + default: + p.bug("Unexpected type at top level: %s", item.typ) + } +} + +// Gets a string for a key (or part of a key in a table name). +func (p *parser) keyString(it item) string { + switch it.typ { + case itemText: + return it.val + case itemString, itemMultilineString, + itemRawString, itemRawMultilineString: + s, _ := p.value(it) + return s.(string) + default: + p.bug("Unexpected key type: %s", it.typ) + panic("unreachable") + } +} + +// value translates an expected value from the lexer into a Go value wrapped +// as an empty interface. 
+func (p *parser) value(it item) (interface{}, tomlType) { + switch it.typ { + case itemString: + return p.replaceEscapes(it.val), p.typeOfPrimitive(it) + case itemMultilineString: + trimmed := stripFirstNewline(stripEscapedWhitespace(it.val)) + return p.replaceEscapes(trimmed), p.typeOfPrimitive(it) + case itemRawString: + return it.val, p.typeOfPrimitive(it) + case itemRawMultilineString: + return stripFirstNewline(it.val), p.typeOfPrimitive(it) + case itemBool: + switch it.val { + case "true": + return true, p.typeOfPrimitive(it) + case "false": + return false, p.typeOfPrimitive(it) + } + p.bug("Expected boolean value, but got '%s'.", it.val) + case itemInteger: + if !numUnderscoresOK(it.val) { + p.panicf("Invalid integer %q: underscores must be surrounded by digits", + it.val) + } + val := strings.Replace(it.val, "_", "", -1) + num, err := strconv.ParseInt(val, 10, 64) + if err != nil { + // Distinguish integer values. Normally, it'd be a bug if the lexer + // provides an invalid integer, but it's possible that the number is + // out of range of valid values (which the lexer cannot determine). + // So mark the former as a bug but the latter as a legitimate user + // error. + if e, ok := err.(*strconv.NumError); ok && + e.Err == strconv.ErrRange { + + p.panicf("Integer '%s' is out of the range of 64-bit "+ + "signed integers.", it.val) + } else { + p.bug("Expected integer value, but got '%s'.", it.val) + } + } + return num, p.typeOfPrimitive(it) + case itemFloat: + parts := strings.FieldsFunc(it.val, func(r rune) bool { + switch r { + case '.', 'e', 'E': + return true + } + return false + }) + for _, part := range parts { + if !numUnderscoresOK(part) { + p.panicf("Invalid float %q: underscores must be "+ + "surrounded by digits", it.val) + } + } + if !numPeriodsOK(it.val) { + // As a special case, numbers like '123.' 
or '1.e2', + // which are valid as far as Go/strconv are concerned, + // must be rejected because TOML says that a fractional + // part consists of '.' followed by 1+ digits. + p.panicf("Invalid float %q: '.' must be followed "+ + "by one or more digits", it.val) + } + val := strings.Replace(it.val, "_", "", -1) + num, err := strconv.ParseFloat(val, 64) + if err != nil { + if e, ok := err.(*strconv.NumError); ok && + e.Err == strconv.ErrRange { + + p.panicf("Float '%s' is out of the range of 64-bit "+ + "IEEE-754 floating-point numbers.", it.val) + } else { + p.panicf("Invalid float value: %q", it.val) + } + } + return num, p.typeOfPrimitive(it) + case itemDatetime: + var t time.Time + var ok bool + var err error + for _, format := range []string{ + "2006-01-02T15:04:05Z07:00", + "2006-01-02T15:04:05", + "2006-01-02", + } { + t, err = time.ParseInLocation(format, it.val, time.Local) + if err == nil { + ok = true + break + } + } + if !ok { + p.panicf("Invalid TOML Datetime: %q.", it.val) + } + return t, p.typeOfPrimitive(it) + case itemArray: + array := make([]interface{}, 0) + types := make([]tomlType, 0) + + for it = p.next(); it.typ != itemArrayEnd; it = p.next() { + if it.typ == itemCommentStart { + p.expect(itemText) + continue + } + + val, typ := p.value(it) + array = append(array, val) + types = append(types, typ) + } + return array, p.typeOfArray(types) + case itemInlineTableStart: + var ( + hash = make(map[string]interface{}) + outerContext = p.context + outerKey = p.currentKey + ) + + p.context = append(p.context, p.currentKey) + p.currentKey = "" + for it := p.next(); it.typ != itemInlineTableEnd; it = p.next() { + if it.typ != itemKeyStart { + p.bug("Expected key start but instead found %q, around line %d", + it.val, p.approxLine) + } + if it.typ == itemCommentStart { + p.expect(itemText) + continue + } + + // retrieve key + k := p.next() + p.approxLine = k.line + kname := p.keyString(k) + + // retrieve value + p.currentKey = kname + val, typ := 
p.value(p.next()) + // make sure we keep metadata up to date + p.setType(kname, typ) + p.ordered = append(p.ordered, p.context.add(p.currentKey)) + hash[kname] = val + } + p.context = outerContext + p.currentKey = outerKey + return hash, tomlHash + } + p.bug("Unexpected value type: %s", it.typ) + panic("unreachable") +} + +// numUnderscoresOK checks whether each underscore in s is surrounded by +// characters that are not underscores. +func numUnderscoresOK(s string) bool { + accept := false + for _, r := range s { + if r == '_' { + if !accept { + return false + } + accept = false + continue + } + accept = true + } + return accept +} + +// numPeriodsOK checks whether every period in s is followed by a digit. +func numPeriodsOK(s string) bool { + period := false + for _, r := range s { + if period && !isDigit(r) { + return false + } + period = r == '.' + } + return !period +} + +// establishContext sets the current context of the parser, +// where the context is either a hash or an array of hashes. Which one is +// set depends on the value of the `array` parameter. +// +// Establishing the context also makes sure that the key isn't a duplicate, and +// will create implicit hashes automatically. +func (p *parser) establishContext(key Key, array bool) { + var ok bool + + // Always start at the top level and drill down for our context. + hashContext := p.mapping + keyContext := make(Key, 0) + + // We only need implicit hashes for key[0:-1] + for _, k := range key[0 : len(key)-1] { + _, ok = hashContext[k] + keyContext = append(keyContext, k) + + // No key? Make an implicit hash and move on. + if !ok { + p.addImplicit(keyContext) + hashContext[k] = make(map[string]interface{}) + } + + // If the hash context is actually an array of tables, then set + // the hash context to the last element in that array. + // + // Otherwise, it better be a table, since this MUST be a key group (by + // virtue of it not being the last element in a key). 
+ switch t := hashContext[k].(type) { + case []map[string]interface{}: + hashContext = t[len(t)-1] + case map[string]interface{}: + hashContext = t + default: + p.panicf("Key '%s' was already created as a hash.", keyContext) + } + } + + p.context = keyContext + if array { + // If this is the first element for this array, then allocate a new + // list of tables for it. + k := key[len(key)-1] + if _, ok := hashContext[k]; !ok { + hashContext[k] = make([]map[string]interface{}, 0, 5) + } + + // Add a new table. But make sure the key hasn't already been used + // for something else. + if hash, ok := hashContext[k].([]map[string]interface{}); ok { + hashContext[k] = append(hash, make(map[string]interface{})) + } else { + p.panicf("Key '%s' was already created and cannot be used as "+ + "an array.", keyContext) + } + } else { + p.setValue(key[len(key)-1], make(map[string]interface{})) + } + p.context = append(p.context, key[len(key)-1]) +} + +// setValue sets the given key to the given value in the current context. +// It will make sure that the key hasn't already been defined, account for +// implicit key groups. +func (p *parser) setValue(key string, value interface{}) { + var tmpHash interface{} + var ok bool + + hash := p.mapping + keyContext := make(Key, 0) + for _, k := range p.context { + keyContext = append(keyContext, k) + if tmpHash, ok = hash[k]; !ok { + p.bug("Context for key '%s' has not been established.", keyContext) + } + switch t := tmpHash.(type) { + case []map[string]interface{}: + // The context is a table of hashes. Pick the most recent table + // defined as the current hash. 
+ hash = t[len(t)-1] + case map[string]interface{}: + hash = t + default: + p.bug("Expected hash to have type 'map[string]interface{}', but "+ + "it has '%T' instead.", tmpHash) + } + } + keyContext = append(keyContext, key) + + if _, ok := hash[key]; ok { + // Typically, if the given key has already been set, then we have + // to raise an error since duplicate keys are disallowed. However, + // it's possible that a key was previously defined implicitly. In this + // case, it is allowed to be redefined concretely. (See the + // `tests/valid/implicit-and-explicit-after.toml` test in `toml-test`.) + // + // But we have to make sure to stop marking it as an implicit. (So that + // another redefinition provokes an error.) + // + // Note that since it has already been defined (as a hash), we don't + // want to overwrite it. So our business is done. + if p.isImplicit(keyContext) { + p.removeImplicit(keyContext) + return + } + + // Otherwise, we have a concrete key trying to override a previous + // key, which is *always* wrong. + p.panicf("Key '%s' has already been defined.", keyContext) + } + hash[key] = value +} + +// setType sets the type of a particular value at a given key. +// It should be called immediately AFTER setValue. +// +// Note that if `key` is empty, then the type given will be applied to the +// current context (which is either a table or an array of tables). +func (p *parser) setType(key string, typ tomlType) { + keyContext := make(Key, 0, len(p.context)+1) + for _, k := range p.context { + keyContext = append(keyContext, k) + } + if len(key) > 0 { // allow type setting for hashes + keyContext = append(keyContext, key) + } + p.types[keyContext.String()] = typ +} + +// addImplicit sets the given Key as having been created implicitly. +func (p *parser) addImplicit(key Key) { + p.implicits[key.String()] = true +} + +// removeImplicit stops tagging the given key as having been implicitly +// created. 
+func (p *parser) removeImplicit(key Key) { + p.implicits[key.String()] = false +} + +// isImplicit returns true if the key group pointed to by the key was created +// implicitly. +func (p *parser) isImplicit(key Key) bool { + return p.implicits[key.String()] +} + +// current returns the full key name of the current context. +func (p *parser) current() string { + if len(p.currentKey) == 0 { + return p.context.String() + } + if len(p.context) == 0 { + return p.currentKey + } + return fmt.Sprintf("%s.%s", p.context, p.currentKey) +} + +func stripFirstNewline(s string) string { + if len(s) == 0 || s[0] != '\n' { + return s + } + return s[1:] +} + +func stripEscapedWhitespace(s string) string { + esc := strings.Split(s, "\\\n") + if len(esc) > 1 { + for i := 1; i < len(esc); i++ { + esc[i] = strings.TrimLeftFunc(esc[i], unicode.IsSpace) + } + } + return strings.Join(esc, "") +} + +func (p *parser) replaceEscapes(str string) string { + var replaced []rune + s := []byte(str) + r := 0 + for r < len(s) { + if s[r] != '\\' { + c, size := utf8.DecodeRune(s[r:]) + r += size + replaced = append(replaced, c) + continue + } + r += 1 + if r >= len(s) { + p.bug("Escape sequence at end of string.") + return "" + } + switch s[r] { + default: + p.bug("Expected valid escape code after \\, but got %q.", s[r]) + return "" + case 'b': + replaced = append(replaced, rune(0x0008)) + r += 1 + case 't': + replaced = append(replaced, rune(0x0009)) + r += 1 + case 'n': + replaced = append(replaced, rune(0x000A)) + r += 1 + case 'f': + replaced = append(replaced, rune(0x000C)) + r += 1 + case 'r': + replaced = append(replaced, rune(0x000D)) + r += 1 + case '"': + replaced = append(replaced, rune(0x0022)) + r += 1 + case '\\': + replaced = append(replaced, rune(0x005C)) + r += 1 + case 'u': + // At this point, we know we have a Unicode escape of the form + // `uXXXX` at [r, r+5). (Because the lexer guarantees this + // for us.) 
+ escaped := p.asciiEscapeToUnicode(s[r+1 : r+5]) + replaced = append(replaced, escaped) + r += 5 + case 'U': + // At this point, we know we have a Unicode escape of the form + // `uXXXX` at [r, r+9). (Because the lexer guarantees this + // for us.) + escaped := p.asciiEscapeToUnicode(s[r+1 : r+9]) + replaced = append(replaced, escaped) + r += 9 + } + } + return string(replaced) +} + +func (p *parser) asciiEscapeToUnicode(bs []byte) rune { + s := string(bs) + hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32) + if err != nil { + p.bug("Could not parse '%s' as a hexadecimal number, but the "+ + "lexer claims it's OK: %s", s, err) + } + if !utf8.ValidRune(rune(hex)) { + p.panicf("Escaped character '\\u%s' is not valid UTF-8.", s) + } + return rune(hex) +} + +func isStringType(ty itemType) bool { + return ty == itemString || ty == itemMultilineString || + ty == itemRawString || ty == itemRawMultilineString +} diff --git a/vendor/github.com/BurntSushi/toml/session.vim b/vendor/github.com/BurntSushi/toml/session.vim new file mode 100644 index 000000000..562164be0 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/session.vim @@ -0,0 +1 @@ +au BufWritePost *.go silent!make tags > /dev/null 2>&1 diff --git a/vendor/github.com/BurntSushi/toml/type_check.go b/vendor/github.com/BurntSushi/toml/type_check.go new file mode 100644 index 000000000..c73f8afc1 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/type_check.go @@ -0,0 +1,91 @@ +package toml + +// tomlType represents any Go type that corresponds to a TOML type. +// While the first draft of the TOML spec has a simplistic type system that +// probably doesn't need this level of sophistication, we seem to be militating +// toward adding real composite types. +type tomlType interface { + typeString() string +} + +// typeEqual accepts any two types and returns true if they are equal. 
+func typeEqual(t1, t2 tomlType) bool { + if t1 == nil || t2 == nil { + return false + } + return t1.typeString() == t2.typeString() +} + +func typeIsHash(t tomlType) bool { + return typeEqual(t, tomlHash) || typeEqual(t, tomlArrayHash) +} + +type tomlBaseType string + +func (btype tomlBaseType) typeString() string { + return string(btype) +} + +func (btype tomlBaseType) String() string { + return btype.typeString() +} + +var ( + tomlInteger tomlBaseType = "Integer" + tomlFloat tomlBaseType = "Float" + tomlDatetime tomlBaseType = "Datetime" + tomlString tomlBaseType = "String" + tomlBool tomlBaseType = "Bool" + tomlArray tomlBaseType = "Array" + tomlHash tomlBaseType = "Hash" + tomlArrayHash tomlBaseType = "ArrayHash" +) + +// typeOfPrimitive returns a tomlType of any primitive value in TOML. +// Primitive values are: Integer, Float, Datetime, String and Bool. +// +// Passing a lexer item other than the following will cause a BUG message +// to occur: itemString, itemBool, itemInteger, itemFloat, itemDatetime. +func (p *parser) typeOfPrimitive(lexItem item) tomlType { + switch lexItem.typ { + case itemInteger: + return tomlInteger + case itemFloat: + return tomlFloat + case itemDatetime: + return tomlDatetime + case itemString: + return tomlString + case itemMultilineString: + return tomlString + case itemRawString: + return tomlString + case itemRawMultilineString: + return tomlString + case itemBool: + return tomlBool + } + p.bug("Cannot infer primitive type of lex item '%s'.", lexItem) + panic("unreachable") +} + +// typeOfArray returns a tomlType for an array given a list of types of its +// values. +// +// In the current spec, if an array is homogeneous, then its type is always +// "Array". If the array is not homogeneous, an error is generated. +func (p *parser) typeOfArray(types []tomlType) tomlType { + // Empty arrays are cool. 
+ if len(types) == 0 { + return tomlArray + } + + theType := types[0] + for _, t := range types[1:] { + if !typeEqual(theType, t) { + p.panicf("Array contains values of type '%s' and '%s', but "+ + "arrays must be homogeneous.", theType, t) + } + } + return tomlArray +} diff --git a/vendor/github.com/BurntSushi/toml/type_fields.go b/vendor/github.com/BurntSushi/toml/type_fields.go new file mode 100644 index 000000000..608997c22 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/type_fields.go @@ -0,0 +1,242 @@ +package toml + +// Struct field handling is adapted from code in encoding/json: +// +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the Go distribution. + +import ( + "reflect" + "sort" + "sync" +) + +// A field represents a single field found in a struct. +type field struct { + name string // the name of the field (`toml` tag included) + tag bool // whether field has a `toml` tag + index []int // represents the depth of an anonymous field + typ reflect.Type // the type of the field +} + +// byName sorts field by name, breaking ties with depth, +// then breaking ties with "name came from toml tag", then +// breaking ties with index sequence. +type byName []field + +func (x byName) Len() int { return len(x) } + +func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byName) Less(i, j int) bool { + if x[i].name != x[j].name { + return x[i].name < x[j].name + } + if len(x[i].index) != len(x[j].index) { + return len(x[i].index) < len(x[j].index) + } + if x[i].tag != x[j].tag { + return x[i].tag + } + return byIndex(x).Less(i, j) +} + +// byIndex sorts field by index sequence. 
+type byIndex []field + +func (x byIndex) Len() int { return len(x) } + +func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byIndex) Less(i, j int) bool { + for k, xik := range x[i].index { + if k >= len(x[j].index) { + return false + } + if xik != x[j].index[k] { + return xik < x[j].index[k] + } + } + return len(x[i].index) < len(x[j].index) +} + +// typeFields returns a list of fields that TOML should recognize for the given +// type. The algorithm is breadth-first search over the set of structs to +// include - the top struct and then any reachable anonymous structs. +func typeFields(t reflect.Type) []field { + // Anonymous fields to explore at the current level and the next. + current := []field{} + next := []field{{typ: t}} + + // Count of queued names for current level and the next. + count := map[reflect.Type]int{} + nextCount := map[reflect.Type]int{} + + // Types already visited at an earlier level. + visited := map[reflect.Type]bool{} + + // Fields found. + var fields []field + + for len(next) > 0 { + current, next = next, current[:0] + count, nextCount = nextCount, map[reflect.Type]int{} + + for _, f := range current { + if visited[f.typ] { + continue + } + visited[f.typ] = true + + // Scan f.typ for fields to include. + for i := 0; i < f.typ.NumField(); i++ { + sf := f.typ.Field(i) + if sf.PkgPath != "" && !sf.Anonymous { // unexported + continue + } + opts := getOptions(sf.Tag) + if opts.skip { + continue + } + index := make([]int, len(f.index)+1) + copy(index, f.index) + index[len(f.index)] = i + + ft := sf.Type + if ft.Name() == "" && ft.Kind() == reflect.Ptr { + // Follow pointer. + ft = ft.Elem() + } + + // Record found field and index sequence. 
+ if opts.name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct { + tagged := opts.name != "" + name := opts.name + if name == "" { + name = sf.Name + } + fields = append(fields, field{name, tagged, index, ft}) + if count[f.typ] > 1 { + // If there were multiple instances, add a second, + // so that the annihilation code will see a duplicate. + // It only cares about the distinction between 1 or 2, + // so don't bother generating any more copies. + fields = append(fields, fields[len(fields)-1]) + } + continue + } + + // Record new anonymous struct to explore in next round. + nextCount[ft]++ + if nextCount[ft] == 1 { + f := field{name: ft.Name(), index: index, typ: ft} + next = append(next, f) + } + } + } + } + + sort.Sort(byName(fields)) + + // Delete all fields that are hidden by the Go rules for embedded fields, + // except that fields with TOML tags are promoted. + + // The fields are sorted in primary order of name, secondary order + // of field index length. Loop over names; for each name, delete + // hidden fields by choosing the one dominant field that survives. + out := fields[:0] + for advance, i := 0, 0; i < len(fields); i += advance { + // One iteration per name. + // Find the sequence of fields with the name of this first field. + fi := fields[i] + name := fi.name + for advance = 1; i+advance < len(fields); advance++ { + fj := fields[i+advance] + if fj.name != name { + break + } + } + if advance == 1 { // Only one field with this name + out = append(out, fi) + continue + } + dominant, ok := dominantField(fields[i : i+advance]) + if ok { + out = append(out, dominant) + } + } + + fields = out + sort.Sort(byIndex(fields)) + + return fields +} + +// dominantField looks through the fields, all of which are known to +// have the same name, to find the single field that dominates the +// others using Go's embedding rules, modified by the presence of +// TOML tags. 
If there are multiple top-level fields, the boolean +// will be false: This condition is an error in Go and we skip all +// the fields. +func dominantField(fields []field) (field, bool) { + // The fields are sorted in increasing index-length order. The winner + // must therefore be one with the shortest index length. Drop all + // longer entries, which is easy: just truncate the slice. + length := len(fields[0].index) + tagged := -1 // Index of first tagged field. + for i, f := range fields { + if len(f.index) > length { + fields = fields[:i] + break + } + if f.tag { + if tagged >= 0 { + // Multiple tagged fields at the same level: conflict. + // Return no field. + return field{}, false + } + tagged = i + } + } + if tagged >= 0 { + return fields[tagged], true + } + // All remaining fields have the same length. If there's more than one, + // we have a conflict (two fields named "X" at the same level) and we + // return no field. + if len(fields) > 1 { + return field{}, false + } + return fields[0], true +} + +var fieldCache struct { + sync.RWMutex + m map[reflect.Type][]field +} + +// cachedTypeFields is like typeFields but uses a cache to avoid repeated work. +func cachedTypeFields(t reflect.Type) []field { + fieldCache.RLock() + f := fieldCache.m[t] + fieldCache.RUnlock() + if f != nil { + return f + } + + // Compute fields without lock. + // Might duplicate effort but won't hold other computations back. 
+ f = typeFields(t) + if f == nil { + f = []field{} + } + + fieldCache.Lock() + if fieldCache.m == nil { + fieldCache.m = map[reflect.Type][]field{} + } + fieldCache.m[t] = f + fieldCache.Unlock() + return f +} diff --git a/vendor/github.com/cloudfoundry-community/go-cfclient/gen_error.go b/vendor/github.com/cloudfoundry-community/go-cfclient/gen_error.go deleted file mode 100644 index 7405c6f3e..000000000 --- a/vendor/github.com/cloudfoundry-community/go-cfclient/gen_error.go +++ /dev/null @@ -1,115 +0,0 @@ -// +build ignore - -package main - -import ( - "bytes" - "go/format" - "io/ioutil" - "log" - "net/http" - "sort" - "strings" - "text/template" - "time" - - "gopkg.in/yaml.v2" -) - -type CFCode int -type HTTPCode int - -type Definition struct { - CFCode `yaml:"-"` - Name string `yaml:"name"` - HTTPCode `yaml:"http_code"` - Message string `yaml:"message"` -} - -func main() { - const url = "https://raw.githubusercontent.com/cloudfoundry/cloud_controller_ng/master/vendor/errors/v2.yml" - - resp, err := http.Get(url) - if err != nil { - log.Fatal(err) - } - defer resp.Body.Close() - - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - log.Fatal(err) - } - - var m map[CFCode]Definition - - if err := yaml.Unmarshal(body, &m); err != nil { - log.Fatal(err) - } - - var definitions []Definition - - for c, d := range m { - d.CFCode = c - definitions = append(definitions, d) - } - - sort.Slice(definitions, func(i, j int) bool { - return definitions[i].CFCode < definitions[j].CFCode - }) - - buf := &bytes.Buffer{} - - if err := packageTemplate.Execute(buf, struct { - Timestamp time.Time - Definitions []Definition - }{ - Timestamp: time.Now(), - Definitions: definitions, - }); err != nil { - log.Fatal(err) - } - - dst, err := format.Source(buf.Bytes()) - if err != nil { - log.Printf("%s", buf.Bytes()) - log.Fatal(err) - } - - if err := ioutil.WriteFile("cf_error.go", dst, 0600); err != nil { - log.Fatal(err) - } -} - -// destutter ensures that s does not end in 
"Error". -func destutter(s string) string { - return strings.TrimSuffix(s, "Error") -} - -var packageTemplate = template.Must(template.New("").Funcs(template.FuncMap{ - "destutter": destutter, -}).Parse(` -package cfclient - -// Code generated by go generate. DO NOT EDIT. -// This file was generated by robots at -// {{ .Timestamp }} - -import "github.com/pkg/errors" - -{{- range .Definitions }} -{{$method := printf "Is%sError" (.Name | destutter) }} -// {{ $method }} returns a boolean indicating whether -// the error is known to report the Cloud Foundry error: -// - Cloud Foundry code: {{ .CFCode }} -// - HTTP code: {{ .HTTPCode }} -// - message: {{ printf "%q" .Message }} -func Is{{ .Name | destutter }}Error(err error) bool { - cause := errors.Cause(err) - cferr, ok := cause.(CloudFoundryError) - if !ok { - return false - } - return cferr.Code == {{ .CFCode }} -} -{{- end }} -`)) diff --git a/vendor/github.com/dsnet/compress/bzip2/internal/sais/sais_gen.go b/vendor/github.com/dsnet/compress/bzip2/internal/sais/sais_gen.go deleted file mode 100644 index 26bf628e1..000000000 --- a/vendor/github.com/dsnet/compress/bzip2/internal/sais/sais_gen.go +++ /dev/null @@ -1,703 +0,0 @@ -// Copyright 2017, Joe Tsai. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. - -// +build ignore - -package main - -import ( - "bytes" - "go/format" - "io/ioutil" - "log" - "os" - "text/template" -) - -func main() { - if len(os.Args) != 3 { - log.Fatalf("Usage: %s GO_TYPE OUTPUT_FILE", os.Args[0]) - } - typ := os.Args[1] - path := os.Args[2] - - b := new(bytes.Buffer) - t := template.Must(template.New("source").Parse(source)) - if err := t.Execute(b, struct { - Type, GeneratedMessage string - }{typ, "// Code generated by sais_gen.go. 
DO NOT EDIT."}); err != nil { - log.Fatalf("Template.Execute error: %v", err) - } - out, err := format.Source(bytes.TrimSpace(b.Bytes())) - if err != nil { - log.Fatalf("format.Source error: %v", err) - } - if err := ioutil.WriteFile(path, out, 0644); err != nil { - log.Fatalf("ioutil.WriteFile error: %v", err) - } -} - -const source = ` -// Copyright 2015, Joe Tsai. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. - -{{.GeneratedMessage}} - -// ==================================================== -// Copyright (c) 2008-2010 Yuta Mori All Rights Reserved. -// -// Permission is hereby granted, free of charge, to any person -// obtaining a copy of this software and associated documentation -// files (the "Software"), to deal in the Software without -// restriction, including without limitation the rights to use, -// copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the -// Software is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice shall be -// included in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES -// OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT -// HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, -// WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -// OTHER DEALINGS IN THE SOFTWARE. 
-// ==================================================== - -package sais - -func getCounts_{{.Type}}(T []{{.Type}}, C []int, n, k int) { - var i int - for i = 0; i < k; i++ { - C[i] = 0 - } - for i = 0; i < n; i++ { - C[T[i]]++ - } -} - -func getBuckets_{{.Type}}(C, B []int, k int, end bool) { - var i, sum int - if end { - for i = 0; i < k; i++ { - sum += C[i] - B[i] = sum - } - } else { - for i = 0; i < k; i++ { - sum += C[i] - B[i] = sum - C[i] - } - } -} - -func sortLMS1_{{.Type}}(T []{{.Type}}, SA, C, B []int, n, k int) { - var b, i, j int - var c0, c1 int - - // Compute SAl. - if &C[0] == &B[0] { - getCounts_{{.Type}}(T, C, n, k) - } - getBuckets_{{.Type}}(C, B, k, false) // Find starts of buckets - j = n - 1 - c1 = int(T[j]) - b = B[c1] - j-- - if int(T[j]) < c1 { - SA[b] = ^j - } else { - SA[b] = j - } - b++ - for i = 0; i < n; i++ { - if j = SA[i]; j > 0 { - if c0 = int(T[j]); c0 != c1 { - B[c1] = b - c1 = c0 - b = B[c1] - } - j-- - if int(T[j]) < c1 { - SA[b] = ^j - } else { - SA[b] = j - } - b++ - SA[i] = 0 - } else if j < 0 { - SA[i] = ^j - } - } - - // Compute SAs. - if &C[0] == &B[0] { - getCounts_{{.Type}}(T, C, n, k) - } - getBuckets_{{.Type}}(C, B, k, true) // Find ends of buckets - c1 = 0 - b = B[c1] - for i = n - 1; i >= 0; i-- { - if j = SA[i]; j > 0 { - if c0 = int(T[j]); c0 != c1 { - B[c1] = b - c1 = c0 - b = B[c1] - } - j-- - b-- - if int(T[j]) > c1 { - SA[b] = ^(j + 1) - } else { - SA[b] = j - } - SA[i] = 0 - } - } -} - -func postProcLMS1_{{.Type}}(T []{{.Type}}, SA []int, n, m int) int { - var i, j, p, q, plen, qlen, name int - var c0, c1 int - var diff bool - - // Compact all the sorted substrings into the first m items of SA. - // 2*m must be not larger than n (provable). - for i = 0; SA[i] < 0; i++ { - SA[i] = ^SA[i] - } - if i < m { - for j, i = i, i+1; ; i++ { - if p = SA[i]; p < 0 { - SA[j] = ^p - j++ - SA[i] = 0 - if j == m { - break - } - } - } - } - - // Store the length of all substrings. 
- i = n - 1 - j = n - 1 - c0 = int(T[n-1]) - for { - c1 = c0 - if i--; i < 0 { - break - } - if c0 = int(T[i]); c0 < c1 { - break - } - } - for i >= 0 { - for { - c1 = c0 - if i--; i < 0 { - break - } - if c0 = int(T[i]); c0 > c1 { - break - } - } - if i >= 0 { - SA[m+((i+1)>>1)] = j - i - j = i + 1 - for { - c1 = c0 - if i--; i < 0 { - break - } - if c0 = int(T[i]); c0 < c1 { - break - } - } - } - } - - // Find the lexicographic names of all substrings. - name = 0 - qlen = 0 - for i, q = 0, n; i < m; i++ { - p = SA[i] - plen = SA[m+(p>>1)] - diff = true - if (plen == qlen) && ((q + plen) < n) { - for j = 0; (j < plen) && (T[p+j] == T[q+j]); j++ { - } - if j == plen { - diff = false - } - } - if diff { - name++ - q = p - qlen = plen - } - SA[m+(p>>1)] = name - } - return name -} - -func sortLMS2_{{.Type}}(T []{{.Type}}, SA, C, B, D []int, n, k int) { - var b, i, j, t, d int - var c0, c1 int - - // Compute SAl. - getBuckets_{{.Type}}(C, B, k, false) // Find starts of buckets - j = n - 1 - c1 = int(T[j]) - b = B[c1] - j-- - if int(T[j]) < c1 { - t = 1 - } else { - t = 0 - } - j += n - if t&1 > 0 { - SA[b] = ^j - } else { - SA[b] = j - } - b++ - for i, d = 0, 0; i < n; i++ { - if j = SA[i]; j > 0 { - if n <= j { - d += 1 - j -= n - } - if c0 = int(T[j]); c0 != c1 { - B[c1] = b - c1 = c0 - b = B[c1] - } - j-- - t = int(c0) << 1 - if int(T[j]) < c1 { - t |= 1 - } - if D[t] != d { - j += n - D[t] = d - } - if t&1 > 0 { - SA[b] = ^j - } else { - SA[b] = j - } - b++ - SA[i] = 0 - } else if j < 0 { - SA[i] = ^j - } - } - for i = n - 1; 0 <= i; i-- { - if SA[i] > 0 { - if SA[i] < n { - SA[i] += n - for j = i - 1; SA[j] < n; j-- { - } - SA[j] -= n - i = j - } - } - } - - // Compute SAs. 
- getBuckets_{{.Type}}(C, B, k, true) // Find ends of buckets - c1 = 0 - b = B[c1] - for i, d = n-1, d+1; i >= 0; i-- { - if j = SA[i]; j > 0 { - if n <= j { - d += 1 - j -= n - } - if c0 = int(T[j]); c0 != c1 { - B[c1] = b - c1 = c0 - b = B[c1] - } - j-- - t = int(c0) << 1 - if int(T[j]) > c1 { - t |= 1 - } - if D[t] != d { - j += n - D[t] = d - } - b-- - if t&1 > 0 { - SA[b] = ^(j + 1) - } else { - SA[b] = j - } - SA[i] = 0 - } - } -} - -func postProcLMS2_{{.Type}}(SA []int, n, m int) int { - var i, j, d, name int - - // Compact all the sorted LMS substrings into the first m items of SA. - name = 0 - for i = 0; SA[i] < 0; i++ { - j = ^SA[i] - if n <= j { - name += 1 - } - SA[i] = j - } - if i < m { - for d, i = i, i+1; ; i++ { - if j = SA[i]; j < 0 { - j = ^j - if n <= j { - name += 1 - } - SA[d] = j - d++ - SA[i] = 0 - if d == m { - break - } - } - } - } - if name < m { - // Store the lexicographic names. - for i, d = m-1, name+1; 0 <= i; i-- { - if j = SA[i]; n <= j { - j -= n - d-- - } - SA[m+(j>>1)] = d - } - } else { - // Unset flags. - for i = 0; i < m; i++ { - if j = SA[i]; n <= j { - j -= n - SA[i] = j - } - } - } - return name -} - -func induceSA_{{.Type}}(T []{{.Type}}, SA, C, B []int, n, k int) { - var b, i, j int - var c0, c1 int - - // Compute SAl. - if &C[0] == &B[0] { - getCounts_{{.Type}}(T, C, n, k) - } - getBuckets_{{.Type}}(C, B, k, false) // Find starts of buckets - j = n - 1 - c1 = int(T[j]) - b = B[c1] - if j > 0 && int(T[j-1]) < c1 { - SA[b] = ^j - } else { - SA[b] = j - } - b++ - for i = 0; i < n; i++ { - j = SA[i] - SA[i] = ^j - if j > 0 { - j-- - if c0 = int(T[j]); c0 != c1 { - B[c1] = b - c1 = c0 - b = B[c1] - } - if j > 0 && int(T[j-1]) < c1 { - SA[b] = ^j - } else { - SA[b] = j - } - b++ - } - } - - // Compute SAs. 
- if &C[0] == &B[0] { - getCounts_{{.Type}}(T, C, n, k) - } - getBuckets_{{.Type}}(C, B, k, true) // Find ends of buckets - c1 = 0 - b = B[c1] - for i = n - 1; i >= 0; i-- { - if j = SA[i]; j > 0 { - j-- - if c0 = int(T[j]); c0 != c1 { - B[c1] = b - c1 = c0 - b = B[c1] - } - b-- - if (j == 0) || (int(T[j-1]) > c1) { - SA[b] = ^j - } else { - SA[b] = j - } - } else { - SA[i] = ^j - } - } -} - -func computeSA_{{.Type}}(T []{{.Type}}, SA []int, fs, n, k int) { - const ( - minBucketSize = 512 - sortLMS2Limit = 0x3fffffff - ) - - var C, B, D, RA []int - var bo int // Offset of B relative to SA - var b, i, j, m, p, q, name, newfs int - var c0, c1 int - var flags uint - - if k <= minBucketSize { - C = make([]int, k) - if k <= fs { - bo = n + fs - k - B = SA[bo:] - flags = 1 - } else { - B = make([]int, k) - flags = 3 - } - } else if k <= fs { - C = SA[n+fs-k:] - if k <= fs-k { - bo = n + fs - 2*k - B = SA[bo:] - flags = 0 - } else if k <= 4*minBucketSize { - B = make([]int, k) - flags = 2 - } else { - B = C - flags = 8 - } - } else { - C = make([]int, k) - B = C - flags = 4 | 8 - } - if n <= sortLMS2Limit && 2 <= (n/k) { - if flags&1 > 0 { - if 2*k <= fs-k { - flags |= 32 - } else { - flags |= 16 - } - } else if flags == 0 && 2*k <= (fs-2*k) { - flags |= 32 - } - } - - // Stage 1: Reduce the problem by at least 1/2. - // Sort all the LMS-substrings. 
- getCounts_{{.Type}}(T, C, n, k) - getBuckets_{{.Type}}(C, B, k, true) // Find ends of buckets - for i = 0; i < n; i++ { - SA[i] = 0 - } - b = -1 - i = n - 1 - j = n - m = 0 - c0 = int(T[n-1]) - for { - c1 = c0 - if i--; i < 0 { - break - } - if c0 = int(T[i]); c0 < c1 { - break - } - } - for i >= 0 { - for { - c1 = c0 - if i--; i < 0 { - break - } - if c0 = int(T[i]); c0 > c1 { - break - } - } - if i >= 0 { - if b >= 0 { - SA[b] = j - } - B[c1]-- - b = B[c1] - j = i - m++ - for { - c1 = c0 - if i--; i < 0 { - break - } - if c0 = int(T[i]); c0 < c1 { - break - } - } - } - } - - if m > 1 { - if flags&(16|32) > 0 { - if flags&16 > 0 { - D = make([]int, 2*k) - } else { - D = SA[bo-2*k:] - } - B[T[j+1]]++ - for i, j = 0, 0; i < k; i++ { - j += C[i] - if B[i] != j { - SA[B[i]] += n - } - D[i] = 0 - D[i+k] = 0 - } - sortLMS2_{{.Type}}(T, SA, C, B, D, n, k) - name = postProcLMS2_{{.Type}}(SA, n, m) - } else { - sortLMS1_{{.Type}}(T, SA, C, B, n, k) - name = postProcLMS1_{{.Type}}(T, SA, n, m) - } - } else if m == 1 { - SA[b] = j + 1 - name = 1 - } else { - name = 0 - } - - // Stage 2: Solve the reduced problem. - // Recurse if names are not yet unique. 
- if name < m { - newfs = n + fs - 2*m - if flags&(1|4|8) == 0 { - if k+name <= newfs { - newfs -= k - } else { - flags |= 8 - } - } - RA = SA[m+newfs:] - for i, j = m+(n>>1)-1, m-1; m <= i; i-- { - if SA[i] != 0 { - RA[j] = SA[i] - 1 - j-- - } - } - computeSA_int(RA, SA, newfs, m, name) - - i = n - 1 - j = m - 1 - c0 = int(T[n-1]) - for { - c1 = c0 - if i--; i < 0 { - break - } - if c0 = int(T[i]); c0 < c1 { - break - } - } - for i >= 0 { - for { - c1 = c0 - if i--; i < 0 { - break - } - if c0 = int(T[i]); c0 > c1 { - break - } - } - if i >= 0 { - RA[j] = i + 1 - j-- - for { - c1 = c0 - if i--; i < 0 { - break - } - if c0 = int(T[i]); c0 < c1 { - break - } - } - } - } - for i = 0; i < m; i++ { - SA[i] = RA[SA[i]] - } - if flags&4 > 0 { - B = make([]int, k) - C = B - } - if flags&2 > 0 { - B = make([]int, k) - } - } - - // Stage 3: Induce the result for the original problem. - if flags&8 > 0 { - getCounts_{{.Type}}(T, C, n, k) - } - // Put all left-most S characters into their buckets. - if m > 1 { - getBuckets_{{.Type}}(C, B, k, true) // Find ends of buckets - i = m - 1 - j = n - p = SA[m-1] - c1 = int(T[p]) - for { - c0 = c1 - q = B[c0] - for q < j { - j-- - SA[j] = 0 - } - for { - j-- - SA[j] = p - if i--; i < 0 { - break - } - p = SA[i] - if c1 = int(T[p]); c1 != c0 { - break - } - } - if i < 0 { - break - } - } - for j > 0 { - j-- - SA[j] = 0 - } - } - induceSA_{{.Type}}(T, SA, C, B, n, k) -} -` diff --git a/vendor/github.com/google/go-github/github/gen-accessors.go b/vendor/github.com/google/go-github/github/gen-accessors.go deleted file mode 100644 index fe92206fc..000000000 --- a/vendor/github.com/google/go-github/github/gen-accessors.go +++ /dev/null @@ -1,332 +0,0 @@ -// Copyright 2017 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// gen-accessors generates accessor methods for structs with pointer fields. 
-// -// It is meant to be used by the go-github authors in conjunction with the -// go generate tool before sending a commit to GitHub. -package main - -import ( - "bytes" - "flag" - "fmt" - "go/ast" - "go/format" - "go/parser" - "go/token" - "io/ioutil" - "log" - "os" - "sort" - "strings" - "text/template" -) - -const ( - fileSuffix = "-accessors.go" -) - -var ( - verbose = flag.Bool("v", false, "Print verbose log messages") - - sourceTmpl = template.Must(template.New("source").Parse(source)) - - // blacklistStructMethod lists "struct.method" combos to skip. - blacklistStructMethod = map[string]bool{ - "RepositoryContent.GetContent": true, - "Client.GetBaseURL": true, - "Client.GetUploadURL": true, - "ErrorResponse.GetResponse": true, - "RateLimitError.GetResponse": true, - "AbuseRateLimitError.GetResponse": true, - } - // blacklistStruct lists structs to skip. - blacklistStruct = map[string]bool{ - "Client": true, - } -) - -func logf(fmt string, args ...interface{}) { - if *verbose { - log.Printf(fmt, args...) - } -} - -func main() { - flag.Parse() - fset := token.NewFileSet() - - pkgs, err := parser.ParseDir(fset, ".", sourceFilter, 0) - if err != nil { - log.Fatal(err) - return - } - - for pkgName, pkg := range pkgs { - t := &templateData{ - filename: pkgName + fileSuffix, - Year: 2017, - Package: pkgName, - Imports: map[string]string{}, - } - for filename, f := range pkg.Files { - logf("Processing %v...", filename) - if err := t.processAST(f); err != nil { - log.Fatal(err) - } - } - if err := t.dump(); err != nil { - log.Fatal(err) - } - } - logf("Done.") -} - -func (t *templateData) processAST(f *ast.File) error { - for _, decl := range f.Decls { - gd, ok := decl.(*ast.GenDecl) - if !ok { - continue - } - for _, spec := range gd.Specs { - ts, ok := spec.(*ast.TypeSpec) - if !ok { - continue - } - // Skip unexported identifiers. 
- if !ts.Name.IsExported() { - logf("Struct %v is unexported; skipping.", ts.Name) - continue - } - // Check if the struct is blacklisted. - if blacklistStruct[ts.Name.Name] { - logf("Struct %v is blacklisted; skipping.", ts.Name) - continue - } - st, ok := ts.Type.(*ast.StructType) - if !ok { - continue - } - for _, field := range st.Fields.List { - se, ok := field.Type.(*ast.StarExpr) - if len(field.Names) == 0 || !ok { - continue - } - - fieldName := field.Names[0] - // Skip unexported identifiers. - if !fieldName.IsExported() { - logf("Field %v is unexported; skipping.", fieldName) - continue - } - // Check if "struct.method" is blacklisted. - if key := fmt.Sprintf("%v.Get%v", ts.Name, fieldName); blacklistStructMethod[key] { - logf("Method %v is blacklisted; skipping.", key) - continue - } - - switch x := se.X.(type) { - case *ast.ArrayType: - t.addArrayType(x, ts.Name.String(), fieldName.String()) - case *ast.Ident: - t.addIdent(x, ts.Name.String(), fieldName.String()) - case *ast.MapType: - t.addMapType(x, ts.Name.String(), fieldName.String()) - case *ast.SelectorExpr: - t.addSelectorExpr(x, ts.Name.String(), fieldName.String()) - default: - logf("processAST: type %q, field %q, unknown %T: %+v", ts.Name, fieldName, x, x) - } - } - } - } - return nil -} - -func sourceFilter(fi os.FileInfo) bool { - return !strings.HasSuffix(fi.Name(), "_test.go") && !strings.HasSuffix(fi.Name(), fileSuffix) -} - -func (t *templateData) dump() error { - if len(t.Getters) == 0 { - logf("No getters for %v; skipping.", t.filename) - return nil - } - - // Sort getters by ReceiverType.FieldName. 
- sort.Sort(byName(t.Getters)) - - var buf bytes.Buffer - if err := sourceTmpl.Execute(&buf, t); err != nil { - return err - } - clean, err := format.Source(buf.Bytes()) - if err != nil { - return err - } - - logf("Writing %v...", t.filename) - return ioutil.WriteFile(t.filename, clean, 0644) -} - -func newGetter(receiverType, fieldName, fieldType, zeroValue string, namedStruct bool) *getter { - return &getter{ - sortVal: strings.ToLower(receiverType) + "." + strings.ToLower(fieldName), - ReceiverVar: strings.ToLower(receiverType[:1]), - ReceiverType: receiverType, - FieldName: fieldName, - FieldType: fieldType, - ZeroValue: zeroValue, - NamedStruct: namedStruct, - } -} - -func (t *templateData) addArrayType(x *ast.ArrayType, receiverType, fieldName string) { - var eltType string - switch elt := x.Elt.(type) { - case *ast.Ident: - eltType = elt.String() - default: - logf("addArrayType: type %q, field %q: unknown elt type: %T %+v; skipping.", receiverType, fieldName, elt, elt) - return - } - - t.Getters = append(t.Getters, newGetter(receiverType, fieldName, "[]"+eltType, "nil", false)) -} - -func (t *templateData) addIdent(x *ast.Ident, receiverType, fieldName string) { - var zeroValue string - var namedStruct = false - switch x.String() { - case "int", "int64": - zeroValue = "0" - case "string": - zeroValue = `""` - case "bool": - zeroValue = "false" - case "Timestamp": - zeroValue = "Timestamp{}" - default: - zeroValue = "nil" - namedStruct = true - } - - t.Getters = append(t.Getters, newGetter(receiverType, fieldName, x.String(), zeroValue, namedStruct)) -} - -func (t *templateData) addMapType(x *ast.MapType, receiverType, fieldName string) { - var keyType string - switch key := x.Key.(type) { - case *ast.Ident: - keyType = key.String() - default: - logf("addMapType: type %q, field %q: unknown key type: %T %+v; skipping.", receiverType, fieldName, key, key) - return - } - - var valueType string - switch value := x.Value.(type) { - case *ast.Ident: - valueType = 
value.String() - default: - logf("addMapType: type %q, field %q: unknown value type: %T %+v; skipping.", receiverType, fieldName, value, value) - return - } - - fieldType := fmt.Sprintf("map[%v]%v", keyType, valueType) - zeroValue := fmt.Sprintf("map[%v]%v{}", keyType, valueType) - t.Getters = append(t.Getters, newGetter(receiverType, fieldName, fieldType, zeroValue, false)) -} - -func (t *templateData) addSelectorExpr(x *ast.SelectorExpr, receiverType, fieldName string) { - if strings.ToLower(fieldName[:1]) == fieldName[:1] { // Non-exported field. - return - } - - var xX string - if xx, ok := x.X.(*ast.Ident); ok { - xX = xx.String() - } - - switch xX { - case "time", "json": - if xX == "json" { - t.Imports["encoding/json"] = "encoding/json" - } else { - t.Imports[xX] = xX - } - fieldType := fmt.Sprintf("%v.%v", xX, x.Sel.Name) - zeroValue := fmt.Sprintf("%v.%v{}", xX, x.Sel.Name) - if xX == "time" && x.Sel.Name == "Duration" { - zeroValue = "0" - } - t.Getters = append(t.Getters, newGetter(receiverType, fieldName, fieldType, zeroValue, false)) - default: - logf("addSelectorExpr: xX %q, type %q, field %q: unknown x=%+v; skipping.", xX, receiverType, fieldName, x) - } -} - -type templateData struct { - filename string - Year int - Package string - Imports map[string]string - Getters []*getter -} - -type getter struct { - sortVal string // Lower-case version of "ReceiverType.FieldName". - ReceiverVar string // The one-letter variable name to match the ReceiverType. - ReceiverType string - FieldName string - FieldType string - ZeroValue string - NamedStruct bool // Getter for named struct. -} - -type byName []*getter - -func (b byName) Len() int { return len(b) } -func (b byName) Less(i, j int) bool { return b[i].sortVal < b[j].sortVal } -func (b byName) Swap(i, j int) { b[i], b[j] = b[j], b[i] } - -const source = `// Copyright {{.Year}} The go-github AUTHORS. All rights reserved. 
-// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Code generated by gen-accessors; DO NOT EDIT. - -package {{.Package}} -{{with .Imports}} -import ( - {{- range . -}} - "{{.}}" - {{end -}} -) -{{end}} -{{range .Getters}} -{{if .NamedStruct}} -// Get{{.FieldName}} returns the {{.FieldName}} field. -func ({{.ReceiverVar}} *{{.ReceiverType}}) Get{{.FieldName}}() *{{.FieldType}} { - if {{.ReceiverVar}} == nil { - return {{.ZeroValue}} - } - return {{.ReceiverVar}}.{{.FieldName}} -} -{{else}} -// Get{{.FieldName}} returns the {{.FieldName}} field if it's non-nil, zero value otherwise. -func ({{.ReceiverVar}} *{{.ReceiverType}}) Get{{.FieldName}}() {{.FieldType}} { - if {{.ReceiverVar}} == nil || {{.ReceiverVar}}.{{.FieldName}} == nil { - return {{.ZeroValue}} - } - return *{{.ReceiverVar}}.{{.FieldName}} -} -{{end}} -{{end}} -` diff --git a/vendor/github.com/hashicorp/consul-template/LICENSE b/vendor/github.com/hashicorp/consul-template/LICENSE new file mode 100644 index 000000000..82b4de97c --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/LICENSE @@ -0,0 +1,353 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. 
that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. 
For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. 
+ + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. 
You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. 
You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. 
If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. 
Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. 
diff --git a/vendor/github.com/hashicorp/consul-template/child/child.go b/vendor/github.com/hashicorp/consul-template/child/child.go new file mode 100644 index 000000000..3c94816f5 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/child/child.go @@ -0,0 +1,428 @@ +package child + +import ( + "errors" + "fmt" + "io" + "log" + "math/rand" + "os" + "os/exec" + "strings" + "sync" + "syscall" + "time" +) + +func init() { + // Seed the default rand Source with current time to produce better random + // numbers used with splay + rand.Seed(time.Now().UnixNano()) +} + +var ( + // ErrMissingCommand is the error returned when no command is specified + // to run. + ErrMissingCommand = errors.New("missing command") + + // ExitCodeOK is the default OK exit code. + ExitCodeOK = 0 + + // ExitCodeError is the default error code returned when the child exits with + // an error without a more specific code. + ExitCodeError = 127 +) + +// Child is a wrapper around a child process which can be used to send signals +// and manage the processes' lifecycle. +type Child struct { + sync.RWMutex + + stdin io.Reader + stdout, stderr io.Writer + command string + args []string + env []string + + timeout time.Duration + + reloadSignal os.Signal + + killSignal os.Signal + killTimeout time.Duration + + splay time.Duration + + // cmd is the actual child process under management. + cmd *exec.Cmd + + // exitCh is the channel where the processes exit will be returned. + exitCh chan int + + // stopLock is the mutex to lock when stopping. stopCh is the circuit breaker + // to force-terminate any waiting splays to kill the process now. stopped is + // a boolean that tells us if we have previously been stopped. + stopLock sync.RWMutex + stopCh chan struct{} + stopped bool +} + +// NewInput is input to the NewChild function. +type NewInput struct { + // Stdin is the io.Reader where input will come from. This is sent directly to + // the child process. 
Stdout and Stderr represent the io.Writer objects where + // the child process will send output and errorput. + Stdin io.Reader + Stdout, Stderr io.Writer + + // Command is the name of the command to execute. Args are the list of + // arguments to pass when starting the command. + Command string + Args []string + + // Timeout is the maximum amount of time to allow the command to execute. If + // set to 0, the command is permitted to run infinitely. + Timeout time.Duration + + // Env represents the condition of the child processes' environment + // variables. Only these environment variables will be given to the child, so + // it is the responsibility of the caller to include the parent processes + // environment, if required. This should be in the key=value format. + Env []string + + // ReloadSignal is the signal to send to reload this process. This value may + // be nil. + ReloadSignal os.Signal + + // KillSignal is the signal to send to gracefully kill this process. This + // value may be nil. + KillSignal os.Signal + + // KillTimeout is the amount of time to wait for the process to gracefully + // terminate before force-killing. + KillTimeout time.Duration + + // Splay is the maximum random amount of time to wait before sending signals. + // This option helps reduce the thundering herd problem by effectively + // sleeping for a random amount of time before sending the signal. This + // prevents multiple processes from all signaling at the same time. This value + // may be zero (which disables the splay entirely). + Splay time.Duration +} + +// New creates a new child process for management with high-level APIs for +// sending signals to the child process, restarting the child process, and +// gracefully terminating the child process. 
+func New(i *NewInput) (*Child, error) { + if i == nil { + i = new(NewInput) + } + + if len(i.Command) == 0 { + return nil, ErrMissingCommand + } + + child := &Child{ + stdin: i.Stdin, + stdout: i.Stdout, + stderr: i.Stderr, + command: i.Command, + args: i.Args, + env: i.Env, + timeout: i.Timeout, + reloadSignal: i.ReloadSignal, + killSignal: i.KillSignal, + killTimeout: i.KillTimeout, + splay: i.Splay, + stopCh: make(chan struct{}, 1), + } + + return child, nil +} + +// ExitCh returns the current exit channel for this child process. This channel +// may change if the process is restarted, so implementers must not cache this +// value. +func (c *Child) ExitCh() <-chan int { + c.RLock() + defer c.RUnlock() + return c.exitCh +} + +// Pid returns the pid of the child process. If no child process exists, 0 is +// returned. +func (c *Child) Pid() int { + c.RLock() + defer c.RUnlock() + return c.pid() +} + +// Command returns the human-formatted command with arguments. +func (c *Child) Command() string { + list := append([]string{c.command}, c.args...) + return strings.Join(list, " ") +} + +// Start starts and begins execution of the child process. A buffered channel +// is returned which is where the command's exit code will be returned upon +// exit. Any errors that occur prior to starting the command will be returned +// as the second error argument, but any errors returned by the command after +// execution will be returned as a non-zero value over the exit code channel. +func (c *Child) Start() error { + log.Printf("[INFO] (child) spawning: %s", c.Command()) + c.Lock() + defer c.Unlock() + return c.start() +} + +// Signal sends the signal to the child process, returning any errors that +// occur. +func (c *Child) Signal(s os.Signal) error { + log.Printf("[INFO] (child) receiving signal %q", s.String()) + c.RLock() + defer c.RUnlock() + return c.signal(s) +} + +// Reload sends the reload signal to the child process and does not wait for a +// response. 
If no reload signal was provided, the process is restarted and +// replaces the process attached to this Child. +func (c *Child) Reload() error { + if c.reloadSignal == nil { + log.Printf("[INFO] (child) restarting process") + + // Take a full lock because start is going to replace the process. We also + // want to make sure that no other routines attempt to send reload signals + // during this transition. + c.Lock() + defer c.Unlock() + + c.kill(false) + return c.start() + } + + log.Printf("[INFO] (child) reloading process") + + // We only need a read lock here because neither the process nor the exit + // channel are changing. + c.RLock() + defer c.RUnlock() + + return c.reload() +} + +// Kill sends the kill signal to the child process and waits for successful +// termination. If no kill signal is defined, the process is killed with the +// most aggressive kill signal. If the process does not gracefully stop within +// the provided KillTimeout, the process is force-killed. If a splay was +// provided, this function will sleep for a random period of time between 0 and +// the provided splay value to reduce the thundering herd problem. This function +// does not return any errors because it guarantees the process will be dead by +// the return of the function call. +func (c *Child) Kill() { + log.Printf("[INFO] (child) killing process") + c.Lock() + defer c.Unlock() + c.kill(false) +} + +// Stop behaves almost identical to Kill except it suppresses future processes +// from being started by this child and it prevents the killing of the child +// process from sending its value back up the exit channel. This is useful +// when doing a graceful shutdown of an application. +func (c *Child) Stop() { + c.internalStop(false) +} + +// StopImmediately behaves almost identical to Stop except it does not wait +// for any random splay if configured. This is used for performing a fast +// shutdown of consul-template and its children when a kill signal is received. 
+func (c *Child) StopImmediately() { + c.internalStop(true) +} + +func (c *Child) internalStop(immediately bool) { + log.Printf("[INFO] (child) stopping process") + + c.Lock() + defer c.Unlock() + + c.stopLock.Lock() + defer c.stopLock.Unlock() + if c.stopped { + log.Printf("[WARN] (child) already stopped") + return + } + c.kill(immediately) + close(c.stopCh) + c.stopped = true +} + +func (c *Child) start() error { + cmd := exec.Command(c.command, c.args...) + cmd.Stdin = c.stdin + cmd.Stdout = c.stdout + cmd.Stderr = c.stderr + cmd.Env = c.env + if err := cmd.Start(); err != nil { + return err + } + c.cmd = cmd + + // Create a new exitCh so that previously invoked commands (if any) don't + // cause us to exit, and start a goroutine to wait for that process to end. + exitCh := make(chan int, 1) + go func() { + var code int + err := cmd.Wait() + if err == nil { + code = ExitCodeOK + } else { + code = ExitCodeError + if exiterr, ok := err.(*exec.ExitError); ok { + if status, ok := exiterr.Sys().(syscall.WaitStatus); ok { + code = status.ExitStatus() + } + } + } + + // If the child is in the process of killing, do not send a response back + // down the exit channel. + c.stopLock.RLock() + defer c.stopLock.RUnlock() + if c.stopped { + return + } + + select { + case <-c.stopCh: + case exitCh <- code: + } + }() + + c.exitCh = exitCh + + // If a timeout was given, start the timer to wait for the child to exit + if c.timeout != 0 { + select { + case code := <-exitCh: + if code != 0 { + return fmt.Errorf( + "command exited with a non-zero exit status:\n"+ + "\n"+ + " %s\n"+ + "\n"+ + "This is assumed to be a failure. 
Please ensure the command\n"+ + "exits with a zero exit status.", + c.Command(), + ) + } + case <-time.After(c.timeout): + // Force-kill the process + c.stopLock.Lock() + defer c.stopLock.Unlock() + if c.cmd != nil && c.cmd.Process != nil { + c.cmd.Process.Kill() + } + + return fmt.Errorf( + "command did not exit within %q:\n"+ + "\n"+ + " %s\n"+ + "\n"+ + "Commands must exit in a timely manner in order for processing to\n"+ + "continue. Consider using a process supervisor or utilizing the\n"+ + "built-in exec mode instead.", + c.timeout, + c.Command(), + ) + } + } + + return nil +} + +func (c *Child) pid() int { + if !c.running() { + return 0 + } + return c.cmd.Process.Pid +} + +func (c *Child) signal(s os.Signal) error { + if !c.running() { + return nil + } + return c.cmd.Process.Signal(s) +} + +func (c *Child) reload() error { + select { + case <-c.stopCh: + case <-c.randomSplay(): + } + + return c.signal(c.reloadSignal) +} + +func (c *Child) kill(immediately bool) { + if !c.running() { + return + } + + exited := false + process := c.cmd.Process + + if c.cmd.ProcessState != nil { + log.Printf("[DEBUG] (child) Kill() called but process dead; not waiting for splay.") + } else if immediately { + log.Printf("[DEBUG] (child) Kill() called but performing immediate shutdown; not waiting for splay.") + } else { + select { + case <-c.stopCh: + case <-c.randomSplay(): + } + } + + if c.killSignal != nil { + if err := process.Signal(c.killSignal); err == nil { + // Wait a few seconds for it to exit + killCh := make(chan struct{}, 1) + go func() { + defer close(killCh) + process.Wait() + }() + + select { + case <-c.stopCh: + case <-killCh: + exited = true + case <-time.After(c.killTimeout): + } + } + } + + if !exited { + process.Kill() + } + + c.cmd = nil +} + +func (c *Child) running() bool { + return c.cmd != nil && c.cmd.Process != nil +} + +func (c *Child) randomSplay() <-chan time.Time { + if c.splay == 0 { + return time.After(0) + } + + ns := c.splay.Nanoseconds() + 
offset := rand.Int63n(ns) + t := time.Duration(offset) + + log.Printf("[DEBUG] (child) waiting %.2fs for random splay", t.Seconds()) + + return time.After(t) +} diff --git a/vendor/github.com/hashicorp/consul-template/config/auth.go b/vendor/github.com/hashicorp/consul-template/config/auth.go new file mode 100644 index 000000000..207c78136 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/config/auth.go @@ -0,0 +1,142 @@ +package config + +import ( + "errors" + "fmt" + "strings" +) + +var ( + // ErrAuthStringEmpty is the error returned with authentication is provided, + // but empty. + ErrAuthStringEmpty = errors.New("auth: cannot be empty") +) + +// AuthConfig is the HTTP basic authentication data. +type AuthConfig struct { + Enabled *bool `mapstructure:"enabled"` + Username *string `mapstructure:"username"` + Password *string `mapstructure:"password"` +} + +// DefaultAuthConfig is the default configuration. +func DefaultAuthConfig() *AuthConfig { + return &AuthConfig{} +} + +// ParseAuthConfig parses the auth into username:password. +func ParseAuthConfig(s string) (*AuthConfig, error) { + if s == "" { + return nil, ErrAuthStringEmpty + } + + var a AuthConfig + + if strings.Contains(s, ":") { + split := strings.SplitN(s, ":", 2) + a.Username = String(split[0]) + a.Password = String(split[1]) + } else { + a.Username = String(s) + } + + return &a, nil +} + +// Copy returns a deep copy of this configuration. +func (c *AuthConfig) Copy() *AuthConfig { + if c == nil { + return nil + } + + var o AuthConfig + o.Enabled = c.Enabled + o.Username = c.Username + o.Password = c.Password + return &o +} + +// Merge combines all values in this configuration with the values in the other +// configuration, with values in the other configuration taking precedence. +// Maps and slices are merged, most other values are overwritten. Complex +// structs define their own merge functionality. 
+func (c *AuthConfig) Merge(o *AuthConfig) *AuthConfig { + if c == nil { + if o == nil { + return nil + } + return o.Copy() + } + + if o == nil { + return c.Copy() + } + + r := c.Copy() + + if o.Enabled != nil { + r.Enabled = o.Enabled + } + + if o.Username != nil { + r.Username = o.Username + } + + if o.Password != nil { + r.Password = o.Password + } + + return r +} + +// Finalize ensures there no nil pointers. +func (c *AuthConfig) Finalize() { + if c.Enabled == nil { + c.Enabled = Bool(false || + StringPresent(c.Username) || + StringPresent(c.Password)) + } + if c.Username == nil { + c.Username = String("") + } + + if c.Password == nil { + c.Password = String("") + } + + if c.Enabled == nil { + c.Enabled = Bool(*c.Username != "" || *c.Password != "") + } +} + +// GoString defines the printable version of this struct. +func (c *AuthConfig) GoString() string { + if c == nil { + return "(*AuthConfig)(nil)" + } + + return fmt.Sprintf("&AuthConfig{"+ + "Enabled:%s, "+ + "Username:%s, "+ + "Password:%s"+ + "}", + BoolGoString(c.Enabled), + StringGoString(c.Username), + StringGoString(c.Password), + ) +} + +// String is the string representation of this authentication. If authentication +// is not enabled, this returns the empty string. The username and password will +// be separated by a colon. 
+func (c *AuthConfig) String() string { + if !BoolVal(c.Enabled) { + return "" + } + + if c.Password != nil { + return fmt.Sprintf("%s:%s", StringVal(c.Username), StringVal(c.Password)) + } + + return StringVal(c.Username) +} diff --git a/vendor/github.com/hashicorp/consul-template/config/config.go b/vendor/github.com/hashicorp/consul-template/config/config.go new file mode 100644 index 000000000..b02704093 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/config/config.go @@ -0,0 +1,606 @@ +package config + +import ( + "fmt" + "io/ioutil" + "log" + "os" + "path/filepath" + "reflect" + "strconv" + "strings" + "syscall" + "time" + + "github.com/hashicorp/consul-template/signals" + "github.com/hashicorp/hcl" + homedir "github.com/mitchellh/go-homedir" + "github.com/mitchellh/mapstructure" + + "github.com/pkg/errors" +) + +const ( + // DefaultLogLevel is the default logging level. + DefaultLogLevel = "WARN" + + // DefaultMaxStale is the default staleness permitted. This enables stale + // queries by default for performance reasons. + DefaultMaxStale = 2 * time.Second + + // DefaultReloadSignal is the default signal for reload. + DefaultReloadSignal = syscall.SIGHUP + + // DefaultKillSignal is the default signal for termination. + DefaultKillSignal = syscall.SIGINT +) + +var ( + // homePath is the location to the user's home directory. + homePath, _ = homedir.Dir() +) + +// Config is used to configure Consul Template +type Config struct { + // Consul is the configuration for connecting to a Consul cluster. + Consul *ConsulConfig `mapstructure:"consul"` + + // Dedup is used to configure the dedup settings + Dedup *DedupConfig `mapstructure:"deduplicate"` + + // Exec is the configuration for exec/supervise mode. + Exec *ExecConfig `mapstructure:"exec"` + + // KillSignal is the signal to listen for a graceful terminate event. + KillSignal *os.Signal `mapstructure:"kill_signal"` + + // LogLevel is the level with which to log for this config. 
+ LogLevel *string `mapstructure:"log_level"` + + // MaxStale is the maximum amount of time for staleness from Consul as given + // by LastContact. If supplied, Consul Template will query all servers instead + // of just the leader. + MaxStale *time.Duration `mapstructure:"max_stale"` + + // PidFile is the path on disk where a PID file should be written containing + // this processes PID. + PidFile *string `mapstructure:"pid_file"` + + // ReloadSignal is the signal to listen for a reload event. + ReloadSignal *os.Signal `mapstructure:"reload_signal"` + + // Syslog is the configuration for syslog. + Syslog *SyslogConfig `mapstructure:"syslog"` + + // Templates is the list of templates. + Templates *TemplateConfigs `mapstructure:"template"` + + // Vault is the configuration for connecting to a vault server. + Vault *VaultConfig `mapstructure:"vault"` + + // Wait is the quiescence timers. + Wait *WaitConfig `mapstructure:"wait"` + + // Additional command line options + // Run once, executing each template exactly once, and exit + Once bool +} + +// Copy returns a deep copy of the current configuration. This is useful because +// the nested data structures may be shared. +func (c *Config) Copy() *Config { + if c == nil { + return nil + } + var o Config + + o.Consul = c.Consul + + if c.Consul != nil { + o.Consul = c.Consul.Copy() + } + + if c.Dedup != nil { + o.Dedup = c.Dedup.Copy() + } + + if c.Exec != nil { + o.Exec = c.Exec.Copy() + } + + o.KillSignal = c.KillSignal + + o.LogLevel = c.LogLevel + + o.MaxStale = c.MaxStale + + o.PidFile = c.PidFile + + o.ReloadSignal = c.ReloadSignal + + if c.Syslog != nil { + o.Syslog = c.Syslog.Copy() + } + + if c.Templates != nil { + o.Templates = c.Templates.Copy() + } + + if c.Vault != nil { + o.Vault = c.Vault.Copy() + } + + if c.Wait != nil { + o.Wait = c.Wait.Copy() + } + + o.Once = c.Once + + return &o +} + +// Merge merges the values in config into this config object. 
Values in the +// config object overwrite the values in c. +func (c *Config) Merge(o *Config) *Config { + if c == nil { + if o == nil { + return nil + } + return o.Copy() + } + + if o == nil { + return c.Copy() + } + + r := c.Copy() + + if o.Consul != nil { + r.Consul = r.Consul.Merge(o.Consul) + } + + if o.Dedup != nil { + r.Dedup = r.Dedup.Merge(o.Dedup) + } + + if o.Exec != nil { + r.Exec = r.Exec.Merge(o.Exec) + } + + if o.KillSignal != nil { + r.KillSignal = o.KillSignal + } + + if o.LogLevel != nil { + r.LogLevel = o.LogLevel + } + + if o.MaxStale != nil { + r.MaxStale = o.MaxStale + } + + if o.PidFile != nil { + r.PidFile = o.PidFile + } + + if o.ReloadSignal != nil { + r.ReloadSignal = o.ReloadSignal + } + + if o.Syslog != nil { + r.Syslog = r.Syslog.Merge(o.Syslog) + } + + if o.Templates != nil { + r.Templates = r.Templates.Merge(o.Templates) + } + + if o.Vault != nil { + r.Vault = r.Vault.Merge(o.Vault) + } + + if o.Wait != nil { + r.Wait = r.Wait.Merge(o.Wait) + } + + r.Once = o.Once + + return r +} + +// Parse parses the given string contents as a config +func Parse(s string) (*Config, error) { + var shadow interface{} + if err := hcl.Decode(&shadow, s); err != nil { + return nil, errors.Wrap(err, "error decoding config") + } + + // Convert to a map and flatten the keys we want to flatten + parsed, ok := shadow.(map[string]interface{}) + if !ok { + return nil, errors.New("error converting config") + } + + flattenKeys(parsed, []string{ + "auth", + "consul", + "consul.auth", + "consul.retry", + "consul.ssl", + "consul.transport", + "deduplicate", + "env", + "exec", + "exec.env", + "ssl", + "syslog", + "vault", + "vault.retry", + "vault.ssl", + "vault.transport", + "wait", + }) + + // FlattenFlatten keys belonging to the templates. We cannot do this above + // because it is an array of templates. 
+ if templates, ok := parsed["template"].([]map[string]interface{}); ok { + for _, template := range templates { + flattenKeys(template, []string{ + "env", + "exec", + "exec.env", + "wait", + }) + } + } + + // Create a new, empty config + var c Config + + // Use mapstructure to populate the basic config fields + var md mapstructure.Metadata + decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ + DecodeHook: mapstructure.ComposeDecodeHookFunc( + ConsulStringToStructFunc(), + StringToFileModeFunc(), + signals.StringToSignalFunc(), + StringToWaitDurationHookFunc(), + mapstructure.StringToSliceHookFunc(","), + mapstructure.StringToTimeDurationHookFunc(), + ), + ErrorUnused: true, + Metadata: &md, + Result: &c, + }) + if err != nil { + return nil, errors.Wrap(err, "mapstructure decoder creation failed") + } + if err := decoder.Decode(parsed); err != nil { + return nil, errors.Wrap(err, "mapstructure decode failed") + } + + return &c, nil +} + +// Must returns a config object that must compile. If there are any errors, this +// function will panic. This is most useful in testing or constants. +func Must(s string) *Config { + c, err := Parse(s) + if err != nil { + log.Fatal(err) + } + return c +} + +// TestConfig returns a default, finalized config, with the provided +// configuration taking precedence. +func TestConfig(c *Config) *Config { + d := DefaultConfig().Merge(c) + d.Finalize() + return d +} + +// FromFile reads the configuration file at the given path and returns a new +// Config struct with the data populated. +func FromFile(path string) (*Config, error) { + c, err := ioutil.ReadFile(path) + if err != nil { + return nil, errors.Wrap(err, "from file: "+path) + } + + config, err := Parse(string(c)) + if err != nil { + return nil, errors.Wrap(err, "from file: "+path) + } + return config, nil +} + +// FromPath iterates and merges all configuration files in a given +// directory, returning the resulting config. 
+func FromPath(path string) (*Config, error) { + // Ensure the given filepath exists + if _, err := os.Stat(path); os.IsNotExist(err) { + return nil, errors.Wrap(err, "missing file/folder: "+path) + } + + // Check if a file was given or a path to a directory + stat, err := os.Stat(path) + if err != nil { + return nil, errors.Wrap(err, "failed stating file: "+path) + } + + // Recursively parse directories, single load files + if stat.Mode().IsDir() { + // Ensure the given filepath has at least one config file + _, err := ioutil.ReadDir(path) + if err != nil { + return nil, errors.Wrap(err, "failed listing dir: "+path) + } + + // Create a blank config to merge off of + var c *Config + + // Potential bug: Walk does not follow symlinks! + err = filepath.Walk(path, func(path string, info os.FileInfo, err error) error { + // If WalkFunc had an error, just return it + if err != nil { + return err + } + + // Do nothing for directories + if info.IsDir() { + return nil + } + + // Parse and merge the config + newConfig, err := FromFile(path) + if err != nil { + return err + } + c = c.Merge(newConfig) + + return nil + }) + + if err != nil { + return nil, errors.Wrap(err, "walk error") + } + + return c, nil + } else if stat.Mode().IsRegular() { + return FromFile(path) + } + + return nil, fmt.Errorf("unknown filetype: %q", stat.Mode().String()) +} + +// GoString defines the printable version of this struct. 
+func (c *Config) GoString() string { + if c == nil { + return "(*Config)(nil)" + } + + return fmt.Sprintf("&Config{"+ + "Consul:%#v, "+ + "Dedup:%#v, "+ + "Exec:%#v, "+ + "KillSignal:%s, "+ + "LogLevel:%s, "+ + "MaxStale:%s, "+ + "PidFile:%s, "+ + "ReloadSignal:%s, "+ + "Syslog:%#v, "+ + "Templates:%#v, "+ + "Vault:%#v, "+ + "Wait:%#v,"+ + "Once:%#v"+ + "}", + c.Consul, + c.Dedup, + c.Exec, + SignalGoString(c.KillSignal), + StringGoString(c.LogLevel), + TimeDurationGoString(c.MaxStale), + StringGoString(c.PidFile), + SignalGoString(c.ReloadSignal), + c.Syslog, + c.Templates, + c.Vault, + c.Wait, + c.Once, + ) +} + +// Show diff between 2 Configs, useful in tests +func (expected *Config) Diff(actual *Config) string { + var b strings.Builder + fmt.Fprintf(&b, "\n") + ve := reflect.ValueOf(*expected) + va := reflect.ValueOf(*actual) + ct := ve.Type() + + for i := 0; i < ve.NumField(); i++ { + fc := ve.Field(i) + fo := va.Field(i) + if !reflect.DeepEqual(fc.Interface(), fo.Interface()) { + fmt.Fprintf(&b, "%s:\n", ct.Field(i).Name) + fmt.Fprintf(&b, "\texp: %#v\n", fc.Interface()) + fmt.Fprintf(&b, "\tact: %#v\n", fo.Interface()) + } + } + + return b.String() +} + +// DefaultConfig returns the default configuration struct. Certain environment +// variables may be set which control the values for the default configuration. +func DefaultConfig() *Config { + return &Config{ + Consul: DefaultConsulConfig(), + Dedup: DefaultDedupConfig(), + Exec: DefaultExecConfig(), + Syslog: DefaultSyslogConfig(), + Templates: DefaultTemplateConfigs(), + Vault: DefaultVaultConfig(), + Wait: DefaultWaitConfig(), + } +} + +// Finalize ensures all configuration options have the default values, so it +// is safe to dereference the pointers later down the line. It also +// intelligently tries to activate stanzas that should be "enabled" because +// data was given, but the user did not explicitly add "Enabled: true" to the +// configuration. 
+func (c *Config) Finalize() { + if c == nil { + return + } + if c.Consul == nil { + c.Consul = DefaultConsulConfig() + } + c.Consul.Finalize() + + if c.Dedup == nil { + c.Dedup = DefaultDedupConfig() + } + c.Dedup.Finalize() + + if c.Exec == nil { + c.Exec = DefaultExecConfig() + } + c.Exec.Finalize() + + if c.KillSignal == nil { + c.KillSignal = Signal(DefaultKillSignal) + } + + if c.LogLevel == nil { + c.LogLevel = stringFromEnv([]string{ + "CT_LOG", + "CONSUL_TEMPLATE_LOG", + }, DefaultLogLevel) + } + + if c.MaxStale == nil { + c.MaxStale = TimeDuration(DefaultMaxStale) + } + + if c.PidFile == nil { + c.PidFile = String("") + } + + if c.ReloadSignal == nil { + c.ReloadSignal = Signal(DefaultReloadSignal) + } + + if c.Syslog == nil { + c.Syslog = DefaultSyslogConfig() + } + c.Syslog.Finalize() + + if c.Templates == nil { + c.Templates = DefaultTemplateConfigs() + } + c.Templates.Finalize() + + if c.Vault == nil { + c.Vault = DefaultVaultConfig() + } + c.Vault.Finalize() + + if c.Wait == nil { + c.Wait = DefaultWaitConfig() + } + c.Wait.Finalize() + + // disable Wait if -once was specified + if c.Once { + c.Wait = &WaitConfig{Enabled: Bool(false)} + } +} + +func stringFromEnv(list []string, def string) *string { + for _, s := range list { + if v := os.Getenv(s); v != "" { + return String(strings.TrimSpace(v)) + } + } + return String(def) +} + +func stringFromFile(list []string, def string) *string { + for _, s := range list { + c, err := ioutil.ReadFile(s) + if err == nil { + return String(strings.TrimSpace(string(c))) + } + } + return String(def) +} + +func antiboolFromEnv(list []string, def bool) *bool { + for _, s := range list { + if v := os.Getenv(s); v != "" { + b, err := strconv.ParseBool(v) + if err == nil { + return Bool(!b) + } + } + } + return Bool(def) +} + +func boolFromEnv(list []string, def bool) *bool { + for _, s := range list { + if v := os.Getenv(s); v != "" { + b, err := strconv.ParseBool(v) + if err == nil { + return Bool(b) + } + } + } + 
return Bool(def) +} + +// flattenKeys is a function that takes a map[string]interface{} and recursively +// flattens any keys that are a []map[string]interface{} where the key is in the +// given list of keys. +func flattenKeys(m map[string]interface{}, keys []string) { + keyMap := make(map[string]struct{}) + for _, key := range keys { + keyMap[key] = struct{}{} + } + + var flatten func(map[string]interface{}, string) + flatten = func(m map[string]interface{}, parent string) { + for k, v := range m { + // Calculate the map key, since it could include a parent. + mapKey := k + if parent != "" { + mapKey = parent + "." + k + } + + if _, ok := keyMap[mapKey]; !ok { + continue + } + + switch typed := v.(type) { + case []map[string]interface{}: + if len(typed) > 0 { + last := typed[len(typed)-1] + flatten(last, mapKey) + m[k] = last + } else { + m[k] = nil + } + case map[string]interface{}: + flatten(typed, mapKey) + m[k] = typed + default: + m[k] = v + } + } + } + + flatten(m, "") +} diff --git a/vendor/github.com/hashicorp/consul-template/config/consul.go b/vendor/github.com/hashicorp/consul-template/config/consul.go new file mode 100644 index 000000000..ca79ba8b6 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/config/consul.go @@ -0,0 +1,172 @@ +package config + +import "fmt" + +// ConsulConfig contains the configurations options for connecting to a +// Consul cluster. +type ConsulConfig struct { + // Address is the address of the Consul server. It may be an IP or FQDN. + Address *string + + // Auth is the HTTP basic authentication for communicating with Consul. + Auth *AuthConfig `mapstructure:"auth"` + + // Retry is the configuration for specifying how to behave on failure. + Retry *RetryConfig `mapstructure:"retry"` + + // SSL indicates we should use a secure connection while talking to + // Consul. This requires Consul to be configured to serve HTTPS. 
+ SSL *SSLConfig `mapstructure:"ssl"` + + // Token is the token to communicate with Consul securely. + Token *string + + // Transport configures the low-level network connection details. + Transport *TransportConfig `mapstructure:"transport"` +} + +// DefaultConsulConfig returns a configuration that is populated with the +// default values. +func DefaultConsulConfig() *ConsulConfig { + return &ConsulConfig{ + Auth: DefaultAuthConfig(), + Retry: DefaultRetryConfig(), + SSL: DefaultSSLConfig(), + Transport: DefaultTransportConfig(), + } +} + +// Copy returns a deep copy of this configuration. +func (c *ConsulConfig) Copy() *ConsulConfig { + if c == nil { + return nil + } + + var o ConsulConfig + + o.Address = c.Address + + if c.Auth != nil { + o.Auth = c.Auth.Copy() + } + + if c.Retry != nil { + o.Retry = c.Retry.Copy() + } + + if c.SSL != nil { + o.SSL = c.SSL.Copy() + } + + o.Token = c.Token + + if c.Transport != nil { + o.Transport = c.Transport.Copy() + } + + return &o +} + +// Merge combines all values in this configuration with the values in the other +// configuration, with values in the other configuration taking precedence. +// Maps and slices are merged, most other values are overwritten. Complex +// structs define their own merge functionality. +func (c *ConsulConfig) Merge(o *ConsulConfig) *ConsulConfig { + if c == nil { + if o == nil { + return nil + } + return o.Copy() + } + + if o == nil { + return c.Copy() + } + + r := c.Copy() + + if o.Address != nil { + r.Address = o.Address + } + + if o.Auth != nil { + r.Auth = r.Auth.Merge(o.Auth) + } + + if o.Retry != nil { + r.Retry = r.Retry.Merge(o.Retry) + } + + if o.SSL != nil { + r.SSL = r.SSL.Merge(o.SSL) + } + + if o.Token != nil { + r.Token = o.Token + } + + if o.Transport != nil { + r.Transport = r.Transport.Merge(o.Transport) + } + + return r +} + +// Finalize ensures there no nil pointers. 
+func (c *ConsulConfig) Finalize() { + if c.Address == nil { + c.Address = stringFromEnv([]string{ + "CONSUL_HTTP_ADDR", + }, "") + } + + if c.Auth == nil { + c.Auth = DefaultAuthConfig() + } + c.Auth.Finalize() + + if c.Retry == nil { + c.Retry = DefaultRetryConfig() + } + c.Retry.Finalize() + + if c.SSL == nil { + c.SSL = DefaultSSLConfig() + } + c.SSL.Finalize() + + if c.Token == nil { + c.Token = stringFromEnv([]string{ + "CONSUL_TOKEN", + "CONSUL_HTTP_TOKEN", + }, "") + } + + if c.Transport == nil { + c.Transport = DefaultTransportConfig() + } + c.Transport.Finalize() +} + +// GoString defines the printable version of this struct. +func (c *ConsulConfig) GoString() string { + if c == nil { + return "(*ConsulConfig)(nil)" + } + + return fmt.Sprintf("&ConsulConfig{"+ + "Address:%s, "+ + "Auth:%#v, "+ + "Retry:%#v, "+ + "SSL:%#v, "+ + "Token:%t, "+ + "Transport:%#v"+ + "}", + StringGoString(c.Address), + c.Auth, + c.Retry, + c.SSL, + StringPresent(c.Token), + c.Transport, + ) +} diff --git a/vendor/github.com/hashicorp/consul-template/config/convert.go b/vendor/github.com/hashicorp/consul-template/config/convert.go new file mode 100644 index 000000000..0fc45bddc --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/config/convert.go @@ -0,0 +1,197 @@ +package config + +import ( + "fmt" + "os" + "time" + + "github.com/hashicorp/consul-template/signals" +) + +// Bool returns a pointer to the given bool. +func Bool(b bool) *bool { + return &b +} + +// BoolVal returns the value of the boolean at the pointer, or false if the +// pointer is nil. +func BoolVal(b *bool) bool { + if b == nil { + return false + } + return *b +} + +// BoolGoString returns the value of the boolean for printing in a string. +func BoolGoString(b *bool) string { + if b == nil { + return "(*bool)(nil)" + } + return fmt.Sprintf("%t", *b) +} + +// BoolPresent returns a boolean indicating if the pointer is nil, or if the +// pointer is pointing to the zero value.. 
+func BoolPresent(b *bool) bool { + if b == nil { + return false + } + return true +} + +// FileMode returns a pointer to the given os.FileMode. +func FileMode(o os.FileMode) *os.FileMode { + return &o +} + +// FileModeVal returns the value of the os.FileMode at the pointer, or 0 if the +// pointer is nil. +func FileModeVal(o *os.FileMode) os.FileMode { + if o == nil { + return 0 + } + return *o +} + +// FileModeGoString returns the value of the os.FileMode for printing in a +// string. +func FileModeGoString(o *os.FileMode) string { + if o == nil { + return "(*os.FileMode)(nil)" + } + return fmt.Sprintf("%q", *o) +} + +// FileModePresent returns a boolean indicating if the pointer is nil, or if +// the pointer is pointing to the zero value. +func FileModePresent(o *os.FileMode) bool { + if o == nil { + return false + } + return *o != 0 +} + +// Int returns a pointer to the given int. +func Int(i int) *int { + return &i +} + +// IntVal returns the value of the int at the pointer, or 0 if the pointer is +// nil. +func IntVal(i *int) int { + if i == nil { + return 0 + } + return *i +} + +// IntGoString returns the value of the int for printing in a string. +func IntGoString(i *int) string { + if i == nil { + return "(*int)(nil)" + } + return fmt.Sprintf("%d", *i) +} + +// IntPresent returns a boolean indicating if the pointer is nil, or if the +// pointer is pointing to the zero value. +func IntPresent(i *int) bool { + if i == nil { + return false + } + return *i != 0 +} + +// Signal returns a pointer to the given os.Signal. +func Signal(s os.Signal) *os.Signal { + return &s +} + +// SignalVal returns the value of the os.Signal at the pointer, or 0 if the +// pointer is nil. +func SignalVal(s *os.Signal) os.Signal { + if s == nil { + return (os.Signal)(nil) + } + return *s +} + +// SignalGoString returns the value of the os.Signal for printing in a string. 
+func SignalGoString(s *os.Signal) string { + if s == nil { + return "(*os.Signal)(nil)" + } + if *s == nil { + return "" + } + return fmt.Sprintf("%q", *s) +} + +// SignalPresent returns a boolean indicating if the pointer is nil, or if the pointer is pointing to the zero value.. +func SignalPresent(s *os.Signal) bool { + if s == nil { + return false + } + return *s != signals.SIGNIL +} + +// String returns a pointer to the given string. +func String(s string) *string { + return &s +} + +// StringVal returns the value of the string at the pointer, or "" if the +// pointer is nil. +func StringVal(s *string) string { + if s == nil { + return "" + } + return *s +} + +// StringGoString returns the value of the string for printing in a string. +func StringGoString(s *string) string { + if s == nil { + return "(*string)(nil)" + } + return fmt.Sprintf("%q", *s) +} + +// StringPresent returns a boolean indicating if the pointer is nil, or if the pointer is pointing to the zero value.. +func StringPresent(s *string) bool { + if s == nil { + return false + } + return *s != "" +} + +// TimeDuration returns a pointer to the given time.Duration. +func TimeDuration(t time.Duration) *time.Duration { + return &t +} + +// TimeDurationVal returns the value of the string at the pointer, or 0 if the +// pointer is nil. +func TimeDurationVal(t *time.Duration) time.Duration { + if t == nil { + return time.Duration(0) + } + return *t +} + +// TimeDurationGoString returns the value of the time.Duration for printing in a +// string. +func TimeDurationGoString(t *time.Duration) string { + if t == nil { + return "(*time.Duration)(nil)" + } + return fmt.Sprintf("%s", t) +} + +// TimeDurationPresent returns a boolean indicating if the pointer is nil, or if the pointer is pointing to the zero value.. 
+func TimeDurationPresent(t *time.Duration) bool { + if t == nil { + return false + } + return *t != 0 +} diff --git a/vendor/github.com/hashicorp/consul-template/config/dedup.go b/vendor/github.com/hashicorp/consul-template/config/dedup.go new file mode 100644 index 000000000..247855a93 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/config/dedup.go @@ -0,0 +1,132 @@ +package config + +import ( + "fmt" + "time" +) + +const ( + // DefaultDedupPrefix is the default prefix used for deduplication mode. + DefaultDedupPrefix = "consul-template/dedup/" + + // DefaultDedupTTL is the default TTL for deduplicate mode. + DefaultDedupTTL = 15 * time.Second + + // DefaultDedupMaxStale is the default max staleness for the deduplication + // manager. + DefaultDedupMaxStale = DefaultMaxStale +) + +// DedupConfig is used to enable the de-duplication mode, which depends +// on electing a leader per-template and watching of a key. This is used +// to reduce the cost of many instances of CT running the same template. +type DedupConfig struct { + // Controls if deduplication mode is enabled + Enabled *bool `mapstructure:"enabled"` + + // MaxStale is the maximum amount of time to allow for stale queries. + MaxStale *time.Duration `mapstructure:"max_stale"` + + // Controls the KV prefix used. Defaults to defaultDedupPrefix + Prefix *string `mapstructure:"prefix"` + + // TTL is the Session TTL used for lock acquisition, defaults to 15 seconds. + TTL *time.Duration `mapstructure:"ttl"` +} + +// DefaultDedupConfig returns a configuration that is populated with the +// default values. +func DefaultDedupConfig() *DedupConfig { + return &DedupConfig{} +} + +// Copy returns a deep copy of this configuration. 
+func (c *DedupConfig) Copy() *DedupConfig { + if c == nil { + return nil + } + + var o DedupConfig + o.Enabled = c.Enabled + o.MaxStale = c.MaxStale + o.Prefix = c.Prefix + o.TTL = c.TTL + return &o +} + +// Merge combines all values in this configuration with the values in the other +// configuration, with values in the other configuration taking precedence. +// Maps and slices are merged, most other values are overwritten. Complex +// structs define their own merge functionality. +func (c *DedupConfig) Merge(o *DedupConfig) *DedupConfig { + if c == nil { + if o == nil { + return nil + } + return o.Copy() + } + + if o == nil { + return c.Copy() + } + + r := c.Copy() + + if o.Enabled != nil { + r.Enabled = o.Enabled + } + + if o.MaxStale != nil { + r.MaxStale = o.MaxStale + } + + if o.Prefix != nil { + r.Prefix = o.Prefix + } + + if o.TTL != nil { + r.TTL = o.TTL + } + + return r +} + +// Finalize ensures there no nil pointers. +func (c *DedupConfig) Finalize() { + if c.Enabled == nil { + c.Enabled = Bool(false || + TimeDurationPresent(c.MaxStale) || + StringPresent(c.Prefix) || + TimeDurationPresent(c.TTL)) + } + + if c.MaxStale == nil { + c.MaxStale = TimeDuration(DefaultDedupMaxStale) + } + + if c.Prefix == nil { + c.Prefix = String(DefaultDedupPrefix) + } + + if c.TTL == nil { + c.TTL = TimeDuration(DefaultDedupTTL) + } +} + +// GoString defines the printable version of this struct. 
+func (c *DedupConfig) GoString() string { + if c == nil { + return "(*DedupConfig)(nil)" + } + return fmt.Sprintf("&DedupConfig{"+ + "Enabled:%s, "+ + "MaxStale:%s, "+ + "Prefix:%s, "+ + "TTL:%s"+ + "}", + BoolGoString(c.Enabled), + TimeDurationGoString(c.MaxStale), + StringGoString(c.Prefix), + TimeDurationGoString(c.TTL), + ) +} diff --git a/vendor/github.com/hashicorp/consul-template/config/env.go b/vendor/github.com/hashicorp/consul-template/config/env.go new file mode 100644 index 000000000..a9a4b1ebe --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/config/env.go @@ -0,0 +1,209 @@ +package config + +import ( + "fmt" + "os" + "path/filepath" + "strings" +) + +// EnvConfig is an embeddable struct for things that accept environment +// variable filtering. You should not use this directly and it is only public +// for mapstructure's decoding. +type EnvConfig struct { + // BlacklistEnv specifies a list of environment variables to explicitly + // exclude from the list of environment variables populated to the child. + // If both WhitelistEnv and BlacklistEnv are provided, BlacklistEnv takes + // precedence over the values in WhitelistEnv. + Blacklist []string `mapstructure:"blacklist"` + + // CustomEnv specifies custom environment variables to pass to the child + // process. These are provided programmatically, override any environment + // variables of the same name, are ignored from whitelist/blacklist, and + // are still included even if PristineEnv is set to true. + Custom []string `mapstructure:"custom"` + + // PristineEnv specifies if the child process should inherit the parent's + // environment. + Pristine *bool `mapstructure:"pristine"` + + // WhitelistEnv specifies a list of environment variables to exclusively + // include in the list of environment variables populated to the child. + Whitelist []string `mapstructure:"whitelist"` +} + +// DefaultEnvConfig returns a configuration that is populated with the +// default values. 
+func DefaultEnvConfig() *EnvConfig { + return &EnvConfig{} +} + +// Copy returns a deep copy of this configuration. +func (c *EnvConfig) Copy() *EnvConfig { + if c == nil { + return nil + } + + var o EnvConfig + + if c.Blacklist != nil { + o.Blacklist = append([]string{}, c.Blacklist...) + } + + if c.Custom != nil { + o.Custom = append([]string{}, c.Custom...) + } + + o.Pristine = c.Pristine + + if c.Whitelist != nil { + o.Whitelist = append([]string{}, c.Whitelist...) + } + + return &o +} + +// Merge combines all values in this configuration with the values in the other +// configuration, with values in the other configuration taking precedence. +// Maps and slices are merged, most other values are overwritten. Complex +// structs define their own merge functionality. +func (c *EnvConfig) Merge(o *EnvConfig) *EnvConfig { + if c == nil { + if o == nil { + return nil + } + return o.Copy() + } + + if o == nil { + return c.Copy() + } + + r := c.Copy() + + if o.Blacklist != nil { + r.Blacklist = append(r.Blacklist, o.Blacklist...) + } + + if o.Custom != nil { + r.Custom = append(r.Custom, o.Custom...) + } + + if o.Pristine != nil { + r.Pristine = o.Pristine + } + + if o.Whitelist != nil { + r.Whitelist = append(r.Whitelist, o.Whitelist...) + } + + return r +} + +// Env calculates and returns the finalized environment for this exec +// configuration. It takes into account pristine, custom environment, whitelist, +// and blacklist values. +func (c *EnvConfig) Env() []string { + // In pristine mode, just return the custom environment. If the user did not + // specify a custom environment, just return the empty slice to force an + // empty environment. We cannot return nil here because the later call to + // os/exec will think we want to inherit the parent. 
+ if BoolVal(c.Pristine) { + if len(c.Custom) > 0 { + return c.Custom + } + return []string{} + } + + // Pull all the key-value pairs out of the environment + environ := os.Environ() + keys := make([]string, len(environ)) + env := make(map[string]string, len(environ)) + for i, v := range environ { + list := strings.SplitN(v, "=", 2) + keys[i] = list[0] + env[list[0]] = list[1] + } + + // anyGlobMatch is a helper function which checks if any of the given globs + // match the string. + anyGlobMatch := func(s string, patterns []string) bool { + for _, pattern := range patterns { + if matched, _ := filepath.Match(pattern, s); matched { + return true + } + } + return false + } + + // Pull out any envvars that match the whitelist. + if len(c.Whitelist) > 0 { + newKeys := make([]string, 0, len(keys)) + for _, k := range keys { + if anyGlobMatch(k, c.Whitelist) { + newKeys = append(newKeys, k) + } + } + keys = newKeys + } + + // Remove any envvars that match the blacklist. + if len(c.Blacklist) > 0 { + newKeys := make([]string, 0, len(keys)) + for _, k := range keys { + if !anyGlobMatch(k, c.Blacklist) { + newKeys = append(newKeys, k) + } + } + keys = newKeys + } + + // Build the final list using only the filtered keys. + finalEnv := make([]string, 0, len(keys)+len(c.Custom)) + for _, k := range keys { + finalEnv = append(finalEnv, k+"="+env[k]) + } + + // Append remaining custom environment. + finalEnv = append(finalEnv, c.Custom...) + + return finalEnv +} + +// Finalize ensures there no nil pointers. +func (c *EnvConfig) Finalize() { + if c.Blacklist == nil { + c.Blacklist = []string{} + } + + if c.Custom == nil { + c.Custom = []string{} + } + + if c.Pristine == nil { + c.Pristine = Bool(false) + } + + if c.Whitelist == nil { + c.Whitelist = []string{} + } +} + +// GoString defines the printable version of this struct. 
+func (c *EnvConfig) GoString() string { + if c == nil { + return "(*EnvConfig)(nil)" + } + + return fmt.Sprintf("&EnvConfig{"+ + "Blacklist:%v, "+ + "Custom:%v, "+ + "Pristine:%s, "+ + "Whitelist:%v"+ + "}", + c.Blacklist, + c.Custom, + BoolGoString(c.Pristine), + c.Whitelist, + ) +} diff --git a/vendor/github.com/hashicorp/consul-template/config/exec.go b/vendor/github.com/hashicorp/consul-template/config/exec.go new file mode 100644 index 000000000..22c7070a4 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/config/exec.go @@ -0,0 +1,216 @@ +package config + +import ( + "fmt" + "os" + "syscall" + "time" +) + +const ( + // DefaultExecKillSignal is the default signal to send to the process to + // tell it to gracefully terminate. + DefaultExecKillSignal = syscall.SIGINT + + // DefaultExecKillTimeout is the maximum amount of time to wait for the + // process to gracefully terminate before force-killing it. + DefaultExecKillTimeout = 30 * time.Second + + // DefaultExecTimeout is the default amount of time to wait for a + // command to exit. By default, this is disabled, which means the command + // is allowed to run for an infinite amount of time. + DefaultExecTimeout = 0 * time.Second +) + +var ( + // DefaultExecReloadSignal is the default signal to send to the process to + // tell it to reload its configuration. + DefaultExecReloadSignal = (os.Signal)(nil) +) + +// ExecConfig is used to configure the application when it runs in +// exec/supervise mode. +type ExecConfig struct { + // Command is the command to execute and watch as a child process. + Command *string `mapstructure:"command"` + + // Enabled controls if this exec is enabled. + Enabled *bool `mapstructure:"enabled"` + + // EnvConfig is the environmental customizations. + Env *EnvConfig `mapstructure:"env"` + + // KillSignal is the signal to send to the command to kill it gracefully. The + // default value is "SIGTERM". 
+ KillSignal *os.Signal `mapstructure:"kill_signal"` + + // KillTimeout is the amount of time to give the process to cleanup before + // hard-killing it. + KillTimeout *time.Duration `mapstructure:"kill_timeout"` + + // ReloadSignal is the signal to send to the child process when a template + // changes. This tells the child process that templates have + ReloadSignal *os.Signal `mapstructure:"reload_signal"` + + // Splay is the maximum amount of random time to wait to signal or kill the + // process. By default this is disabled, but it can be set to low values to + // reduce the "thundering herd" problem where all tasks are restarted at once. + Splay *time.Duration `mapstructure:"splay"` + + // Timeout is the maximum amount of time to wait for a command to complete. + // By default, this is 0, which means "wait forever". + Timeout *time.Duration `mapstructure:"timeout"` +} + +// DefaultExecConfig returns a configuration that is populated with the +// default values. +func DefaultExecConfig() *ExecConfig { + return &ExecConfig{ + Env: DefaultEnvConfig(), + } +} + +// Copy returns a deep copy of this configuration. +func (c *ExecConfig) Copy() *ExecConfig { + if c == nil { + return nil + } + + var o ExecConfig + + o.Command = c.Command + + o.Enabled = c.Enabled + + if c.Env != nil { + o.Env = c.Env.Copy() + } + + o.KillSignal = c.KillSignal + + o.KillTimeout = c.KillTimeout + + o.ReloadSignal = c.ReloadSignal + + o.Splay = c.Splay + + o.Timeout = c.Timeout + + return &o +} + +// Merge combines all values in this configuration with the values in the other +// configuration, with values in the other configuration taking precedence. +// Maps and slices are merged, most other values are overwritten. Complex +// structs define their own merge functionality. 
+func (c *ExecConfig) Merge(o *ExecConfig) *ExecConfig { + if c == nil { + if o == nil { + return nil + } + return o.Copy() + } + + if o == nil { + return c.Copy() + } + + r := c.Copy() + + if o.Command != nil { + r.Command = o.Command + } + + if o.Enabled != nil { + r.Enabled = o.Enabled + } + + if o.Env != nil { + r.Env = r.Env.Merge(o.Env) + } + + if o.KillSignal != nil { + r.KillSignal = o.KillSignal + } + + if o.KillTimeout != nil { + r.KillTimeout = o.KillTimeout + } + + if o.ReloadSignal != nil { + r.ReloadSignal = o.ReloadSignal + } + + if o.Splay != nil { + r.Splay = o.Splay + } + + if o.Timeout != nil { + r.Timeout = o.Timeout + } + + return r +} + +// Finalize ensures there no nil pointers. +func (c *ExecConfig) Finalize() { + if c.Enabled == nil { + c.Enabled = Bool(StringPresent(c.Command)) + } + + if c.Command == nil { + c.Command = String("") + } + + if c.Env == nil { + c.Env = DefaultEnvConfig() + } + c.Env.Finalize() + + if c.KillSignal == nil { + c.KillSignal = Signal(DefaultExecKillSignal) + } + + if c.KillTimeout == nil { + c.KillTimeout = TimeDuration(DefaultExecKillTimeout) + } + + if c.ReloadSignal == nil { + c.ReloadSignal = Signal(DefaultExecReloadSignal) + } + + if c.Splay == nil { + c.Splay = TimeDuration(0 * time.Second) + } + + if c.Timeout == nil { + c.Timeout = TimeDuration(DefaultExecTimeout) + } +} + +// GoString defines the printable version of this struct. 
+func (c *ExecConfig) GoString() string { + if c == nil { + return "(*ExecConfig)(nil)" + } + + return fmt.Sprintf("&ExecConfig{"+ + "Command:%s, "+ + "Enabled:%s, "+ + "Env:%#v, "+ + "KillSignal:%s, "+ + "KillTimeout:%s, "+ + "ReloadSignal:%s, "+ + "Splay:%s, "+ + "Timeout:%s"+ + "}", + StringGoString(c.Command), + BoolGoString(c.Enabled), + c.Env, + SignalGoString(c.KillSignal), + TimeDurationGoString(c.KillTimeout), + SignalGoString(c.ReloadSignal), + TimeDurationGoString(c.Splay), + TimeDurationGoString(c.Timeout), + ) +} diff --git a/vendor/github.com/hashicorp/consul-template/config/mapstructure.go b/vendor/github.com/hashicorp/consul-template/config/mapstructure.go new file mode 100644 index 000000000..64ea53933 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/config/mapstructure.go @@ -0,0 +1,75 @@ +package config + +import ( + "log" + "os" + "reflect" + "strconv" + + "github.com/mitchellh/mapstructure" +) + +// StringToFileModeFunc returns a function that converts strings to os.FileMode +// value. This is designed to be used with mapstructure for parsing out a +// filemode value. +func StringToFileModeFunc() mapstructure.DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(os.FileMode(0)) { + return data, nil + } + + // Convert it by parsing + v, err := strconv.ParseUint(data.(string), 8, 12) + if err != nil { + return data, err + } + return os.FileMode(v), nil + } +} + +// StringToWaitDurationHookFunc returns a function that converts strings to wait +// value. This is designed to be used with mapstructure for parsing out a wait +// value. 
+func StringToWaitDurationHookFunc() mapstructure.DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(WaitConfig{}) { + return data, nil + } + + // Convert it by parsing + return ParseWaitConfig(data.(string)) + } +} + +// ConsulStringToStructFunc checks if the value set for the key should actually +// be a struct and sets the appropriate value in the struct. This is for +// backwards-compatability with older versions of Consul Template. +func ConsulStringToStructFunc() mapstructure.DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}) (interface{}, error) { + if t == reflect.TypeOf(ConsulConfig{}) && f.Kind() == reflect.String { + log.Println("[WARN] consul now accepts a stanza instead of a string. " + + "Update your configuration files and change consul = \"\" to " + + "consul { } instead.") + return &ConsulConfig{ + Address: String(data.(string)), + }, nil + } + + return data, nil + } +} diff --git a/vendor/github.com/hashicorp/consul-template/config/retry.go b/vendor/github.com/hashicorp/consul-template/config/retry.go new file mode 100644 index 000000000..0a4346cff --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/config/retry.go @@ -0,0 +1,170 @@ +package config + +import ( + "fmt" + "math" + "time" +) + +const ( + // DefaultRetryAttempts is the default number of maximum retry attempts. + DefaultRetryAttempts = 12 + + // DefaultRetryBackoff is the default base for the exponential backoff + // algorithm. + DefaultRetryBackoff = 250 * time.Millisecond + + // DefaultRetryMaxBackoff is the default maximum of backoff time + DefaultRetryMaxBackoff = 1 * time.Minute +) + +// RetryFunc is the signature of a function that supports retries. +type RetryFunc func(int) (bool, time.Duration) + +// RetryConfig is a shared configuration for upstreams that support retires on +// failure. 
+type RetryConfig struct { + // Attempts is the total number of maximum attempts to retry before letting + // the error fall through. + // 0 means unlimited. + Attempts *int + + // Backoff is the base of the exponentialbackoff. This number will be + // multiplied by the next power of 2 on each iteration. + Backoff *time.Duration + + // MaxBackoff is an upper limit to the sleep time between retries + // A MaxBackoff of zero means there is no limit to the exponential growth of the backoff. + MaxBackoff *time.Duration `mapstructure:"max_backoff"` + + // Enabled signals if this retry is enabled. + Enabled *bool +} + +// DefaultRetryConfig returns a configuration that is populated with the +// default values. +func DefaultRetryConfig() *RetryConfig { + return &RetryConfig{} +} + +// Copy returns a deep copy of this configuration. +func (c *RetryConfig) Copy() *RetryConfig { + if c == nil { + return nil + } + + var o RetryConfig + + o.Attempts = c.Attempts + + o.Backoff = c.Backoff + + o.MaxBackoff = c.MaxBackoff + + o.Enabled = c.Enabled + + return &o +} + +// Merge combines all values in this configuration with the values in the other +// configuration, with values in the other configuration taking precedence. +// Maps and slices are merged, most other values are overwritten. Complex +// structs define their own merge functionality. +func (c *RetryConfig) Merge(o *RetryConfig) *RetryConfig { + if c == nil { + if o == nil { + return nil + } + return o.Copy() + } + + if o == nil { + return c.Copy() + } + + r := c.Copy() + + if o.Attempts != nil { + r.Attempts = o.Attempts + } + + if o.Backoff != nil { + r.Backoff = o.Backoff + } + + if o.MaxBackoff != nil { + r.MaxBackoff = o.MaxBackoff + } + + if o.Enabled != nil { + r.Enabled = o.Enabled + } + + return r +} + +// RetryFunc returns the retry function associated with this configuration. 
+func (c *RetryConfig) RetryFunc() RetryFunc { + return func(retry int) (bool, time.Duration) { + if !BoolVal(c.Enabled) { + return false, 0 + } + + if IntVal(c.Attempts) > 0 && retry > IntVal(c.Attempts)-1 { + return false, 0 + } + + baseSleep := TimeDurationVal(c.Backoff) + maxSleep := TimeDurationVal(c.MaxBackoff) + + if maxSleep > 0 { + attemptsTillMaxBackoff := int(math.Log2(maxSleep.Seconds() / baseSleep.Seconds())) + if retry > attemptsTillMaxBackoff { + return true, maxSleep + } + } + + base := math.Pow(2, float64(retry)) + sleep := time.Duration(base) * baseSleep + + return true, sleep + } +} + +// Finalize ensures there no nil pointers. +func (c *RetryConfig) Finalize() { + if c.Attempts == nil { + c.Attempts = Int(DefaultRetryAttempts) + } + + if c.Backoff == nil { + c.Backoff = TimeDuration(DefaultRetryBackoff) + } + + if c.MaxBackoff == nil { + c.MaxBackoff = TimeDuration(DefaultRetryMaxBackoff) + } + + if c.Enabled == nil { + c.Enabled = Bool(true) + } +} + +// GoString defines the printable version of this struct. +func (c *RetryConfig) GoString() string { + if c == nil { + return "(*RetryConfig)(nil)" + } + + return fmt.Sprintf("&RetryConfig{"+ + "Attempts:%s, "+ + "Backoff:%s, "+ + "MaxBackoff:%s, "+ + "Enabled:%s"+ + "}", + IntGoString(c.Attempts), + TimeDurationGoString(c.Backoff), + TimeDurationGoString(c.MaxBackoff), + BoolGoString(c.Enabled), + ) +} diff --git a/vendor/github.com/hashicorp/consul-template/config/ssl.go b/vendor/github.com/hashicorp/consul-template/config/ssl.go new file mode 100644 index 000000000..ab3b77e61 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/config/ssl.go @@ -0,0 +1,153 @@ +package config + +import "fmt" + +const ( + // DefaultSSLVerify is the default value for SSL verification. + DefaultSSLVerify = true +) + +// SSLConfig is the configuration for SSL. 
+type SSLConfig struct { + CaCert *string `mapstructure:"ca_cert"` + CaPath *string `mapstructure:"ca_path"` + Cert *string `mapstructure:"cert"` + Enabled *bool `mapstructure:"enabled"` + Key *string `mapstructure:"key"` + ServerName *string `mapstructure:"server_name"` + Verify *bool `mapstructure:"verify"` +} + +// DefaultSSLConfig returns a configuration that is populated with the +// default values. +func DefaultSSLConfig() *SSLConfig { + return &SSLConfig{} +} + +// Copy returns a deep copy of this configuration. +func (c *SSLConfig) Copy() *SSLConfig { + if c == nil { + return nil + } + + var o SSLConfig + o.CaCert = c.CaCert + o.CaPath = c.CaPath + o.Cert = c.Cert + o.Enabled = c.Enabled + o.Key = c.Key + o.ServerName = c.ServerName + o.Verify = c.Verify + return &o +} + +// Merge combines all values in this configuration with the values in the other +// configuration, with values in the other configuration taking precedence. +// Maps and slices are merged, most other values are overwritten. Complex +// structs define their own merge functionality. +func (c *SSLConfig) Merge(o *SSLConfig) *SSLConfig { + if c == nil { + if o == nil { + return nil + } + return o.Copy() + } + + if o == nil { + return c.Copy() + } + + r := c.Copy() + + if o.Cert != nil { + r.Cert = o.Cert + } + + if o.CaCert != nil { + r.CaCert = o.CaCert + } + + if o.CaPath != nil { + r.CaPath = o.CaPath + } + + if o.Enabled != nil { + r.Enabled = o.Enabled + } + + if o.Key != nil { + r.Key = o.Key + } + + if o.ServerName != nil { + r.ServerName = o.ServerName + } + + if o.Verify != nil { + r.Verify = o.Verify + } + + return r +} + +// Finalize ensures there no nil pointers. 
+func (c *SSLConfig) Finalize() { + if c.Enabled == nil { + c.Enabled = Bool(false || + StringPresent(c.Cert) || + StringPresent(c.CaCert) || + StringPresent(c.CaPath) || + StringPresent(c.Key) || + StringPresent(c.ServerName) || + BoolPresent(c.Verify)) + } + + if c.Cert == nil { + c.Cert = String("") + } + + if c.CaCert == nil { + c.CaCert = String("") + } + + if c.CaPath == nil { + c.CaPath = String("") + } + + if c.Key == nil { + c.Key = String("") + } + + if c.ServerName == nil { + c.ServerName = String("") + } + + if c.Verify == nil { + c.Verify = Bool(DefaultSSLVerify) + } +} + +// GoString defines the printable version of this struct. +func (c *SSLConfig) GoString() string { + if c == nil { + return "(*SSLConfig)(nil)" + } + + return fmt.Sprintf("&SSLConfig{"+ + "CaCert:%s, "+ + "CaPath:%s, "+ + "Cert:%s, "+ + "Enabled:%s, "+ + "Key:%s, "+ + "ServerName:%s, "+ + "Verify:%s"+ + "}", + StringGoString(c.CaCert), + StringGoString(c.CaPath), + StringGoString(c.Cert), + BoolGoString(c.Enabled), + StringGoString(c.Key), + StringGoString(c.ServerName), + BoolGoString(c.Verify), + ) +} diff --git a/vendor/github.com/hashicorp/consul-template/config/syslog.go b/vendor/github.com/hashicorp/consul-template/config/syslog.go new file mode 100644 index 000000000..0de67199d --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/config/syslog.go @@ -0,0 +1,87 @@ +package config + +import "fmt" + +const ( + // DefaultSyslogFacility is the default facility to log to. + DefaultSyslogFacility = "LOCAL0" +) + +// SyslogConfig is the configuration for syslog. +type SyslogConfig struct { + Enabled *bool `mapstructure:"enabled"` + Facility *string `mapstructure:"facility"` +} + +// DefaultSyslogConfig returns a configuration that is populated with the +// default values. +func DefaultSyslogConfig() *SyslogConfig { + return &SyslogConfig{} +} + +// Copy returns a deep copy of this configuration. 
+func (c *SyslogConfig) Copy() *SyslogConfig { + if c == nil { + return nil + } + + var o SyslogConfig + o.Enabled = c.Enabled + o.Facility = c.Facility + return &o +} + +// Merge combines all values in this configuration with the values in the other +// configuration, with values in the other configuration taking precedence. +// Maps and slices are merged, most other values are overwritten. Complex +// structs define their own merge functionality. +func (c *SyslogConfig) Merge(o *SyslogConfig) *SyslogConfig { + if c == nil { + if o == nil { + return nil + } + return o.Copy() + } + + if o == nil { + return c.Copy() + } + + r := c.Copy() + + if o.Enabled != nil { + r.Enabled = o.Enabled + } + + if o.Facility != nil { + r.Facility = o.Facility + } + + return r +} + +// Finalize ensures there no nil pointers. +func (c *SyslogConfig) Finalize() { + if c.Enabled == nil { + c.Enabled = Bool(StringPresent(c.Facility)) + } + + if c.Facility == nil { + c.Facility = String(DefaultSyslogFacility) + } +} + +// GoString defines the printable version of this struct. +func (c *SyslogConfig) GoString() string { + if c == nil { + return "(*SyslogConfig)(nil)" + } + + return fmt.Sprintf("&SyslogConfig{"+ + "Enabled:%s, "+ + "Facility:%s"+ + "}", + BoolGoString(c.Enabled), + StringGoString(c.Facility), + ) +} diff --git a/vendor/github.com/hashicorp/consul-template/config/template.go b/vendor/github.com/hashicorp/consul-template/config/template.go new file mode 100644 index 000000000..4f69bfb60 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/config/template.go @@ -0,0 +1,458 @@ +package config + +import ( + "errors" + "fmt" + "os" + "regexp" + "strings" + "time" +) + +const ( + // DefaultTemplateCommandTimeout is the amount of time to wait for a command + // to return. + DefaultTemplateCommandTimeout = 30 * time.Second +) + +var ( + // ErrTemplateStringEmpty is the error returned with the template contents + // are empty. 
+ ErrTemplateStringEmpty = errors.New("template: cannot be empty") + + // configTemplateRe is the pattern to split the config template syntax. + configTemplateRe = regexp.MustCompile("([a-zA-Z]:)?([^:]+)") +) + +// TemplateConfig is a representation of a template on disk, as well as the +// associated commands and reload instructions. +type TemplateConfig struct { + // Backup determines if this template should retain a backup. The default + // value is false. + Backup *bool `mapstructure:"backup"` + + // Command is the arbitrary command to execute after a template has + // successfully rendered. This is DEPRECATED. Use Exec instead. + Command *string `mapstructure:"command"` + + // CommandTimeout is the amount of time to wait for the command to finish + // before force-killing it. This is DEPRECATED. Use Exec instead. + CommandTimeout *time.Duration `mapstructure:"command_timeout"` + + // Contents are the raw template contents to evaluate. Either this or Source + // must be specified, but not both. + Contents *string `mapstructure:"contents"` + + // CreateDestDirs tells Consul Template to create the parent directories of + // the destination path if they do not exist. The default value is true. + CreateDestDirs *bool `mapstructure:"create_dest_dirs"` + + // Destination is the location on disk where the template should be rendered. + // This is required unless running in debug/dry mode. + Destination *string `mapstructure:"destination"` + + // ErrMissingKey is used to control how the template behaves when attempting + // to index a struct or map key that does not exist. + ErrMissingKey *bool `mapstructure:"error_on_missing_key"` + + // Exec is the configuration for the command to run when the template renders + // successfully. + Exec *ExecConfig `mapstructure:"exec"` + + // Perms are the file system permissions to use when creating the file on + // disk. This is useful for when files contain sensitive information, such as + // secrets from Vault. 
+ Perms *os.FileMode `mapstructure:"perms"` + + // Source is the path on disk to the template contents to evaluate. Either + // this or Contents should be specified, but not both. + Source *string `mapstructure:"source"` + + // Wait configures per-template quiescence timers. + Wait *WaitConfig `mapstructure:"wait"` + + // LeftDelim and RightDelim are optional configurations to control what + // delimiter is utilized when parsing the template. + LeftDelim *string `mapstructure:"left_delimiter"` + RightDelim *string `mapstructure:"right_delimiter"` + + // FunctionBlacklist is a list of functions that this template is not + // permitted to run. + FunctionBlacklist []string `mapstructure:"function_blacklist"` + + // SandboxPath adds a prefix to any path provided to the `file` function + // and causes an error if a relative path tries to traverse outside that + // prefix. + SandboxPath *string `mapstructure:"sandbox_path"` +} + +// DefaultTemplateConfig returns a configuration that is populated with the +// default values. +func DefaultTemplateConfig() *TemplateConfig { + return &TemplateConfig{ + Exec: DefaultExecConfig(), + Wait: DefaultWaitConfig(), + } +} + +// Copy returns a deep copy of this configuration. 
+func (c *TemplateConfig) Copy() *TemplateConfig { + if c == nil { + return nil + } + + var o TemplateConfig + + o.Backup = c.Backup + + o.Command = c.Command + + o.CommandTimeout = c.CommandTimeout + + o.Contents = c.Contents + + o.CreateDestDirs = c.CreateDestDirs + + o.Destination = c.Destination + + o.ErrMissingKey = c.ErrMissingKey + + if c.Exec != nil { + o.Exec = c.Exec.Copy() + } + + o.Perms = c.Perms + + o.Source = c.Source + + if c.Wait != nil { + o.Wait = c.Wait.Copy() + } + + o.LeftDelim = c.LeftDelim + o.RightDelim = c.RightDelim + + for _, fun := range c.FunctionBlacklist { + o.FunctionBlacklist = append(o.FunctionBlacklist, fun) + } + o.SandboxPath = c.SandboxPath + + return &o +} + +// Merge combines all values in this configuration with the values in the other +// configuration, with values in the other configuration taking precedence. +// Maps and slices are merged, most other values are overwritten. Complex +// structs define their own merge functionality. +func (c *TemplateConfig) Merge(o *TemplateConfig) *TemplateConfig { + if c == nil { + if o == nil { + return nil + } + return o.Copy() + } + + if o == nil { + return c.Copy() + } + + r := c.Copy() + + if o.Backup != nil { + r.Backup = o.Backup + } + + if o.Command != nil { + r.Command = o.Command + } + + if o.CommandTimeout != nil { + r.CommandTimeout = o.CommandTimeout + } + + if o.Contents != nil { + r.Contents = o.Contents + } + + if o.CreateDestDirs != nil { + r.CreateDestDirs = o.CreateDestDirs + } + + if o.Destination != nil { + r.Destination = o.Destination + } + + if o.ErrMissingKey != nil { + r.ErrMissingKey = o.ErrMissingKey + } + + if o.Exec != nil { + r.Exec = r.Exec.Merge(o.Exec) + } + + if o.Perms != nil { + r.Perms = o.Perms + } + + if o.Source != nil { + r.Source = o.Source + } + + if o.Wait != nil { + r.Wait = r.Wait.Merge(o.Wait) + } + + if o.LeftDelim != nil { + r.LeftDelim = o.LeftDelim + } + + if o.RightDelim != nil { + r.RightDelim = o.RightDelim + } + + for _, fun := 
range o.FunctionBlacklist { + r.FunctionBlacklist = append(r.FunctionBlacklist, fun) + } + if o.SandboxPath != nil { + r.SandboxPath = o.SandboxPath + } + + return r +} + +// Finalize ensures the configuration has no nil pointers and sets default +// values. +func (c *TemplateConfig) Finalize() { + if c.Backup == nil { + c.Backup = Bool(false) + } + + if c.Command == nil { + c.Command = String("") + } + + if c.CommandTimeout == nil { + c.CommandTimeout = TimeDuration(DefaultTemplateCommandTimeout) + } + + if c.Contents == nil { + c.Contents = String("") + } + + if c.CreateDestDirs == nil { + c.CreateDestDirs = Bool(true) + } + + if c.Destination == nil { + c.Destination = String("") + } + + if c.ErrMissingKey == nil { + c.ErrMissingKey = Bool(false) + } + + if c.Exec == nil { + c.Exec = DefaultExecConfig() + } + + // Backwards compat for specifying command directly + if c.Exec.Command == nil && c.Command != nil { + c.Exec.Command = c.Command + } + if c.Exec.Timeout == nil && c.CommandTimeout != nil { + c.Exec.Timeout = c.CommandTimeout + } + c.Exec.Finalize() + + if c.Perms == nil { + c.Perms = FileMode(0) + } + + if c.Source == nil { + c.Source = String("") + } + + if c.Wait == nil { + c.Wait = DefaultWaitConfig() + } + c.Wait.Finalize() + + if c.LeftDelim == nil { + c.LeftDelim = String("") + } + + if c.RightDelim == nil { + c.RightDelim = String("") + } + + if c.SandboxPath == nil { + c.SandboxPath = String("") + } +} + +// GoString defines the printable version of this struct. 
+func (c *TemplateConfig) GoString() string { + if c == nil { + return "(*TemplateConfig)(nil)" + } + + return fmt.Sprintf("&TemplateConfig{"+ + "Backup:%s, "+ + "Command:%s, "+ + "CommandTimeout:%s, "+ + "Contents:%s, "+ + "CreateDestDirs:%s, "+ + "Destination:%s, "+ + "ErrMissingKey:%s, "+ + "Exec:%#v, "+ + "Perms:%s, "+ + "Source:%s, "+ + "Wait:%#v, "+ + "LeftDelim:%s, "+ + "RightDelim:%s"+ + "FunctionBlacklist:%s"+ + "SandboxPath:%s"+ + "}", + BoolGoString(c.Backup), + StringGoString(c.Command), + TimeDurationGoString(c.CommandTimeout), + StringGoString(c.Contents), + BoolGoString(c.CreateDestDirs), + StringGoString(c.Destination), + BoolGoString(c.ErrMissingKey), + c.Exec, + FileModeGoString(c.Perms), + StringGoString(c.Source), + c.Wait, + StringGoString(c.LeftDelim), + StringGoString(c.RightDelim), + c.FunctionBlacklist, + StringGoString(c.SandboxPath), + ) +} + +// Display is the human-friendly form of this configuration. It tries to +// describe this template in as much detail as possible in a single line, so +// log consumers can uniquely identify it. +func (c *TemplateConfig) Display() string { + if c == nil { + return "" + } + + source := c.Source + if StringPresent(c.Contents) { + source = String("(dynamic)") + } + + return fmt.Sprintf("%q => %q", + StringVal(source), + StringVal(c.Destination), + ) +} + +// TemplateConfigs is a collection of TemplateConfigs +type TemplateConfigs []*TemplateConfig + +// DefaultTemplateConfigs returns a configuration that is populated with the +// default values. +func DefaultTemplateConfigs() *TemplateConfigs { + return &TemplateConfigs{} +} + +// Copy returns a deep copy of this configuration. +func (c *TemplateConfigs) Copy() *TemplateConfigs { + o := make(TemplateConfigs, len(*c)) + for i, t := range *c { + o[i] = t.Copy() + } + return &o +} + +// Merge combines all values in this configuration with the values in the other +// configuration, with values in the other configuration taking precedence. 
+// Maps and slices are merged, most other values are overwritten. Complex +// structs define their own merge functionality. +func (c *TemplateConfigs) Merge(o *TemplateConfigs) *TemplateConfigs { + if c == nil { + if o == nil { + return nil + } + return o.Copy() + } + + if o == nil { + return c.Copy() + } + + r := c.Copy() + + *r = append(*r, *o...) + + return r +} + +// Finalize ensures the configuration has no nil pointers and sets default +// values. +func (c *TemplateConfigs) Finalize() { + if c == nil { + *c = *DefaultTemplateConfigs() + } + + for _, t := range *c { + t.Finalize() + } +} + +// GoString defines the printable version of this struct. +func (c *TemplateConfigs) GoString() string { + if c == nil { + return "(*TemplateConfigs)(nil)" + } + + s := make([]string, len(*c)) + for i, t := range *c { + s[i] = t.GoString() + } + + return "{" + strings.Join(s, ", ") + "}" +} + +// ParseTemplateConfig parses a string in the form source:destination:command +// into a TemplateConfig. 
+func ParseTemplateConfig(s string) (*TemplateConfig, error) { + if len(strings.TrimSpace(s)) < 1 { + return nil, ErrTemplateStringEmpty + } + + var source, destination, command string + parts := configTemplateRe.FindAllString(s, -1) + + switch len(parts) { + case 1: + source = parts[0] + case 2: + source, destination = parts[0], parts[1] + case 3: + source, destination, command = parts[0], parts[1], parts[2] + default: + source, destination = parts[0], parts[1] + command = strings.Join(parts[2:], ":") + } + + var sourcePtr, destinationPtr, commandPtr *string + if source != "" { + sourcePtr = String(source) + } + if destination != "" { + destinationPtr = String(destination) + } + if command != "" { + commandPtr = String(command) + } + + return &TemplateConfig{ + Source: sourcePtr, + Destination: destinationPtr, + Command: commandPtr, + }, nil +} diff --git a/vendor/github.com/hashicorp/consul-template/config/transport.go b/vendor/github.com/hashicorp/consul-template/config/transport.go new file mode 100644 index 000000000..dc218daa2 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/config/transport.go @@ -0,0 +1,188 @@ +package config + +import ( + "fmt" + "runtime" + "time" +) + +const ( + // DefaultDialKeepAlive is the default amount of time to keep alive + // connections. + DefaultDialKeepAlive = 30 * time.Second + + // DefaultDialTimeout is the amount of time to attempt to dial before timing + // out. + DefaultDialTimeout = 30 * time.Second + + // DefaultIdleConnTimeout is the default connection timeout for idle + // connections. + DefaultIdleConnTimeout = 90 * time.Second + + // DefaultMaxIdleConns is the default number of maximum idle connections. + DefaultMaxIdleConns = 100 + + // DefaultTLSHandshakeTimeout is the amount of time to negotiate the TLS + // handshake. + DefaultTLSHandshakeTimeout = 10 * time.Second +) + +var ( + // DefaultMaxIdleConnsPerHost is the default number of idle connections to use + // per host. 
+ DefaultMaxIdleConnsPerHost = runtime.GOMAXPROCS(0) + 1 +) + +// TransportConfig is the configuration to tune low-level APIs for the +// interactions on the wire. +type TransportConfig struct { + // DialKeepAlive is the amount of time for keep-alives. + DialKeepAlive *time.Duration `mapstructure:"dial_keep_alive"` + + // DialTimeout is the amount of time to wait to establish a connection. + DialTimeout *time.Duration `mapstructure:"dial_timeout"` + + // DisableKeepAlives determines if keep-alives should be used. Disabling this + // significantly decreases performance. + DisableKeepAlives *bool `mapstructure:"disable_keep_alives"` + + // IdleConnTimeout is the timeout for idle connections. + IdleConnTimeout *time.Duration `mapstructure:"idle_conn_timeout"` + + // MaxIdleConns is the maximum number of total idle connections. + MaxIdleConns *int `mapstructure:"max_idle_conns"` + + // MaxIdleConnsPerHost is the maximum number of idle connections per remote + // host. + MaxIdleConnsPerHost *int `mapstructure:"max_idle_conns_per_host"` + + // TLSHandshakeTimeout is the amount of time to wait to complete the TLS + // handshake. + TLSHandshakeTimeout *time.Duration `mapstructure:"tls_handshake_timeout"` +} + +// DefaultTransportConfig returns a configuration that is populated with the +// default values. +func DefaultTransportConfig() *TransportConfig { + return &TransportConfig{} +} + +// Copy returns a deep copy of this configuration. 
+func (c *TransportConfig) Copy() *TransportConfig { + if c == nil { + return nil + } + + var o TransportConfig + + o.DialKeepAlive = c.DialKeepAlive + o.DialTimeout = c.DialTimeout + o.DisableKeepAlives = c.DisableKeepAlives + o.IdleConnTimeout = c.IdleConnTimeout + o.MaxIdleConns = c.MaxIdleConns + o.MaxIdleConnsPerHost = c.MaxIdleConnsPerHost + o.TLSHandshakeTimeout = c.TLSHandshakeTimeout + + return &o +} + +// Merge combines all values in this configuration with the values in the other +// configuration, with values in the other configuration taking precedence. +// Maps and slices are merged, most other values are overwritten. Complex +// structs define their own merge functionality. +func (c *TransportConfig) Merge(o *TransportConfig) *TransportConfig { + if c == nil { + if o == nil { + return nil + } + return o.Copy() + } + + if o == nil { + return c.Copy() + } + + r := c.Copy() + + if o.DialKeepAlive != nil { + r.DialKeepAlive = o.DialKeepAlive + } + + if o.DialTimeout != nil { + r.DialTimeout = o.DialTimeout + } + + if o.DisableKeepAlives != nil { + r.DisableKeepAlives = o.DisableKeepAlives + } + + if o.IdleConnTimeout != nil { + r.IdleConnTimeout = o.IdleConnTimeout + } + + if o.MaxIdleConns != nil { + r.MaxIdleConns = o.MaxIdleConns + } + + if o.MaxIdleConnsPerHost != nil { + r.MaxIdleConnsPerHost = o.MaxIdleConnsPerHost + } + + if o.TLSHandshakeTimeout != nil { + r.TLSHandshakeTimeout = o.TLSHandshakeTimeout + } + + return r +} + +// Finalize ensures there no nil pointers. 
+func (c *TransportConfig) Finalize() { + if c.DialKeepAlive == nil { + c.DialKeepAlive = TimeDuration(DefaultDialKeepAlive) + } + + if c.DialTimeout == nil { + c.DialTimeout = TimeDuration(DefaultDialTimeout) + } + + if c.DisableKeepAlives == nil { + c.DisableKeepAlives = Bool(false) + } + + if c.IdleConnTimeout == nil { + c.IdleConnTimeout = TimeDuration(DefaultIdleConnTimeout) + } + + if c.MaxIdleConns == nil { + c.MaxIdleConns = Int(DefaultMaxIdleConns) + } + + if c.MaxIdleConnsPerHost == nil { + c.MaxIdleConnsPerHost = Int(DefaultMaxIdleConnsPerHost) + } + + if c.TLSHandshakeTimeout == nil { + c.TLSHandshakeTimeout = TimeDuration(DefaultTLSHandshakeTimeout) + } +} + +// GoString defines the printable version of this struct. +func (c *TransportConfig) GoString() string { + if c == nil { + return "(*TransportConfig)(nil)" + } + + return fmt.Sprintf("&TransportConfig{"+ + "DialKeepAlive:%s, "+ + "DialTimeout:%s, "+ + "DisableKeepAlives:%t, "+ + "MaxIdleConnsPerHost:%d, "+ + "TLSHandshakeTimeout:%s"+ + "}", + TimeDurationVal(c.DialKeepAlive), + TimeDurationVal(c.DialTimeout), + BoolVal(c.DisableKeepAlives), + IntVal(c.MaxIdleConnsPerHost), + TimeDurationVal(c.TLSHandshakeTimeout), + ) +} diff --git a/vendor/github.com/hashicorp/consul-template/config/vault.go b/vendor/github.com/hashicorp/consul-template/config/vault.go new file mode 100644 index 000000000..0ba4cce73 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/config/vault.go @@ -0,0 +1,327 @@ +package config + +import ( + "fmt" + "time" + + "github.com/hashicorp/vault/api" +) + +const ( + // XXX Change use to api.EnvVaultSkipVerify once we've updated vendored + // vault to version 1.1.0 or newer. + EnvVaultSkipVerify = "VAULT_SKIP_VERIFY" + + // DefaultVaultGrace is the default grace period before which to read a new + // secret from Vault. If a lease is due to expire in 15 seconds, Consul + // Template will read a new secret at that time minus this value. 
+ DefaultVaultGrace = 15 * time.Second + + // DefaultVaultRenewToken is the default value for if the Vault token should + // be renewed. + DefaultVaultRenewToken = true + + // DefaultVaultUnwrapToken is the default value for if the Vault token should + // be unwrapped. + DefaultVaultUnwrapToken = false + + // DefaultVaultRetryBase is the default value for the base time to use for + // exponential backoff. + DefaultVaultRetryBase = 250 * time.Millisecond + + // DefaultVaultRetryMaxAttempts is the default maximum number of attempts to + // retry before quitting. + DefaultVaultRetryMaxAttempts = 5 +) + +// VaultConfig is the configuration for connecting to a vault server. +type VaultConfig struct { + // Address is the URI to the Vault server. + Address *string `mapstructure:"address"` + + // Enabled controls whether the Vault integration is active. + Enabled *bool `mapstructure:"enabled"` + + // Grace is the amount of time before a lease is about to expire to force a + // new secret to be read. + Grace *time.Duration `mapstructure:"grace"` + + // Namespace is the Vault namespace to use for reading/writing secrets. This can + // also be set via the VAULT_NAMESPACE environment variable. + Namespace *string `mapstructure:"namespace"` + + // RenewToken renews the Vault token. + RenewToken *bool `mapstructure:"renew_token"` + + // Retry is the configuration for specifying how to behave on failure. + Retry *RetryConfig `mapstructure:"retry"` + + // SSL indicates we should use a secure connection while talking to Vault. + SSL *SSLConfig `mapstructure:"ssl"` + + // Token is the Vault token to communicate with for requests. It may be + // a wrapped token or a real token. This can also be set via the VAULT_TOKEN + // environment variable, or via the VaultAgentTokenFile. + Token *string `mapstructure:"token" json:"-"` + + // VaultAgentTokenFile is the path of file that contains a Vault Agent token. 
+ // If vault_agent_token_file is specified: + // - Consul Template will not try to renew the Vault token. + // - Consul Template will periodically stat the file and update the token if it has + // changed. + VaultAgentTokenFile *string `mapstructure:"vault_agent_token_file" json:"-"` + + // Transport configures the low-level network connection details. + Transport *TransportConfig `mapstructure:"transport"` + + // UnwrapToken unwraps the provided Vault token as a wrapped token. + UnwrapToken *bool `mapstructure:"unwrap_token"` +} + +// DefaultVaultConfig returns a configuration that is populated with the +// default values. +func DefaultVaultConfig() *VaultConfig { + v := &VaultConfig{ + Retry: DefaultRetryConfig(), + SSL: DefaultSSLConfig(), + Transport: DefaultTransportConfig(), + } + + // Force SSL when communicating with Vault. + v.SSL.Enabled = Bool(true) + + return v +} + +// Copy returns a deep copy of this configuration. +func (c *VaultConfig) Copy() *VaultConfig { + if c == nil { + return nil + } + + var o VaultConfig + o.Address = c.Address + + o.Enabled = c.Enabled + + o.Grace = c.Grace + + o.Namespace = c.Namespace + + o.RenewToken = c.RenewToken + + if c.Retry != nil { + o.Retry = c.Retry.Copy() + } + + if c.SSL != nil { + o.SSL = c.SSL.Copy() + } + + o.Token = c.Token + + o.VaultAgentTokenFile = c.VaultAgentTokenFile + + if c.Transport != nil { + o.Transport = c.Transport.Copy() + } + + o.UnwrapToken = c.UnwrapToken + + return &o +} + +// Merge combines all values in this configuration with the values in the other +// configuration, with values in the other configuration taking precedence. +// Maps and slices are merged, most other values are overwritten. Complex +// structs define their own merge functionality. 
+func (c *VaultConfig) Merge(o *VaultConfig) *VaultConfig { + if c == nil { + if o == nil { + return nil + } + return o.Copy() + } + + if o == nil { + return c.Copy() + } + + r := c.Copy() + + if o.Address != nil { + r.Address = o.Address + } + + if o.Enabled != nil { + r.Enabled = o.Enabled + } + + if o.Grace != nil { + r.Grace = o.Grace + } + + if o.Namespace != nil { + r.Namespace = o.Namespace + } + + if o.RenewToken != nil { + r.RenewToken = o.RenewToken + } + + if o.Retry != nil { + r.Retry = r.Retry.Merge(o.Retry) + } + + if o.SSL != nil { + r.SSL = r.SSL.Merge(o.SSL) + } + + if o.Token != nil { + r.Token = o.Token + } + + if o.VaultAgentTokenFile != nil { + r.VaultAgentTokenFile = o.VaultAgentTokenFile + } + + if o.Transport != nil { + r.Transport = r.Transport.Merge(o.Transport) + } + + if o.UnwrapToken != nil { + r.UnwrapToken = o.UnwrapToken + } + + return r +} + +// Finalize ensures there no nil pointers. +func (c *VaultConfig) Finalize() { + if c.Address == nil { + c.Address = stringFromEnv([]string{ + api.EnvVaultAddress, + }, "") + } + + if c.Grace == nil { + c.Grace = TimeDuration(DefaultVaultGrace) + } + + if c.Namespace == nil { + c.Namespace = stringFromEnv([]string{"VAULT_NAMESPACE"}, "") + } + + if c.RenewToken == nil { + default_renew := DefaultVaultRenewToken + if c.VaultAgentTokenFile != nil { + default_renew = false + } + c.RenewToken = boolFromEnv([]string{ + "VAULT_RENEW_TOKEN", + }, default_renew) + } + + if c.Retry == nil { + c.Retry = DefaultRetryConfig() + } + c.Retry.Finalize() + + // Vault has custom SSL settings + if c.SSL == nil { + c.SSL = DefaultSSLConfig() + } + if c.SSL.Enabled == nil { + c.SSL.Enabled = Bool(true) + } + if c.SSL.CaCert == nil { + c.SSL.CaCert = stringFromEnv([]string{api.EnvVaultCACert}, "") + } + if c.SSL.CaPath == nil { + c.SSL.CaPath = stringFromEnv([]string{api.EnvVaultCAPath}, "") + } + if c.SSL.Cert == nil { + c.SSL.Cert = stringFromEnv([]string{api.EnvVaultClientCert}, "") + } + if c.SSL.Key == nil { + 
c.SSL.Key = stringFromEnv([]string{api.EnvVaultClientKey}, "") + } + if c.SSL.ServerName == nil { + c.SSL.ServerName = stringFromEnv([]string{api.EnvVaultTLSServerName}, "") + } + if c.SSL.Verify == nil { + c.SSL.Verify = antiboolFromEnv([]string{ + EnvVaultSkipVerify, api.EnvVaultInsecure}, true) + } + c.SSL.Finalize() + + // Order of precedence + // 1. `vault_agent_token_file` configuration value + // 2. `token` configuration value` + // 3. `VAULT_TOKEN` environment variable + if c.Token == nil { + c.Token = stringFromEnv([]string{ + "VAULT_TOKEN", + }, "") + } + + if c.VaultAgentTokenFile == nil { + if StringVal(c.Token) == "" { + if homePath != "" { + c.Token = stringFromFile([]string{ + homePath + "/.vault-token", + }, "") + } + } + } else { + c.Token = stringFromFile([]string{*c.VaultAgentTokenFile}, "") + } + + if c.Transport == nil { + c.Transport = DefaultTransportConfig() + } + c.Transport.Finalize() + + if c.UnwrapToken == nil { + c.UnwrapToken = boolFromEnv([]string{ + "VAULT_UNWRAP_TOKEN", + }, DefaultVaultUnwrapToken) + } + + if c.Enabled == nil { + c.Enabled = Bool(StringPresent(c.Address)) + } +} + +// GoString defines the printable version of this struct. 
+func (c *VaultConfig) GoString() string { + if c == nil { + return "(*VaultConfig)(nil)" + } + + return fmt.Sprintf("&VaultConfig{"+ + "Address:%s, "+ + "Enabled:%s, "+ + "Grace:%s, "+ + "Namespace:%s,"+ + "RenewToken:%s, "+ + "Retry:%#v, "+ + "SSL:%#v, "+ + "Token:%t, "+ + "VaultAgentTokenFile:%t, "+ + "Transport:%#v, "+ + "UnwrapToken:%s"+ + "}", + StringGoString(c.Address), + BoolGoString(c.Enabled), + TimeDurationGoString(c.Grace), + StringGoString(c.Namespace), + BoolGoString(c.RenewToken), + c.Retry, + c.SSL, + StringPresent(c.Token), + StringPresent(c.VaultAgentTokenFile), + c.Transport, + BoolGoString(c.UnwrapToken), + ) +} diff --git a/vendor/github.com/hashicorp/consul-template/config/wait.go b/vendor/github.com/hashicorp/consul-template/config/wait.go new file mode 100644 index 000000000..8e3d56c19 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/config/wait.go @@ -0,0 +1,191 @@ +package config + +import ( + "errors" + "fmt" + "strings" + "time" +) + +var ( + // ErrWaitStringEmpty is the error returned when wait is specified as an empty + // string. + ErrWaitStringEmpty = errors.New("wait: cannot be empty") + + // ErrWaitInvalidFormat is the error returned when the wait is specified + // incorrectly. + ErrWaitInvalidFormat = errors.New("wait: invalid format") + + // ErrWaitNegative is the error returned with the wait is negative. + ErrWaitNegative = errors.New("wait: cannot be negative") + + // ErrWaitMinLTMax is the error returned with the minimum wait time is not + // less than the maximum wait time. + ErrWaitMinLTMax = errors.New("wait: min must be less than max") +) + +// WaitConfig is the Min/Max duration used by the Watcher +type WaitConfig struct { + // Enabled determines if this wait is enabled. + Enabled *bool `mapstructure:"bool"` + + // Min and Max are the minimum and maximum time, respectively, to wait for + // data changes before rendering a new template to disk. 
+ Min *time.Duration `mapstructure:"min"` + Max *time.Duration `mapstructure:"max"` +} + +// DefaultWaitConfig is the default configuration. +func DefaultWaitConfig() *WaitConfig { + return &WaitConfig{} +} + +// Copy returns a deep copy of this configuration. +func (c *WaitConfig) Copy() *WaitConfig { + if c == nil { + return nil + } + + var o WaitConfig + o.Enabled = c.Enabled + o.Min = c.Min + o.Max = c.Max + return &o +} + +// Merge combines all values in this configuration with the values in the other +// configuration, with values in the other configuration taking precedence. +// Maps and slices are merged, most other values are overwritten. Complex +// structs define their own merge functionality. +func (c *WaitConfig) Merge(o *WaitConfig) *WaitConfig { + if c == nil { + if o == nil { + return nil + } + return o.Copy() + } + + if o == nil { + return c.Copy() + } + + r := c.Copy() + + if o.Enabled != nil { + r.Enabled = o.Enabled + } + + if o.Min != nil { + r.Min = o.Min + } + + if o.Max != nil { + r.Max = o.Max + } + + return r +} + +// Finalize ensures there no nil pointers. +func (c *WaitConfig) Finalize() { + if c.Enabled == nil { + c.Enabled = Bool(TimeDurationPresent(c.Min)) + } + + if c.Min == nil { + c.Min = TimeDuration(0 * time.Second) + } + + if c.Max == nil { + c.Max = TimeDuration(4 * *c.Min) + } +} + +// GoString defines the printable version of this struct. +func (c *WaitConfig) GoString() string { + if c == nil { + return "(*WaitConfig)(nil)" + } + + return fmt.Sprintf("&WaitConfig{"+ + "Enabled:%s, "+ + "Min:%s, "+ + "Max:%s"+ + "}", + BoolGoString(c.Enabled), + TimeDurationGoString(c.Min), + TimeDurationGoString(c.Max), + ) +} + +// ParseWaitConfig parses a string of the format `minimum(:maximum)` into a +// WaitConfig. 
+func ParseWaitConfig(s string) (*WaitConfig, error) { + s = strings.TrimSpace(s) + if len(s) < 1 { + return nil, ErrWaitStringEmpty + } + + parts := strings.Split(s, ":") + + var min, max time.Duration + var err error + + switch len(parts) { + case 1: + min, err = time.ParseDuration(strings.TrimSpace(parts[0])) + if err != nil { + return nil, err + } + + max = 4 * min + case 2: + min, err = time.ParseDuration(strings.TrimSpace(parts[0])) + if err != nil { + return nil, err + } + + max, err = time.ParseDuration(strings.TrimSpace(parts[1])) + if err != nil { + return nil, err + } + default: + return nil, ErrWaitInvalidFormat + } + + if min < 0 || max < 0 { + return nil, ErrWaitNegative + } + + if max < min { + return nil, ErrWaitMinLTMax + } + + var c WaitConfig + c.Min = TimeDuration(min) + c.Max = TimeDuration(max) + + return &c, nil +} + +// WaitVar implements the Flag.Value interface and allows the user to specify +// a watch interval using Go's flag parsing library. +type WaitVar WaitConfig + +// Set sets the value in the format min[:max] for a wait timer. 
+func (w *WaitVar) Set(value string) error { + wait, err := ParseWaitConfig(value) + if err != nil { + return err + } + + w.Min = wait.Min + w.Max = wait.Max + + return nil +} + +// String returns the string format for this wait variable +func (w *WaitVar) String() string { + return fmt.Sprintf("%s:%s", w.Min, w.Max) +} diff --git a/vendor/github.com/hashicorp/consul-template/dependency/catalog_datacenters.go b/vendor/github.com/hashicorp/consul-template/dependency/catalog_datacenters.go new file mode 100644 index 000000000..a78d33cf2 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/dependency/catalog_datacenters.go @@ -0,0 +1,112 @@ +package dependency + +import ( + "log" + "net/url" + "sort" + "time" + + "github.com/hashicorp/consul/api" + "github.com/pkg/errors" +) + +var ( + // Ensure implements + _ Dependency = (*CatalogDatacentersQuery)(nil) + + // CatalogDatacentersQuerySleepTime is the amount of time to sleep between + // queries, since the endpoint does not support blocking queries. + CatalogDatacentersQuerySleepTime = 15 * time.Second +) + +// CatalogDatacentersQuery is the dependency to query all datacenters +type CatalogDatacentersQuery struct { + ignoreFailing bool + + stopCh chan struct{} +} + +// NewCatalogDatacentersQuery creates a new datacenter dependency. 
+func NewCatalogDatacentersQuery(ignoreFailing bool) (*CatalogDatacentersQuery, error) { + return &CatalogDatacentersQuery{ + ignoreFailing: ignoreFailing, + stopCh: make(chan struct{}, 1), + }, nil +} + +// Fetch queries the Consul API defined by the given client and returns a slice +// of strings representing the datacenters +func (d *CatalogDatacentersQuery) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) { + opts = opts.Merge(&QueryOptions{}) + + log.Printf("[TRACE] %s: GET %s", d, &url.URL{ + Path: "/v1/catalog/datacenters", + RawQuery: opts.String(), + }) + + // This is pretty ghetto, but the datacenters endpoint does not support + // blocking queries, so we are going to "fake it until we make it". When we + // first query, the LastIndex will be "0", meaning we should immediately + // return data, but future calls will include a LastIndex. If we have a + // LastIndex in the query metadata, sleep for 15 seconds before asking Consul + // again. + // + // This is probably okay given the frequency in which datacenters actually + // change, but is technically not edge-triggering. + if opts.WaitIndex != 0 { + log.Printf("[TRACE] %s: long polling for %s", d, CatalogDatacentersQuerySleepTime) + + select { + case <-d.stopCh: + return nil, nil, ErrStopped + case <-time.After(CatalogDatacentersQuerySleepTime): + } + } + + result, err := clients.Consul().Catalog().Datacenters() + if err != nil { + return nil, nil, errors.Wrapf(err, d.String()) + } + + // If the user opted in for skipping "down" datacenters, figure out which + // datacenters are down. 
+ if d.ignoreFailing { + dcs := make([]string, 0, len(result)) + for _, dc := range result { + if _, _, err := clients.Consul().Catalog().Services(&api.QueryOptions{ + Datacenter: dc, + AllowStale: false, + RequireConsistent: true, + }); err == nil { + dcs = append(dcs, dc) + } + } + result = dcs + } + + log.Printf("[TRACE] %s: returned %d results", d, len(result)) + + sort.Strings(result) + + return respWithMetadata(result) +} + +// CanShare returns if this dependency is shareable. +func (d *CatalogDatacentersQuery) CanShare() bool { + return true +} + +// String returns the human-friendly version of this dependency. +func (d *CatalogDatacentersQuery) String() string { + return "catalog.datacenters" +} + +// Stop terminates this dependency's fetch. +func (d *CatalogDatacentersQuery) Stop() { + close(d.stopCh) +} + +// Type returns the type of this dependency. +func (d *CatalogDatacentersQuery) Type() Type { + return TypeConsul +} diff --git a/vendor/github.com/hashicorp/consul-template/dependency/catalog_node.go b/vendor/github.com/hashicorp/consul-template/dependency/catalog_node.go new file mode 100644 index 000000000..12ef7633d --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/dependency/catalog_node.go @@ -0,0 +1,181 @@ +package dependency + +import ( + "encoding/gob" + "fmt" + "log" + "net/url" + "regexp" + "sort" + + "github.com/pkg/errors" +) + +var ( + // Ensure implements + _ Dependency = (*CatalogNodeQuery)(nil) + + // CatalogNodeQueryRe is the regular expression to use. + CatalogNodeQueryRe = regexp.MustCompile(`\A` + nodeNameRe + dcRe + `\z`) +) + +func init() { + gob.Register([]*CatalogNode{}) + gob.Register([]*CatalogNodeService{}) +} + +// CatalogNodeQuery represents a single node from the Consul catalog. +type CatalogNodeQuery struct { + stopCh chan struct{} + + dc string + name string +} + +// CatalogNode is a wrapper around the node and its services. 
+type CatalogNode struct { + Node *Node + Services []*CatalogNodeService +} + +// CatalogNodeService is a service on a single node. +type CatalogNodeService struct { + ID string + Service string + Tags ServiceTags + Meta map[string]string + Port int + Address string + EnableTagOverride bool +} + +// NewCatalogNodeQuery parses the given string into a dependency. If the name is +// empty then the name of the local agent is used. +func NewCatalogNodeQuery(s string) (*CatalogNodeQuery, error) { + if s != "" && !CatalogNodeQueryRe.MatchString(s) { + return nil, fmt.Errorf("catalog.node: invalid format: %q", s) + } + + m := regexpMatch(CatalogNodeQueryRe, s) + return &CatalogNodeQuery{ + dc: m["dc"], + name: m["name"], + stopCh: make(chan struct{}, 1), + }, nil +} + +// Fetch queries the Consul API defined by the given client and returns a +// of CatalogNode object. +func (d *CatalogNodeQuery) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) { + select { + case <-d.stopCh: + return nil, nil, ErrStopped + default: + } + + opts = opts.Merge(&QueryOptions{ + Datacenter: d.dc, + }) + + // Grab the name + name := d.name + + if name == "" { + log.Printf("[TRACE] %s: getting local agent name", d) + var err error + name, err = clients.Consul().Agent().NodeName() + if err != nil { + return nil, nil, errors.Wrapf(err, d.String()) + } + } + + log.Printf("[TRACE] %s: GET %s", d, &url.URL{ + Path: "/v1/catalog/node/" + name, + RawQuery: opts.String(), + }) + node, qm, err := clients.Consul().Catalog().Node(name, opts.ToConsulOpts()) + if err != nil { + return nil, nil, errors.Wrap(err, d.String()) + } + + log.Printf("[TRACE] %s: returned response", d) + + rm := &ResponseMetadata{ + LastIndex: qm.LastIndex, + LastContact: qm.LastContact, + } + + if node == nil { + log.Printf("[WARN] %s: no node exists with the name %q", d, name) + var node CatalogNode + return &node, rm, nil + } + + services := make([]*CatalogNodeService, 0, len(node.Services)) + 
for _, v := range node.Services { + services = append(services, &CatalogNodeService{ + ID: v.ID, + Service: v.Service, + Tags: ServiceTags(deepCopyAndSortTags(v.Tags)), + Meta: v.Meta, + Port: v.Port, + Address: v.Address, + EnableTagOverride: v.EnableTagOverride, + }) + } + sort.Stable(ByService(services)) + + detail := &CatalogNode{ + Node: &Node{ + ID: node.Node.ID, + Node: node.Node.Node, + Address: node.Node.Address, + Datacenter: node.Node.Datacenter, + TaggedAddresses: node.Node.TaggedAddresses, + Meta: node.Node.Meta, + }, + Services: services, + } + + return detail, rm, nil +} + +// CanShare returns a boolean if this dependency is shareable. +func (d *CatalogNodeQuery) CanShare() bool { + return false +} + +// String returns the human-friendly version of this dependency. +func (d *CatalogNodeQuery) String() string { + name := d.name + if d.dc != "" { + name = name + "@" + d.dc + } + + if name == "" { + return "catalog.node" + } + return fmt.Sprintf("catalog.node(%s)", name) +} + +// Stop halts the dependency's fetch function. +func (d *CatalogNodeQuery) Stop() { + close(d.stopCh) +} + +// Type returns the type of this dependency. +func (d *CatalogNodeQuery) Type() Type { + return TypeConsul +} + +// ByService is a sorter of node services by their service name and then ID. 
+type ByService []*CatalogNodeService + +func (s ByService) Len() int { return len(s) } +func (s ByService) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s ByService) Less(i, j int) bool { + if s[i].Service == s[j].Service { + return s[i].ID <= s[j].ID + } + return s[i].Service <= s[j].Service +} diff --git a/vendor/github.com/hashicorp/consul-template/dependency/catalog_nodes.go b/vendor/github.com/hashicorp/consul-template/dependency/catalog_nodes.go new file mode 100644 index 000000000..d570cf0fa --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/dependency/catalog_nodes.go @@ -0,0 +1,150 @@ +package dependency + +import ( + "encoding/gob" + "fmt" + "log" + "net/url" + "regexp" + "sort" + + "github.com/pkg/errors" +) + +var ( + // Ensure implements + _ Dependency = (*CatalogNodesQuery)(nil) + + // CatalogNodesQueryRe is the regular expression to use. + CatalogNodesQueryRe = regexp.MustCompile(`\A` + dcRe + nearRe + `\z`) +) + +func init() { + gob.Register([]*Node{}) +} + +// Node is a node entry in Consul +type Node struct { + ID string + Node string + Address string + Datacenter string + TaggedAddresses map[string]string + Meta map[string]string +} + +// CatalogNodesQuery is the representation of all registered nodes in Consul. +type CatalogNodesQuery struct { + stopCh chan struct{} + + dc string + near string +} + +// NewCatalogNodesQuery parses the given string into a dependency. If the name is +// empty then the name of the local agent is used. 
+func NewCatalogNodesQuery(s string) (*CatalogNodesQuery, error) { + if !CatalogNodesQueryRe.MatchString(s) { + return nil, fmt.Errorf("catalog.nodes: invalid format: %q", s) + } + + m := regexpMatch(CatalogNodesQueryRe, s) + return &CatalogNodesQuery{ + dc: m["dc"], + near: m["near"], + stopCh: make(chan struct{}, 1), + }, nil +} + +// Fetch queries the Consul API defined by the given client and returns a slice +// of Node objects +func (d *CatalogNodesQuery) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) { + select { + case <-d.stopCh: + return nil, nil, ErrStopped + default: + } + + opts = opts.Merge(&QueryOptions{ + Datacenter: d.dc, + Near: d.near, + }) + + log.Printf("[TRACE] %s: GET %s", d, &url.URL{ + Path: "/v1/catalog/nodes", + RawQuery: opts.String(), + }) + n, qm, err := clients.Consul().Catalog().Nodes(opts.ToConsulOpts()) + if err != nil { + return nil, nil, errors.Wrap(err, d.String()) + } + + log.Printf("[TRACE] %s: returned %d results", d, len(n)) + + nodes := make([]*Node, 0, len(n)) + for _, node := range n { + nodes = append(nodes, &Node{ + ID: node.ID, + Node: node.Node, + Address: node.Address, + Datacenter: node.Datacenter, + TaggedAddresses: node.TaggedAddresses, + Meta: node.Meta, + }) + } + + // Sort unless the user explicitly asked for nearness + if d.near == "" { + sort.Stable(ByNode(nodes)) + } + + rm := &ResponseMetadata{ + LastIndex: qm.LastIndex, + LastContact: qm.LastContact, + } + + return nodes, rm, nil +} + +// CanShare returns a boolean if this dependency is shareable. +func (d *CatalogNodesQuery) CanShare() bool { + return true +} + +// String returns the human-friendly version of this dependency. 
+func (d *CatalogNodesQuery) String() string { + name := "" + if d.dc != "" { + name = name + "@" + d.dc + } + if d.near != "" { + name = name + "~" + d.near + } + + if name == "" { + return "catalog.nodes" + } + return fmt.Sprintf("catalog.nodes(%s)", name) +} + +// Stop halts the dependency's fetch function. +func (d *CatalogNodesQuery) Stop() { + close(d.stopCh) +} + +// Type returns the type of this dependency. +func (d *CatalogNodesQuery) Type() Type { + return TypeConsul +} + +// ByNode is a sortable list of nodes by name and then IP address. +type ByNode []*Node + +func (s ByNode) Len() int { return len(s) } +func (s ByNode) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s ByNode) Less(i, j int) bool { + if s[i].Node == s[j].Node { + return s[i].Address <= s[j].Address + } + return s[i].Node <= s[j].Node +} diff --git a/vendor/github.com/hashicorp/consul-template/dependency/catalog_service.go b/vendor/github.com/hashicorp/consul-template/dependency/catalog_service.go new file mode 100644 index 000000000..8b94a5996 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/dependency/catalog_service.go @@ -0,0 +1,154 @@ +package dependency + +import ( + "encoding/gob" + "fmt" + "log" + "net/url" + "regexp" + + "github.com/pkg/errors" +) + +var ( + // Ensure implements + _ Dependency = (*CatalogServiceQuery)(nil) + + // CatalogServiceQueryRe is the regular expression to use. + CatalogServiceQueryRe = regexp.MustCompile(`\A` + tagRe + serviceNameRe + dcRe + nearRe + `\z`) +) + +func init() { + gob.Register([]*CatalogSnippet{}) +} + +// CatalogService is a catalog entry in Consul. 
+type CatalogService struct { + ID string + Node string + Address string + Datacenter string + TaggedAddresses map[string]string + NodeMeta map[string]string + ServiceID string + ServiceName string + ServiceAddress string + ServiceTags ServiceTags + ServiceMeta map[string]string + ServicePort int +} + +// CatalogServiceQuery is the representation of a requested catalog services +// dependency from inside a template. +type CatalogServiceQuery struct { + stopCh chan struct{} + + dc string + name string + near string + tag string +} + +// NewCatalogServiceQuery parses a string into a CatalogServiceQuery. +func NewCatalogServiceQuery(s string) (*CatalogServiceQuery, error) { + if !CatalogServiceQueryRe.MatchString(s) { + return nil, fmt.Errorf("catalog.service: invalid format: %q", s) + } + + m := regexpMatch(CatalogServiceQueryRe, s) + return &CatalogServiceQuery{ + stopCh: make(chan struct{}, 1), + dc: m["dc"], + name: m["name"], + near: m["near"], + tag: m["tag"], + }, nil +} + +// Fetch queries the Consul API defined by the given client and returns a slice +// of CatalogService objects. 
+func (d *CatalogServiceQuery) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) { + select { + case <-d.stopCh: + return nil, nil, ErrStopped + default: + } + + opts = opts.Merge(&QueryOptions{ + Datacenter: d.dc, + Near: d.near, + }) + + u := &url.URL{ + Path: "/v1/catalog/service/" + d.name, + RawQuery: opts.String(), + } + if d.tag != "" { + q := u.Query() + q.Set("tag", d.tag) + u.RawQuery = q.Encode() + } + log.Printf("[TRACE] %s: GET %s", d, u) + + entries, qm, err := clients.Consul().Catalog().Service(d.name, d.tag, opts.ToConsulOpts()) + if err != nil { + return nil, nil, errors.Wrap(err, d.String()) + } + + log.Printf("[TRACE] %s: returned %d results", d, len(entries)) + + var list []*CatalogService + for _, s := range entries { + list = append(list, &CatalogService{ + ID: s.ID, + Node: s.Node, + Address: s.Address, + Datacenter: s.Datacenter, + TaggedAddresses: s.TaggedAddresses, + NodeMeta: s.NodeMeta, + ServiceID: s.ServiceID, + ServiceName: s.ServiceName, + ServiceAddress: s.ServiceAddress, + ServiceTags: ServiceTags(deepCopyAndSortTags(s.ServiceTags)), + ServiceMeta: s.ServiceMeta, + ServicePort: s.ServicePort, + }) + } + + rm := &ResponseMetadata{ + LastIndex: qm.LastIndex, + LastContact: qm.LastContact, + } + + return list, rm, nil +} + +// CanShare returns a boolean if this dependency is shareable. +func (d *CatalogServiceQuery) CanShare() bool { + return true +} + +// String returns the human-friendly version of this dependency. +func (d *CatalogServiceQuery) String() string { + name := d.name + if d.tag != "" { + name = d.tag + "." + name + } + if d.dc != "" { + name = name + "@" + d.dc + } + if d.near != "" { + name = name + "~" + d.near + } + return fmt.Sprintf("catalog.service(%s)", name) +} + +// Stop halts the dependency's fetch function. +func (d *CatalogServiceQuery) Stop() { + close(d.stopCh) +} + +// Type returns the type of this dependency. 
+func (d *CatalogServiceQuery) Type() Type { + return TypeConsul +} diff --git a/vendor/github.com/hashicorp/consul-template/dependency/catalog_services.go b/vendor/github.com/hashicorp/consul-template/dependency/catalog_services.go new file mode 100644 index 000000000..06ce03a77 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/dependency/catalog_services.go @@ -0,0 +1,129 @@ +package dependency + +import ( + "encoding/gob" + "fmt" + "log" + "net/url" + "regexp" + "sort" + + "github.com/pkg/errors" +) + +var ( + // Ensure implements + _ Dependency = (*CatalogServicesQuery)(nil) + + // CatalogServicesQueryRe is the regular expression to use for CatalogNodesQuery. + CatalogServicesQueryRe = regexp.MustCompile(`\A` + dcRe + `\z`) +) + +func init() { + gob.Register([]*CatalogSnippet{}) +} + +// CatalogSnippet is a catalog entry in Consul. +type CatalogSnippet struct { + Name string + Tags ServiceTags +} + +// CatalogServicesQuery is the representation of a requested catalog service +// dependency from inside a template. +type CatalogServicesQuery struct { + stopCh chan struct{} + + dc string +} + +// NewCatalogServicesQuery parses a string of the format @dc. +func NewCatalogServicesQuery(s string) (*CatalogServicesQuery, error) { + if !CatalogServicesQueryRe.MatchString(s) { + return nil, fmt.Errorf("catalog.services: invalid format: %q", s) + } + + m := regexpMatch(CatalogServicesQueryRe, s) + return &CatalogServicesQuery{ + stopCh: make(chan struct{}, 1), + dc: m["dc"], + }, nil +} + +// Fetch queries the Consul API defined by the given client and returns a slice +// of CatalogService objects. 
+func (d *CatalogServicesQuery) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) { + select { + case <-d.stopCh: + return nil, nil, ErrStopped + default: + } + + opts = opts.Merge(&QueryOptions{ + Datacenter: d.dc, + }) + + log.Printf("[TRACE] %s: GET %s", d, &url.URL{ + Path: "/v1/catalog/services", + RawQuery: opts.String(), + }) + + entries, qm, err := clients.Consul().Catalog().Services(opts.ToConsulOpts()) + if err != nil { + return nil, nil, errors.Wrap(err, d.String()) + } + + log.Printf("[TRACE] %s: returned %d results", d, len(entries)) + + var catalogServices []*CatalogSnippet + for name, tags := range entries { + catalogServices = append(catalogServices, &CatalogSnippet{ + Name: name, + Tags: ServiceTags(deepCopyAndSortTags(tags)), + }) + } + + sort.Stable(ByName(catalogServices)) + + rm := &ResponseMetadata{ + LastIndex: qm.LastIndex, + LastContact: qm.LastContact, + } + + return catalogServices, rm, nil +} + +// CanShare returns a boolean if this dependency is shareable. +func (d *CatalogServicesQuery) CanShare() bool { + return true +} + +// String returns the human-friendly version of this dependency. +func (d *CatalogServicesQuery) String() string { + if d.dc != "" { + return fmt.Sprintf("catalog.services(@%s)", d.dc) + } + return "catalog.services" +} + +// Stop halts the dependency's fetch function. +func (d *CatalogServicesQuery) Stop() { + close(d.stopCh) +} + +// Type returns the type of this dependency. +func (d *CatalogServicesQuery) Type() Type { + return TypeConsul +} + +// ByName is a sortable slice of CatalogService structs. 
+type ByName []*CatalogSnippet + +func (s ByName) Len() int { return len(s) } +func (s ByName) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s ByName) Less(i, j int) bool { + if s[i].Name <= s[j].Name { + return true + } + return false +} diff --git a/vendor/github.com/hashicorp/consul-template/dependency/client_set.go b/vendor/github.com/hashicorp/consul-template/dependency/client_set.go new file mode 100644 index 000000000..e2bceb773 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/dependency/client_set.go @@ -0,0 +1,338 @@ +package dependency + +import ( + "crypto/tls" + "fmt" + "log" + "net" + "net/http" + "sync" + "time" + + consulapi "github.com/hashicorp/consul/api" + rootcerts "github.com/hashicorp/go-rootcerts" + vaultapi "github.com/hashicorp/vault/api" +) + +// ClientSet is a collection of clients that dependencies use to communicate +// with remote services like Consul or Vault. +type ClientSet struct { + sync.RWMutex + + vault *vaultClient + consul *consulClient +} + +// consulClient is a wrapper around a real Consul API client. +type consulClient struct { + client *consulapi.Client + transport *http.Transport +} + +// vaultClient is a wrapper around a real Vault API client. +type vaultClient struct { + client *vaultapi.Client + httpClient *http.Client +} + +// CreateConsulClientInput is used as input to the CreateConsulClient function. +type CreateConsulClientInput struct { + Address string + Token string + AuthEnabled bool + AuthUsername string + AuthPassword string + SSLEnabled bool + SSLVerify bool + SSLCert string + SSLKey string + SSLCACert string + SSLCAPath string + ServerName string + + TransportDialKeepAlive time.Duration + TransportDialTimeout time.Duration + TransportDisableKeepAlives bool + TransportIdleConnTimeout time.Duration + TransportMaxIdleConns int + TransportMaxIdleConnsPerHost int + TransportTLSHandshakeTimeout time.Duration +} + +// CreateVaultClientInput is used as input to the CreateVaultClient function. 
+type CreateVaultClientInput struct { + Address string + Namespace string + Token string + UnwrapToken bool + SSLEnabled bool + SSLVerify bool + SSLCert string + SSLKey string + SSLCACert string + SSLCAPath string + ServerName string + + TransportDialKeepAlive time.Duration + TransportDialTimeout time.Duration + TransportDisableKeepAlives bool + TransportIdleConnTimeout time.Duration + TransportMaxIdleConns int + TransportMaxIdleConnsPerHost int + TransportTLSHandshakeTimeout time.Duration +} + +// NewClientSet creates a new client set that is ready to accept clients. +func NewClientSet() *ClientSet { + return &ClientSet{} +} + +// CreateConsulClient creates a new Consul API client from the given input. +func (c *ClientSet) CreateConsulClient(i *CreateConsulClientInput) error { + consulConfig := consulapi.DefaultConfig() + + if i.Address != "" { + consulConfig.Address = i.Address + } + + if i.Token != "" { + consulConfig.Token = i.Token + } + + if i.AuthEnabled { + consulConfig.HttpAuth = &consulapi.HttpBasicAuth{ + Username: i.AuthUsername, + Password: i.AuthPassword, + } + } + + // This transport will attempt to keep connections open to the Consul server. 
+ transport := &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: (&net.Dialer{ + Timeout: i.TransportDialTimeout, + KeepAlive: i.TransportDialKeepAlive, + }).Dial, + DisableKeepAlives: i.TransportDisableKeepAlives, + MaxIdleConns: i.TransportMaxIdleConns, + IdleConnTimeout: i.TransportIdleConnTimeout, + MaxIdleConnsPerHost: i.TransportMaxIdleConnsPerHost, + TLSHandshakeTimeout: i.TransportTLSHandshakeTimeout, + } + + // Configure SSL + if i.SSLEnabled { + consulConfig.Scheme = "https" + + var tlsConfig tls.Config + + // Custom certificate or certificate and key + if i.SSLCert != "" && i.SSLKey != "" { + cert, err := tls.LoadX509KeyPair(i.SSLCert, i.SSLKey) + if err != nil { + return fmt.Errorf("client set: consul: %s", err) + } + tlsConfig.Certificates = []tls.Certificate{cert} + } else if i.SSLCert != "" { + cert, err := tls.LoadX509KeyPair(i.SSLCert, i.SSLCert) + if err != nil { + return fmt.Errorf("client set: consul: %s", err) + } + tlsConfig.Certificates = []tls.Certificate{cert} + } + + // Custom CA certificate + if i.SSLCACert != "" || i.SSLCAPath != "" { + rootConfig := &rootcerts.Config{ + CAFile: i.SSLCACert, + CAPath: i.SSLCAPath, + } + if err := rootcerts.ConfigureTLS(&tlsConfig, rootConfig); err != nil { + return fmt.Errorf("client set: consul configuring TLS failed: %s", err) + } + } + + // Construct all the certificates now + tlsConfig.BuildNameToCertificate() + + // SSL verification + if i.ServerName != "" { + tlsConfig.ServerName = i.ServerName + tlsConfig.InsecureSkipVerify = false + } + if !i.SSLVerify { + log.Printf("[WARN] (clients) disabling consul SSL verification") + tlsConfig.InsecureSkipVerify = true + } + + // Save the TLS config on our transport + transport.TLSClientConfig = &tlsConfig + } + + // Setup the new transport + consulConfig.Transport = transport + + // Create the API client + client, err := consulapi.NewClient(consulConfig) + if err != nil { + return fmt.Errorf("client set: consul: %s", err) + } + + // Save the data 
on ourselves + c.Lock() + c.consul = &consulClient{ + client: client, + transport: transport, + } + c.Unlock() + + return nil +} + +func (c *ClientSet) CreateVaultClient(i *CreateVaultClientInput) error { + vaultConfig := vaultapi.DefaultConfig() + + if i.Address != "" { + vaultConfig.Address = i.Address + } + + // This transport will attempt to keep connections open to the Vault server. + transport := &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: (&net.Dialer{ + Timeout: i.TransportDialTimeout, + KeepAlive: i.TransportDialKeepAlive, + }).Dial, + DisableKeepAlives: i.TransportDisableKeepAlives, + MaxIdleConns: i.TransportMaxIdleConns, + IdleConnTimeout: i.TransportIdleConnTimeout, + MaxIdleConnsPerHost: i.TransportMaxIdleConnsPerHost, + TLSHandshakeTimeout: i.TransportTLSHandshakeTimeout, + } + + // Configure SSL + if i.SSLEnabled { + var tlsConfig tls.Config + + // Custom certificate or certificate and key + if i.SSLCert != "" && i.SSLKey != "" { + cert, err := tls.LoadX509KeyPair(i.SSLCert, i.SSLKey) + if err != nil { + return fmt.Errorf("client set: vault: %s", err) + } + tlsConfig.Certificates = []tls.Certificate{cert} + } else if i.SSLCert != "" { + cert, err := tls.LoadX509KeyPair(i.SSLCert, i.SSLCert) + if err != nil { + return fmt.Errorf("client set: vault: %s", err) + } + tlsConfig.Certificates = []tls.Certificate{cert} + } + + // Custom CA certificate + if i.SSLCACert != "" || i.SSLCAPath != "" { + rootConfig := &rootcerts.Config{ + CAFile: i.SSLCACert, + CAPath: i.SSLCAPath, + } + if err := rootcerts.ConfigureTLS(&tlsConfig, rootConfig); err != nil { + return fmt.Errorf("client set: vault configuring TLS failed: %s", err) + } + } + + // Construct all the certificates now + tlsConfig.BuildNameToCertificate() + + // SSL verification + if i.ServerName != "" { + tlsConfig.ServerName = i.ServerName + tlsConfig.InsecureSkipVerify = false + } + if !i.SSLVerify { + log.Printf("[WARN] (clients) disabling vault SSL verification") + 
tlsConfig.InsecureSkipVerify = true + } + + // Save the TLS config on our transport + transport.TLSClientConfig = &tlsConfig + } + + // Setup the new transport + vaultConfig.HttpClient.Transport = transport + + // Create the client + client, err := vaultapi.NewClient(vaultConfig) + if err != nil { + return fmt.Errorf("client set: vault: %s", err) + } + + // Set the namespace if given. + if i.Namespace != "" { + client.SetNamespace(i.Namespace) + } + + // Set the token if given + if i.Token != "" { + client.SetToken(i.Token) + } + + // Check if we are unwrapping + if i.UnwrapToken { + secret, err := client.Logical().Unwrap(i.Token) + if err != nil { + return fmt.Errorf("client set: vault unwrap: %s", err) + } + + if secret == nil { + return fmt.Errorf("client set: vault unwrap: no secret") + } + + if secret.Auth == nil { + return fmt.Errorf("client set: vault unwrap: no secret auth") + } + + if secret.Auth.ClientToken == "" { + return fmt.Errorf("client set: vault unwrap: no token returned") + } + + client.SetToken(secret.Auth.ClientToken) + } + + // Save the data on ourselves + c.Lock() + c.vault = &vaultClient{ + client: client, + httpClient: vaultConfig.HttpClient, + } + c.Unlock() + + return nil +} + +// Consul returns the Consul client for this set. +func (c *ClientSet) Consul() *consulapi.Client { + c.RLock() + defer c.RUnlock() + return c.consul.client +} + +// Vault returns the Vault client for this set. +func (c *ClientSet) Vault() *vaultapi.Client { + c.RLock() + defer c.RUnlock() + return c.vault.client +} + +// Stop closes all idle connections for any attached clients. 
+func (c *ClientSet) Stop() { + c.Lock() + defer c.Unlock() + + if c.consul != nil { + c.consul.transport.CloseIdleConnections() + } + + if c.vault != nil { + c.vault.httpClient.Transport.(*http.Transport).CloseIdleConnections() + } +} diff --git a/vendor/github.com/hashicorp/consul-template/dependency/dependency.go b/vendor/github.com/hashicorp/consul-template/dependency/dependency.go new file mode 100644 index 000000000..c9161f82f --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/dependency/dependency.go @@ -0,0 +1,189 @@ +package dependency + +import ( + "net/url" + "regexp" + "sort" + "strconv" + "time" + + consulapi "github.com/hashicorp/consul/api" +) + +const ( + dcRe = `(@(?P[[:word:]\.\-\_]+))?` + keyRe = `/?(?P[^@]+)` + filterRe = `(\|(?P[[:word:]\,]+))?` + serviceNameRe = `(?P[[:word:]\-\_]+)` + nodeNameRe = `(?P[[:word:]\.\-\_]+)` + nearRe = `(~(?P[[:word:]\.\-\_]+))?` + prefixRe = `/?(?P[^@]+)` + tagRe = `((?P[[:word:]=:\.\-\_]+)\.)?` +) + +type Type int + +const ( + TypeConsul Type = iota + TypeVault + TypeLocal +) + +// Dependency is an interface for a dependency that Consul Template is capable +// of watching. +type Dependency interface { + Fetch(*ClientSet, *QueryOptions) (interface{}, *ResponseMetadata, error) + CanShare() bool + String() string + Stop() + Type() Type +} + +// ServiceTags is a slice of tags assigned to a Service +type ServiceTags []string + +// QueryOptions is a list of options to send with the query. These options are +// client-agnostic, and the dependency determines which, if any, of the options +// to use. 
+type QueryOptions struct { + AllowStale bool + Datacenter string + Near string + RequireConsistent bool + VaultGrace time.Duration + WaitIndex uint64 + WaitTime time.Duration +} + +func (q *QueryOptions) Merge(o *QueryOptions) *QueryOptions { + var r QueryOptions + + if q == nil { + if o == nil { + return &QueryOptions{} + } + r = *o + return &r + } + + r = *q + + if o == nil { + return &r + } + + if o.AllowStale != false { + r.AllowStale = o.AllowStale + } + + if o.Datacenter != "" { + r.Datacenter = o.Datacenter + } + + if o.Near != "" { + r.Near = o.Near + } + + if o.RequireConsistent != false { + r.RequireConsistent = o.RequireConsistent + } + + if o.WaitIndex != 0 { + r.WaitIndex = o.WaitIndex + } + + if o.WaitTime != 0 { + r.WaitTime = o.WaitTime + } + + return &r +} + +func (q *QueryOptions) ToConsulOpts() *consulapi.QueryOptions { + return &consulapi.QueryOptions{ + AllowStale: q.AllowStale, + Datacenter: q.Datacenter, + Near: q.Near, + RequireConsistent: q.RequireConsistent, + WaitIndex: q.WaitIndex, + WaitTime: q.WaitTime, + } +} + +func (q *QueryOptions) String() string { + u := &url.Values{} + + if q.AllowStale { + u.Add("stale", strconv.FormatBool(q.AllowStale)) + } + + if q.Datacenter != "" { + u.Add("dc", q.Datacenter) + } + + if q.Near != "" { + u.Add("near", q.Near) + } + + if q.RequireConsistent { + u.Add("consistent", strconv.FormatBool(q.RequireConsistent)) + } + + if q.WaitIndex != 0 { + u.Add("index", strconv.FormatUint(q.WaitIndex, 10)) + } + + if q.WaitTime != 0 { + u.Add("wait", q.WaitTime.String()) + } + + return u.Encode() +} + +// ResponseMetadata is a struct that contains metadata about the response. This +// is returned from a Fetch function call. +type ResponseMetadata struct { + LastIndex uint64 + LastContact time.Duration + Block bool +} + +// deepCopyAndSortTags deep copies the tags in the given string slice and then +// sorts and returns the copied result. 
+func deepCopyAndSortTags(tags []string) []string { + newTags := make([]string, 0, len(tags)) + for _, tag := range tags { + newTags = append(newTags, tag) + } + sort.Strings(newTags) + return newTags +} + +// respWithMetadata is a short wrapper to return the given interface with fake +// response metadata for non-Consul dependencies. +func respWithMetadata(i interface{}) (interface{}, *ResponseMetadata, error) { + return i, &ResponseMetadata{ + LastContact: 0, + LastIndex: uint64(time.Now().Unix()), + }, nil +} + +// regexpMatch matches the given regexp and extracts the match groups into a +// named map. +func regexpMatch(re *regexp.Regexp, q string) map[string]string { + names := re.SubexpNames() + match := re.FindAllStringSubmatch(q, -1) + + if len(match) == 0 { + return map[string]string{} + } + + m := map[string]string{} + for i, n := range match[0] { + if names[i] != "" { + m[names[i]] = n + } + } + + return m +} diff --git a/vendor/github.com/hashicorp/consul-template/dependency/errors.go b/vendor/github.com/hashicorp/consul-template/dependency/errors.go new file mode 100644 index 000000000..dd03ac877 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/dependency/errors.go @@ -0,0 +1,13 @@ +package dependency + +import "errors" + +// ErrStopped is a special error that is returned when a dependency is +// prematurely stopped, usually due to a configuration reload or a process +// interrupt. +var ErrStopped = errors.New("dependency stopped") + +// ErrContinue is a special error which says to continue (retry) on error. 
+var ErrContinue = errors.New("dependency continue") + +var ErrLeaseExpired = errors.New("lease expired or is not renewable") diff --git a/vendor/github.com/hashicorp/consul-template/dependency/file.go b/vendor/github.com/hashicorp/consul-template/dependency/file.go new file mode 100644 index 000000000..3f9fb52e8 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/dependency/file.go @@ -0,0 +1,129 @@ +package dependency + +import ( + "fmt" + "io/ioutil" + "log" + "os" + "strings" + "time" + + "github.com/pkg/errors" +) + +var ( + // Ensure implements + _ Dependency = (*FileQuery)(nil) + + // FileQuerySleepTime is the amount of time to sleep between queries, since + // the fsnotify library is not compatible with solaris and other OSes yet. + FileQuerySleepTime = 2 * time.Second +) + +// FileQuery represents a local file dependency. +type FileQuery struct { + stopCh chan struct{} + + path string + stat os.FileInfo +} + +// NewFileQuery creates a file dependency from the given path. +func NewFileQuery(s string) (*FileQuery, error) { + s = strings.TrimSpace(s) + if s == "" { + return nil, fmt.Errorf("file: invalid format: %q", s) + } + + return &FileQuery{ + stopCh: make(chan struct{}, 1), + path: s, + }, nil +} + +// Fetch retrieves this dependency and returns the result or any errors that +// occur in the process. +func (d *FileQuery) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) { + log.Printf("[TRACE] %s: READ %s", d, d.path) + + select { + case <-d.stopCh: + log.Printf("[TRACE] %s: stopped", d) + return "", nil, ErrStopped + case r := <-d.watch(d.stat): + if r.err != nil { + return "", nil, errors.Wrap(r.err, d.String()) + } + + log.Printf("[TRACE] %s: reported change", d) + + data, err := ioutil.ReadFile(d.path) + if err != nil { + return "", nil, errors.Wrap(err, d.String()) + } + + d.stat = r.stat + return respWithMetadata(string(data)) + } +} + +// CanShare returns a boolean if this dependency is shareable. 
+func (d *FileQuery) CanShare() bool { + return false +} + +// Stop halts the dependency's fetch function. +func (d *FileQuery) Stop() { + close(d.stopCh) +} + +// String returns the human-friendly version of this dependency. +func (d *FileQuery) String() string { + return fmt.Sprintf("file(%s)", d.path) +} + +// Type returns the type of this dependency. +func (d *FileQuery) Type() Type { + return TypeLocal +} + +type watchResult struct { + stat os.FileInfo + err error +} + +// watch watchers the file for changes +func (d *FileQuery) watch(lastStat os.FileInfo) <-chan *watchResult { + ch := make(chan *watchResult, 1) + + go func(lastStat os.FileInfo) { + for { + stat, err := os.Stat(d.path) + if err != nil { + select { + case <-d.stopCh: + return + case ch <- &watchResult{err: err}: + return + } + } + + changed := lastStat == nil || + lastStat.Size() != stat.Size() || + lastStat.ModTime() != stat.ModTime() + + if changed { + select { + case <-d.stopCh: + return + case ch <- &watchResult{stat: stat}: + return + } + } + + time.Sleep(FileQuerySleepTime) + } + }(lastStat) + + return ch +} diff --git a/vendor/github.com/hashicorp/consul-template/dependency/health_service.go b/vendor/github.com/hashicorp/consul-template/dependency/health_service.go new file mode 100644 index 000000000..215f53d0d --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/dependency/health_service.go @@ -0,0 +1,248 @@ +package dependency + +import ( + "encoding/gob" + "fmt" + "log" + "net/url" + "regexp" + "sort" + "strings" + + "github.com/hashicorp/consul/api" + "github.com/pkg/errors" +) + +const ( + HealthAny = "any" + HealthPassing = "passing" + HealthWarning = "warning" + HealthCritical = "critical" + HealthMaint = "maintenance" + + NodeMaint = "_node_maintenance" + ServiceMaint = "_service_maintenance:" +) + +var ( + // Ensure implements + _ Dependency = (*HealthServiceQuery)(nil) + + // HealthServiceQueryRe is the regular expression to use. 
+ HealthServiceQueryRe = regexp.MustCompile(`\A` + tagRe + serviceNameRe + dcRe + nearRe + filterRe + `\z`) +) + +func init() { + gob.Register([]*HealthService{}) +} + +// HealthService is a service entry in Consul. +type HealthService struct { + Node string + NodeID string + NodeAddress string + NodeTaggedAddresses map[string]string + NodeMeta map[string]string + ServiceMeta map[string]string + Address string + ID string + Name string + Tags ServiceTags + Checks api.HealthChecks + Status string + Port int +} + +// HealthServiceQuery is the representation of all a service query in Consul. +type HealthServiceQuery struct { + stopCh chan struct{} + + dc string + filters []string + name string + near string + tag string +} + +// NewHealthServiceQuery processes the strings to build a service dependency. +func NewHealthServiceQuery(s string) (*HealthServiceQuery, error) { + if !HealthServiceQueryRe.MatchString(s) { + return nil, fmt.Errorf("health.service: invalid format: %q", s) + } + + m := regexpMatch(HealthServiceQueryRe, s) + + var filters []string + if filter := m["filter"]; filter != "" { + split := strings.Split(filter, ",") + for _, f := range split { + f = strings.TrimSpace(f) + switch f { + case HealthAny, + HealthPassing, + HealthWarning, + HealthCritical, + HealthMaint: + filters = append(filters, f) + case "": + default: + return nil, fmt.Errorf("health.service: invalid filter: %q in %q", f, s) + } + } + sort.Strings(filters) + } else { + filters = []string{HealthPassing} + } + + return &HealthServiceQuery{ + stopCh: make(chan struct{}, 1), + dc: m["dc"], + filters: filters, + name: m["name"], + near: m["near"], + tag: m["tag"], + }, nil +} + +// Fetch queries the Consul API defined by the given client and returns a slice +// of HealthService objects. 
+func (d *HealthServiceQuery) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) { + select { + case <-d.stopCh: + return nil, nil, ErrStopped + default: + } + + opts = opts.Merge(&QueryOptions{ + Datacenter: d.dc, + Near: d.near, + }) + + u := &url.URL{ + Path: "/v1/health/service/" + d.name, + RawQuery: opts.String(), + } + if d.tag != "" { + q := u.Query() + q.Set("tag", d.tag) + u.RawQuery = q.Encode() + } + log.Printf("[TRACE] %s: GET %s", d, u) + + // Check if a user-supplied filter was given. If so, we may be querying for + // more than healthy services, so we need to implement client-side filtering. + passingOnly := len(d.filters) == 1 && d.filters[0] == HealthPassing + + entries, qm, err := clients.Consul().Health().Service(d.name, d.tag, passingOnly, opts.ToConsulOpts()) + if err != nil { + return nil, nil, errors.Wrap(err, d.String()) + } + + log.Printf("[TRACE] %s: returned %d results", d, len(entries)) + + list := make([]*HealthService, 0, len(entries)) + for _, entry := range entries { + // Get the status of this service from its checks. + status := entry.Checks.AggregatedStatus() + + // If we are not checking only healthy services, filter out services that do + // not match the given filter. + if !acceptStatus(d.filters, status) { + continue + } + + // Get the address of the service, falling back to the address of the node. 
+ address := entry.Service.Address + if address == "" { + address = entry.Node.Address + } + + list = append(list, &HealthService{ + Node: entry.Node.Node, + NodeID: entry.Node.ID, + NodeAddress: entry.Node.Address, + NodeTaggedAddresses: entry.Node.TaggedAddresses, + NodeMeta: entry.Node.Meta, + ServiceMeta: entry.Service.Meta, + Address: address, + ID: entry.Service.ID, + Name: entry.Service.Service, + Tags: ServiceTags(deepCopyAndSortTags(entry.Service.Tags)), + Status: status, + Checks: entry.Checks, + Port: entry.Service.Port, + }) + } + + log.Printf("[TRACE] %s: returned %d results after filtering", d, len(list)) + + // Sort unless the user explicitly asked for nearness + if d.near == "" { + sort.Stable(ByNodeThenID(list)) + } + + rm := &ResponseMetadata{ + LastIndex: qm.LastIndex, + LastContact: qm.LastContact, + } + + return list, rm, nil +} + +// CanShare returns a boolean if this dependency is shareable. +func (d *HealthServiceQuery) CanShare() bool { + return true +} + +// Stop halts the dependency's fetch function. +func (d *HealthServiceQuery) Stop() { + close(d.stopCh) +} + +// String returns the human-friendly version of this dependency. +func (d *HealthServiceQuery) String() string { + name := d.name + if d.tag != "" { + name = d.tag + "." + name + } + if d.dc != "" { + name = name + "@" + d.dc + } + if d.near != "" { + name = name + "~" + d.near + } + if len(d.filters) > 0 { + name = name + "|" + strings.Join(d.filters, ",") + } + return fmt.Sprintf("health.service(%s)", name) +} + +// Type returns the type of this dependency. +func (d *HealthServiceQuery) Type() Type { + return TypeConsul +} + +// acceptStatus allows us to check if a slice of health checks pass this filter. 
+func acceptStatus(list []string, s string) bool { + for _, status := range list { + if status == s || status == HealthAny { + return true + } + } + return false +} + +// ByNodeThenID is a sortable slice of Service +type ByNodeThenID []*HealthService + +// Len, Swap, and Less are used to implement the sort.Sort interface. +func (s ByNodeThenID) Len() int { return len(s) } +func (s ByNodeThenID) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s ByNodeThenID) Less(i, j int) bool { + if s[i].Node < s[j].Node { + return true + } else if s[i].Node == s[j].Node { + return s[i].ID <= s[j].ID + } + return false +} diff --git a/vendor/github.com/hashicorp/consul-template/dependency/kv_get.go b/vendor/github.com/hashicorp/consul-template/dependency/kv_get.go new file mode 100644 index 000000000..a075ea5df --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/dependency/kv_get.go @@ -0,0 +1,112 @@ +package dependency + +import ( + "fmt" + "log" + "net/url" + "regexp" + + "github.com/pkg/errors" +) + +var ( + // Ensure implements + _ Dependency = (*KVGetQuery)(nil) + + // KVGetQueryRe is the regular expression to use. + KVGetQueryRe = regexp.MustCompile(`\A` + keyRe + dcRe + `\z`) +) + +// KVGetQuery queries the KV store for a single key. +type KVGetQuery struct { + stopCh chan struct{} + + dc string + key string + block bool +} + +// NewKVGetQuery parses a string into a dependency. +func NewKVGetQuery(s string) (*KVGetQuery, error) { + if s != "" && !KVGetQueryRe.MatchString(s) { + return nil, fmt.Errorf("kv.get: invalid format: %q", s) + } + + m := regexpMatch(KVGetQueryRe, s) + return &KVGetQuery{ + stopCh: make(chan struct{}, 1), + dc: m["dc"], + key: m["key"], + }, nil +} + +// Fetch queries the Consul API defined by the given client. 
+func (d *KVGetQuery) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) { + select { + case <-d.stopCh: + return nil, nil, ErrStopped + default: + } + + opts = opts.Merge(&QueryOptions{ + Datacenter: d.dc, + }) + + log.Printf("[TRACE] %s: GET %s", d, &url.URL{ + Path: "/v1/kv/" + d.key, + RawQuery: opts.String(), + }) + + pair, qm, err := clients.Consul().KV().Get(d.key, opts.ToConsulOpts()) + if err != nil { + return nil, nil, errors.Wrap(err, d.String()) + } + + rm := &ResponseMetadata{ + LastIndex: qm.LastIndex, + LastContact: qm.LastContact, + Block: d.block, + } + + if pair == nil { + log.Printf("[TRACE] %s: returned nil", d) + return nil, rm, nil + } + + value := string(pair.Value) + log.Printf("[TRACE] %s: returned %q", d, value) + return value, rm, nil +} + +// EnableBlocking turns this into a blocking KV query. +func (d *KVGetQuery) EnableBlocking() { + d.block = true +} + +// CanShare returns a boolean if this dependency is shareable. +func (d *KVGetQuery) CanShare() bool { + return true +} + +// String returns the human-friendly version of this dependency. +func (d *KVGetQuery) String() string { + key := d.key + if d.dc != "" { + key = key + "@" + d.dc + } + + if d.block { + return fmt.Sprintf("kv.block(%s)", key) + } + return fmt.Sprintf("kv.get(%s)", key) +} + +// Stop halts the dependency's fetch function. +func (d *KVGetQuery) Stop() { + close(d.stopCh) +} + +// Type returns the type of this dependency. 
+func (d *KVGetQuery) Type() Type { + return TypeConsul +} diff --git a/vendor/github.com/hashicorp/consul-template/dependency/kv_keys.go b/vendor/github.com/hashicorp/consul-template/dependency/kv_keys.go new file mode 100644 index 000000000..60e1ef7e4 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/dependency/kv_keys.go @@ -0,0 +1,104 @@ +package dependency + +import ( + "fmt" + "log" + "net/url" + "regexp" + "strings" + + "github.com/pkg/errors" +) + +var ( + // Ensure implements + _ Dependency = (*KVKeysQuery)(nil) + + // KVKeysQueryRe is the regular expression to use. + KVKeysQueryRe = regexp.MustCompile(`\A` + prefixRe + dcRe + `\z`) +) + +// KVKeysQuery queries the KV store for a single key. +type KVKeysQuery struct { + stopCh chan struct{} + + dc string + prefix string +} + +// NewKVKeysQuery parses a string into a dependency. +func NewKVKeysQuery(s string) (*KVKeysQuery, error) { + if s != "" && !KVKeysQueryRe.MatchString(s) { + return nil, fmt.Errorf("kv.keys: invalid format: %q", s) + } + + m := regexpMatch(KVKeysQueryRe, s) + return &KVKeysQuery{ + stopCh: make(chan struct{}, 1), + dc: m["dc"], + prefix: m["prefix"], + }, nil +} + +// Fetch queries the Consul API defined by the given client. 
+func (d *KVKeysQuery) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) { + select { + case <-d.stopCh: + return nil, nil, ErrStopped + default: + } + + opts = opts.Merge(&QueryOptions{ + Datacenter: d.dc, + }) + + log.Printf("[TRACE] %s: GET %s", d, &url.URL{ + Path: "/v1/kv/" + d.prefix, + RawQuery: opts.String(), + }) + + list, qm, err := clients.Consul().KV().Keys(d.prefix, "", opts.ToConsulOpts()) + if err != nil { + return nil, nil, errors.Wrap(err, d.String()) + } + + keys := make([]string, len(list)) + for i, v := range list { + v = strings.TrimPrefix(v, d.prefix) + v = strings.TrimLeft(v, "/") + keys[i] = v + } + + log.Printf("[TRACE] %s: returned %d results", d, len(list)) + + rm := &ResponseMetadata{ + LastIndex: qm.LastIndex, + LastContact: qm.LastContact, + } + + return keys, rm, nil +} + +// CanShare returns a boolean if this dependency is shareable. +func (d *KVKeysQuery) CanShare() bool { + return true +} + +// String returns the human-friendly version of this dependency. +func (d *KVKeysQuery) String() string { + prefix := d.prefix + if d.dc != "" { + prefix = prefix + "@" + d.dc + } + return fmt.Sprintf("kv.keys(%s)", prefix) +} + +// Stop halts the dependency's fetch function. +func (d *KVKeysQuery) Stop() { + close(d.stopCh) +} + +// Type returns the type of this dependency. +func (d *KVKeysQuery) Type() Type { + return TypeConsul +} diff --git a/vendor/github.com/hashicorp/consul-template/dependency/kv_list.go b/vendor/github.com/hashicorp/consul-template/dependency/kv_list.go new file mode 100644 index 000000000..929dfa423 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/dependency/kv_list.go @@ -0,0 +1,133 @@ +package dependency + +import ( + "encoding/gob" + "fmt" + "log" + "net/url" + "regexp" + "strings" + + "github.com/pkg/errors" +) + +var ( + // Ensure implements + _ Dependency = (*KVListQuery)(nil) + + // KVListQueryRe is the regular expression to use. 
+ KVListQueryRe = regexp.MustCompile(`\A` + prefixRe + dcRe + `\z`) +) + +func init() { + gob.Register([]*KeyPair{}) +} + +// KeyPair is a simple Key-Value pair +type KeyPair struct { + Path string + Key string + Value string + + // Lesser-used, but still valuable keys from api.KV + CreateIndex uint64 + ModifyIndex uint64 + LockIndex uint64 + Flags uint64 + Session string +} + +// KVListQuery queries the KV store for a single key. +type KVListQuery struct { + stopCh chan struct{} + + dc string + prefix string +} + +// NewKVListQuery parses a string into a dependency. +func NewKVListQuery(s string) (*KVListQuery, error) { + if s != "" && !KVListQueryRe.MatchString(s) { + return nil, fmt.Errorf("kv.list: invalid format: %q", s) + } + + m := regexpMatch(KVListQueryRe, s) + return &KVListQuery{ + stopCh: make(chan struct{}, 1), + dc: m["dc"], + prefix: m["prefix"], + }, nil +} + +// Fetch queries the Consul API defined by the given client. +func (d *KVListQuery) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) { + select { + case <-d.stopCh: + return nil, nil, ErrStopped + default: + } + + opts = opts.Merge(&QueryOptions{ + Datacenter: d.dc, + }) + + log.Printf("[TRACE] %s: GET %s", d, &url.URL{ + Path: "/v1/kv/" + d.prefix, + RawQuery: opts.String(), + }) + + list, qm, err := clients.Consul().KV().List(d.prefix, opts.ToConsulOpts()) + if err != nil { + return nil, nil, errors.Wrap(err, d.String()) + } + + log.Printf("[TRACE] %s: returned %d pairs", d, len(list)) + + pairs := make([]*KeyPair, 0, len(list)) + for _, pair := range list { + key := strings.TrimPrefix(pair.Key, d.prefix) + key = strings.TrimLeft(key, "/") + + pairs = append(pairs, &KeyPair{ + Path: pair.Key, + Key: key, + Value: string(pair.Value), + CreateIndex: pair.CreateIndex, + ModifyIndex: pair.ModifyIndex, + LockIndex: pair.LockIndex, + Flags: pair.Flags, + Session: pair.Session, + }) + } + + rm := &ResponseMetadata{ + LastIndex: qm.LastIndex, + LastContact: 
qm.LastContact, + } + + return pairs, rm, nil +} + +// CanShare returns a boolean if this dependency is shareable. +func (d *KVListQuery) CanShare() bool { + return true +} + +// String returns the human-friendly version of this dependency. +func (d *KVListQuery) String() string { + prefix := d.prefix + if d.dc != "" { + prefix = prefix + "@" + d.dc + } + return fmt.Sprintf("kv.list(%s)", prefix) +} + +// Stop halts the dependency's fetch function. +func (d *KVListQuery) Stop() { + close(d.stopCh) +} + +// Type returns the type of this dependency. +func (d *KVListQuery) Type() Type { + return TypeConsul +} diff --git a/vendor/github.com/hashicorp/consul-template/dependency/set.go b/vendor/github.com/hashicorp/consul-template/dependency/set.go new file mode 100644 index 000000000..d3a5df3ab --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/dependency/set.go @@ -0,0 +1,72 @@ +package dependency + +import ( + "strings" + "sync" +) + +// Set is a dependency-specific set implementation. Relative ordering is +// preserved. +type Set struct { + once sync.Once + sync.RWMutex + list []string + set map[string]Dependency +} + +// Add adds a new element to the set if it does not already exist. +func (s *Set) Add(d Dependency) bool { + s.init() + s.Lock() + defer s.Unlock() + if _, ok := s.set[d.String()]; !ok { + s.list = append(s.list, d.String()) + s.set[d.String()] = d + return true + } + return false +} + +// Get retrieves a single element from the set by name. +func (s *Set) Get(v string) Dependency { + s.RLock() + defer s.RUnlock() + return s.set[v] +} + +// List returns the insertion-ordered list of dependencies. +func (s *Set) List() []Dependency { + s.RLock() + defer s.RUnlock() + r := make([]Dependency, len(s.list)) + for i, k := range s.list { + r[i] = s.set[k] + } + return r +} + +// Len is the size of the set. +func (s *Set) Len() int { + s.RLock() + defer s.RUnlock() + return len(s.list) +} + +// String is a string representation of the set. 
+func (s *Set) String() string { + s.RLock() + defer s.RUnlock() + return strings.Join(s.list, ", ") +} + +func (s *Set) init() { + s.once.Do(func() { + if s.list == nil { + s.list = make([]string, 0, 8) + } + + if s.set == nil { + s.set = make(map[string]Dependency) + } + }) +} diff --git a/vendor/github.com/hashicorp/consul-template/dependency/vault_agent_token.go b/vendor/github.com/hashicorp/consul-template/dependency/vault_agent_token.go new file mode 100644 index 000000000..1ce339ea6 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/dependency/vault_agent_token.go @@ -0,0 +1,121 @@ +package dependency + +import ( + "io/ioutil" + "log" + "os" + "strings" + "time" + + "github.com/pkg/errors" +) + +var ( + // Ensure implements + _ Dependency = (*VaultAgentTokenQuery)(nil) +) + +const ( + // VaultAgentTokenSleepTime is the amount of time to sleep between queries, since + // the fsnotify library is not compatible with solaris and other OSes yet. + VaultAgentTokenSleepTime = 15 * time.Second +) + +// VaultAgentTokenQuery is the dependency to Vault Agent token +type VaultAgentTokenQuery struct { + stopCh chan struct{} + path string + stat os.FileInfo +} + +// NewVaultAgentTokenQuery creates a new dependency. +func NewVaultAgentTokenQuery(path string) (*VaultAgentTokenQuery, error) { + return &VaultAgentTokenQuery{ + stopCh: make(chan struct{}, 1), + path: path, + }, nil +} + +// Fetch retrieves this dependency and returns the result or any errors that +// occur in the process. 
+func (d *VaultAgentTokenQuery) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) { + log.Printf("[TRACE] %s: READ %s", d, d.path) + + select { + case <-d.stopCh: + log.Printf("[TRACE] %s: stopped", d) + return "", nil, ErrStopped + case r := <-d.watch(d.stat): + if r.err != nil { + return "", nil, errors.Wrap(r.err, d.String()) + } + + log.Printf("[TRACE] %s: reported change", d) + + token, err := ioutil.ReadFile(d.path) + if err != nil { + return "", nil, errors.Wrap(err, d.String()) + } + + d.stat = r.stat + clients.Vault().SetToken(strings.TrimSpace(string(token))) + } + + return respWithMetadata("") +} + +// CanShare returns if this dependency is sharable. +func (d *VaultAgentTokenQuery) CanShare() bool { + return false +} + +// Stop halts the dependency's fetch function. +func (d *VaultAgentTokenQuery) Stop() { + close(d.stopCh) +} + +// String returns the human-friendly version of this dependency. +func (d *VaultAgentTokenQuery) String() string { + return "vault-agent.token" +} + +// Type returns the type of this dependency. 
+func (d *VaultAgentTokenQuery) Type() Type { + return TypeVault +} + +// watch watches the file for changes +func (d *VaultAgentTokenQuery) watch(lastStat os.FileInfo) <-chan *watchResult { + ch := make(chan *watchResult, 1) + + go func(lastStat os.FileInfo) { + for { + stat, err := os.Stat(d.path) + if err != nil { + select { + case <-d.stopCh: + return + case ch <- &watchResult{err: err}: + return + } + } + + changed := lastStat == nil || + lastStat.Size() != stat.Size() || + lastStat.ModTime() != stat.ModTime() + + if changed { + select { + case <-d.stopCh: + return + case ch <- &watchResult{stat: stat}: + return + } + } + + time.Sleep(VaultAgentTokenSleepTime) + } + }(lastStat) + + return ch +} diff --git a/vendor/github.com/hashicorp/consul-template/dependency/vault_common.go b/vendor/github.com/hashicorp/consul-template/dependency/vault_common.go new file mode 100644 index 000000000..6abe69cfd --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/dependency/vault_common.go @@ -0,0 +1,348 @@ +package dependency + +import ( + "log" + "math/rand" + "path" + "strings" + "time" + + "crypto/x509" + "encoding/pem" + + "github.com/hashicorp/vault/api" +) + +var ( + // VaultDefaultLeaseDuration is the default lease duration in seconds. + VaultDefaultLeaseDuration = 5 * time.Minute +) + +// Secret is the structure returned for every secret within Vault. +type Secret struct { + // The request ID that generated this response + RequestID string + + LeaseID string + LeaseDuration int + Renewable bool + + // Data is the actual contents of the secret. The format of the data + // is arbitrary and up to the secret backend. + Data map[string]interface{} + + // Warnings contains any warnings related to the operation. These + // are not issues that caused the command to fail, but that the + // client should be aware of. + Warnings []string + + // Auth, if non-nil, means that there was authentication information + // attached to this response. 
+ Auth *SecretAuth + + // WrapInfo, if non-nil, means that the initial response was wrapped in the + // cubbyhole of the given token (which has a TTL of the given number of + // seconds) + WrapInfo *SecretWrapInfo +} + +// SecretAuth is the structure containing auth information if we have it. +type SecretAuth struct { + ClientToken string + Accessor string + Policies []string + Metadata map[string]string + + LeaseDuration int + Renewable bool +} + +// SecretWrapInfo contains wrapping information if we have it. If what is +// contained is an authentication token, the accessor for the token will be +// available in WrappedAccessor. +type SecretWrapInfo struct { + Token string + TTL int + CreationTime time.Time + WrappedAccessor string +} + +// +type renewer interface { + Dependency + stopChan() chan struct{} + secrets() (*Secret, *api.Secret) +} + +func renewSecret(clients *ClientSet, d renewer) error { + log.Printf("[TRACE] %s: starting renewer", d) + + secret, vaultSecret := d.secrets() + renewer, err := clients.Vault().NewRenewer(&api.RenewerInput{ + Secret: vaultSecret, + }) + if err != nil { + return err + } + go renewer.Renew() + defer renewer.Stop() + + for { + select { + case err := <-renewer.DoneCh(): + if err != nil { + log.Printf("[WARN] %s: failed to renew: %s", d, err) + } + log.Printf("[WARN] %s: renewer done (maybe the lease expired)", d) + return nil + case renewal := <-renewer.RenewCh(): + log.Printf("[TRACE] %s: successfully renewed", d) + printVaultWarnings(d, renewal.Secret.Warnings) + updateSecret(secret, renewal.Secret) + case <-d.stopChan(): + return ErrStopped + } + } +} + +// durationFrom cert gets the duration of validity from cert data and +// returns that value as an integer number of seconds +func durationFromCert(certData string) int { + block, _ := pem.Decode([]byte(certData)) + if block == nil { + return -1 + } + cert, err := x509.ParseCertificate(block.Bytes) + if err != nil { + log.Printf("[WARN] Unable to parse certificate data: 
%s", err) + return -1 + } + + return int(cert.NotAfter.Sub(cert.NotBefore).Seconds()) +} + +// leaseCheckWait accepts a secret and returns the recommended amount of +// time to sleep. +func leaseCheckWait(s *Secret) time.Duration { + // Handle whether this is an auth or a regular secret. + base := s.LeaseDuration + if s.Auth != nil && s.Auth.LeaseDuration > 0 { + base = s.Auth.LeaseDuration + } + + // Handle if this is a certificate with no lease + if certInterface, ok := s.Data["certificate"]; ok && s.LeaseID == "" { + if certData, ok := certInterface.(string); ok { + newDuration := durationFromCert(certData) + if newDuration > 0 { + log.Printf("[DEBUG] Found certificate and set lease duration to %d seconds", newDuration) + base = newDuration + } + } + } + + // Ensure we have a lease duration, since sometimes this can be zero. + if base <= 0 { + base = int(VaultDefaultLeaseDuration.Seconds()) + } + + // Convert to float seconds. + sleep := float64(time.Duration(base) * time.Second) + + if vaultSecretRenewable(s) { + // Renew at 1/3 the remaining lease. This will give us an opportunity to retry + // at least one more time should the first renewal fail. + sleep = sleep / 3.0 + + // Use some randomness so many clients do not hit Vault simultaneously. + sleep = sleep * (rand.Float64() + 1) / 2.0 + } else { + // For non-renewable leases set the renew duration to use much of the secret + // lease as possible. Use a stagger over 85%-95% of the lease duration so that + // many clients do not hit Vault simultaneously. + sleep = sleep * (.85 + rand.Float64()*0.1) + } + + return time.Duration(sleep) +} + +// printVaultWarnings prints warnings for a given dependency. +func printVaultWarnings(d Dependency, warnings []string) { + for _, w := range warnings { + log.Printf("[WARN] %s: %s", d, w) + } +} + +// vaultSecretRenewable determines if the given secret is renewable. 
+func vaultSecretRenewable(s *Secret) bool { + if s.Auth != nil { + return s.Auth.Renewable + } + return s.Renewable +} + +// transformSecret transforms an api secret into our secret. This does not deep +// copy underlying deep data structures, so it's not safe to modify the vault +// secret as that may modify the data in the transformed secret. +func transformSecret(theirs *api.Secret) *Secret { + var ours Secret + updateSecret(&ours, theirs) + return &ours +} + +// updateSecret updates our secret with the new data from the api, careful to +// not overwrite missing data. Renewals don't include the original secret, and +// we don't want to delete that data accidentally. +func updateSecret(ours *Secret, theirs *api.Secret) { + if theirs.RequestID != "" { + ours.RequestID = theirs.RequestID + } + + if theirs.LeaseID != "" { + ours.LeaseID = theirs.LeaseID + } + + if theirs.LeaseDuration != 0 { + ours.LeaseDuration = theirs.LeaseDuration + } + + if theirs.Renewable { + ours.Renewable = theirs.Renewable + } + + if len(theirs.Data) != 0 { + ours.Data = theirs.Data + } + + if len(theirs.Warnings) != 0 { + ours.Warnings = theirs.Warnings + } + + if theirs.Auth != nil { + if ours.Auth == nil { + ours.Auth = &SecretAuth{} + } + + if theirs.Auth.ClientToken != "" { + ours.Auth.ClientToken = theirs.Auth.ClientToken + } + + if theirs.Auth.Accessor != "" { + ours.Auth.Accessor = theirs.Auth.Accessor + } + + if len(theirs.Auth.Policies) != 0 { + ours.Auth.Policies = theirs.Auth.Policies + } + + if len(theirs.Auth.Metadata) != 0 { + ours.Auth.Metadata = theirs.Auth.Metadata + } + + if theirs.Auth.LeaseDuration != 0 { + ours.Auth.LeaseDuration = theirs.Auth.LeaseDuration + } + + if theirs.Auth.Renewable { + ours.Auth.Renewable = theirs.Auth.Renewable + } + } + + if theirs.WrapInfo != nil { + if ours.WrapInfo == nil { + ours.WrapInfo = &SecretWrapInfo{} + } + + if theirs.WrapInfo.Token != "" { + ours.WrapInfo.Token = theirs.WrapInfo.Token + } + + if theirs.WrapInfo.TTL != 0 { + 
ours.WrapInfo.TTL = theirs.WrapInfo.TTL + } + + if !theirs.WrapInfo.CreationTime.IsZero() { + ours.WrapInfo.CreationTime = theirs.WrapInfo.CreationTime + } + + if theirs.WrapInfo.WrappedAccessor != "" { + ours.WrapInfo.WrappedAccessor = theirs.WrapInfo.WrappedAccessor + } + } +} + +func isKVv2(client *api.Client, path string) (string, bool, error) { + // We don't want to use a wrapping call here so save any custom value and + // restore after + currentWrappingLookupFunc := client.CurrentWrappingLookupFunc() + client.SetWrappingLookupFunc(nil) + defer client.SetWrappingLookupFunc(currentWrappingLookupFunc) + currentOutputCurlString := client.OutputCurlString() + client.SetOutputCurlString(false) + defer client.SetOutputCurlString(currentOutputCurlString) + + r := client.NewRequest("GET", "/v1/sys/internal/ui/mounts/"+path) + resp, err := client.RawRequest(r) + if resp != nil { + defer resp.Body.Close() + } + if err != nil { + // If we get a 404 we are using an older version of vault, default to + // version 1 + if resp != nil && resp.StatusCode == 404 { + return "", false, nil + } + + // anonymous requests may fail to access /sys/internal/ui path + // Vault v1.1.3 returns 500 status code but may return 4XX in future + if client.Token() == "" { + return "", false, nil + } + + return "", false, err + } + + secret, err := api.ParseSecret(resp.Body) + if err != nil { + return "", false, err + } + var mountPath string + if mountPathRaw, ok := secret.Data["path"]; ok { + mountPath = mountPathRaw.(string) + } + var mountType string + if mountTypeRaw, ok := secret.Data["type"]; ok { + mountType = mountTypeRaw.(string) + } + options := secret.Data["options"] + if options == nil { + return mountPath, false, nil + } + versionRaw := options.(map[string]interface{})["version"] + if versionRaw == nil { + return mountPath, false, nil + } + version := versionRaw.(string) + switch version { + case "", "1": + return mountPath, false, nil + case "2": + return mountPath, mountType == 
"kv", nil + } + + return mountPath, false, nil +} + +func addPrefixToVKVPath(p, mountPath, apiPrefix string) string { + switch { + case p == mountPath, p == strings.TrimSuffix(mountPath, "/"): + return path.Join(mountPath, apiPrefix) + default: + p = strings.TrimPrefix(p, mountPath) + // Don't add /data to the path if it's been added manually. + if strings.HasPrefix(p, apiPrefix) { + return path.Join(mountPath, p) + } + return path.Join(mountPath, apiPrefix, p) + } +} diff --git a/vendor/github.com/hashicorp/consul-template/dependency/vault_list.go b/vendor/github.com/hashicorp/consul-template/dependency/vault_list.go new file mode 100644 index 000000000..3e80fd293 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/dependency/vault_list.go @@ -0,0 +1,126 @@ +package dependency + +import ( + "fmt" + "log" + "net/url" + "sort" + "strings" + "time" + + "github.com/pkg/errors" +) + +var ( + // Ensure implements + _ Dependency = (*VaultListQuery)(nil) +) + +// VaultListQuery is the dependency to Vault for a secret +type VaultListQuery struct { + stopCh chan struct{} + + path string +} + +// NewVaultListQuery creates a new datacenter dependency. +func NewVaultListQuery(s string) (*VaultListQuery, error) { + s = strings.TrimSpace(s) + s = strings.Trim(s, "/") + if s == "" { + return nil, fmt.Errorf("vault.list: invalid format: %q", s) + } + + return &VaultListQuery{ + stopCh: make(chan struct{}, 1), + path: s, + }, nil +} + +// Fetch queries the Vault API +func (d *VaultListQuery) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) { + select { + case <-d.stopCh: + return nil, nil, ErrStopped + default: + } + + opts = opts.Merge(&QueryOptions{}) + + // If this is not the first query, poll to simulate blocking-queries. 
+ if opts.WaitIndex != 0 { + dur := VaultDefaultLeaseDuration + log.Printf("[TRACE] %s: long polling for %s", d, dur) + + select { + case <-d.stopCh: + return nil, nil, ErrStopped + case <-time.After(dur): + } + } + + // If we got this far, we either didn't have a secret to renew, the secret was + // not renewable, or the renewal failed, so attempt a fresh list. + log.Printf("[TRACE] %s: LIST %s", d, &url.URL{ + Path: "/v1/" + d.path, + RawQuery: opts.String(), + }) + secret, err := clients.Vault().Logical().List(d.path) + if err != nil { + return nil, nil, errors.Wrap(err, d.String()) + } + + var result []string + + // The secret could be nil if it does not exist. + if secret == nil || secret.Data == nil { + log.Printf("[TRACE] %s: no data", d) + return respWithMetadata(result) + } + + // This is a weird thing that happened once... + keys, ok := secret.Data["keys"] + if !ok { + log.Printf("[TRACE] %s: no keys", d) + return respWithMetadata(result) + } + + list, ok := keys.([]interface{}) + if !ok { + log.Printf("[TRACE] %s: not list", d) + return nil, nil, fmt.Errorf("%s: unexpected response", d) + } + + for _, v := range list { + typed, ok := v.(string) + if !ok { + return nil, nil, fmt.Errorf("%s: non-string in list", d) + } + result = append(result, typed) + } + sort.Strings(result) + + log.Printf("[TRACE] %s: returned %d results", d, len(result)) + + return respWithMetadata(result) +} + +// CanShare returns if this dependency is shareable. +func (d *VaultListQuery) CanShare() bool { + return false +} + +// Stop halts the given dependency's fetch. +func (d *VaultListQuery) Stop() { + close(d.stopCh) +} + +// String returns the human-friendly version of this dependency. +func (d *VaultListQuery) String() string { + return fmt.Sprintf("vault.list(%s)", d.path) +} + +// Type returns the type of this dependency. 
+func (d *VaultListQuery) Type() Type { + return TypeVault +} diff --git a/vendor/github.com/hashicorp/consul-template/dependency/vault_read.go b/vendor/github.com/hashicorp/consul-template/dependency/vault_read.go new file mode 100644 index 000000000..00ebf27ec --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/dependency/vault_read.go @@ -0,0 +1,175 @@ +package dependency + +import ( + "fmt" + "log" + "net/url" + "strings" + "time" + + "github.com/hashicorp/vault/api" + "github.com/pkg/errors" +) + +var ( + // Ensure implements + _ Dependency = (*VaultReadQuery)(nil) +) + +// VaultReadQuery is the dependency to Vault for a secret +type VaultReadQuery struct { + stopCh chan struct{} + sleepCh chan time.Duration + + rawPath string + queryValues url.Values + secret *Secret + isKVv2 *bool + secretPath string + + // vaultSecret is the actual Vault secret which we are renewing + vaultSecret *api.Secret +} + +// NewVaultReadQuery creates a new datacenter dependency. +func NewVaultReadQuery(s string) (*VaultReadQuery, error) { + s = strings.TrimSpace(s) + s = strings.Trim(s, "/") + if s == "" { + return nil, fmt.Errorf("vault.read: invalid format: %q", s) + } + + secretURL, err := url.Parse(s) + if err != nil { + return nil, err + } + + return &VaultReadQuery{ + stopCh: make(chan struct{}, 1), + sleepCh: make(chan time.Duration, 1), + rawPath: secretURL.Path, + queryValues: secretURL.Query(), + }, nil +} + +// Fetch queries the Vault API +func (d *VaultReadQuery) Fetch(clients *ClientSet, opts *QueryOptions, +) (interface{}, *ResponseMetadata, error) { + select { + case <-d.stopCh: + return nil, nil, ErrStopped + default: + } + select { + case dur := <-d.sleepCh: + time.Sleep(dur) + default: + } + + firstRun := d.secret == nil + + if !firstRun && vaultSecretRenewable(d.secret) { + err := renewSecret(clients, d) + if err != nil { + return nil, nil, errors.Wrap(err, d.String()) + } + } + + err := d.fetchSecret(clients, opts) + if err != nil { + return nil, 
nil, errors.Wrap(err, d.String()) + } + + if !vaultSecretRenewable(d.secret) { + dur := leaseCheckWait(d.secret) + log.Printf("[TRACE] %s: non-renewable secret, set sleep for %s", d, dur) + d.sleepCh <- dur + } + + return respWithMetadata(d.secret) +} + +func (d *VaultReadQuery) fetchSecret(clients *ClientSet, opts *QueryOptions, +) error { + opts = opts.Merge(&QueryOptions{}) + vaultSecret, err := d.readSecret(clients, opts) + if err == nil { + printVaultWarnings(d, vaultSecret.Warnings) + d.vaultSecret = vaultSecret + // the cloned secret which will be exposed to the template + d.secret = transformSecret(vaultSecret) + } + return err +} + +func (d *VaultReadQuery) stopChan() chan struct{} { + return d.stopCh +} + +func (d *VaultReadQuery) secrets() (*Secret, *api.Secret) { + return d.secret, d.vaultSecret +} + +// CanShare returns if this dependency is shareable. +func (d *VaultReadQuery) CanShare() bool { + return false +} + +// Stop halts the given dependency's fetch. +func (d *VaultReadQuery) Stop() { + close(d.stopCh) +} + +// String returns the human-friendly version of this dependency. +func (d *VaultReadQuery) String() string { + return fmt.Sprintf("vault.read(%s)", d.rawPath) +} + +// Type returns the type of this dependency. +func (d *VaultReadQuery) Type() Type { + return TypeVault +} + +func (d *VaultReadQuery) readSecret(clients *ClientSet, opts *QueryOptions) (*api.Secret, error) { + vaultClient := clients.Vault() + + // Check whether this secret refers to a KV v2 entry if we haven't yet. 
+ if d.isKVv2 == nil { + mountPath, isKVv2, err := isKVv2(vaultClient, d.rawPath) + if err != nil { + log.Printf("[WARN] %s: failed to check if %s is KVv2, "+ + "assume not: %s", d, d.rawPath, err) + isKVv2 = false + d.secretPath = d.rawPath + } else if isKVv2 { + d.secretPath = addPrefixToVKVPath(d.rawPath, mountPath, "data") + } else { + d.secretPath = d.rawPath + } + d.isKVv2 = &isKVv2 + } + + queryString := d.queryValues.Encode() + log.Printf("[TRACE] %s: GET %s", d, &url.URL{ + Path: "/v1/" + d.secretPath, + RawQuery: queryString, + }) + vaultSecret, err := vaultClient.Logical().ReadWithData(d.secretPath, + d.queryValues) + + if err != nil { + return nil, errors.Wrap(err, d.String()) + } + if vaultSecret == nil || deletedKVv2(vaultSecret) { + return nil, fmt.Errorf("no secret exists at %s", d.secretPath) + } + return vaultSecret, nil +} + +func deletedKVv2(s *api.Secret) bool { + switch md := s.Data["metadata"].(type) { + case map[string]interface{}: + return md["deletion_time"] != "" + } + return false +} diff --git a/vendor/github.com/hashicorp/consul-template/dependency/vault_token.go b/vendor/github.com/hashicorp/consul-template/dependency/vault_token.go new file mode 100644 index 000000000..61fa29cfa --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/dependency/vault_token.go @@ -0,0 +1,95 @@ +package dependency + +import ( + "log" + "time" + + "github.com/hashicorp/vault/api" +) + +var ( + // Ensure implements + _ Dependency = (*VaultTokenQuery)(nil) +) + +// VaultTokenQuery is the dependency to Vault for a secret +type VaultTokenQuery struct { + stopCh chan struct{} + secret *Secret + vaultSecret *api.Secret +} + +// NewVaultTokenQuery creates a new dependency. 
+func NewVaultTokenQuery(token string) (*VaultTokenQuery, error) { + vaultSecret := &api.Secret{ + Auth: &api.SecretAuth{ + ClientToken: token, + Renewable: true, + LeaseDuration: 1, + }, + } + return &VaultTokenQuery{ + stopCh: make(chan struct{}, 1), + vaultSecret: vaultSecret, + secret: transformSecret(vaultSecret), + }, nil +} + +// Fetch queries the Vault API +func (d *VaultTokenQuery) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) { + select { + case <-d.stopCh: + return nil, nil, ErrStopped + default: + } + + if vaultSecretRenewable(d.secret) { + renewSecret(clients, d) + } + + // The secret isn't renewable, probably the generic secret backend. + // TODO This is incorrect when given a non-renewable template. We should + // instead to a lookup self to determine the lease duration. + opts = opts.Merge(&QueryOptions{}) + dur := leaseCheckWait(d.secret) + if dur < opts.VaultGrace { + dur = opts.VaultGrace + } + + log.Printf("[TRACE] %s: token is not renewable, sleeping for %s", d, dur) + select { + case <-time.After(dur): + case <-d.stopCh: + return nil, nil, ErrStopped + } + + return nil, nil, ErrLeaseExpired +} + +func (d *VaultTokenQuery) stopChan() chan struct{} { + return d.stopCh +} + +func (d *VaultTokenQuery) secrets() (*Secret, *api.Secret) { + return d.secret, d.vaultSecret +} + +// CanShare returns if this dependency is shareable. +func (d *VaultTokenQuery) CanShare() bool { + return false +} + +// Stop halts the dependency's fetch function. +func (d *VaultTokenQuery) Stop() { + close(d.stopCh) +} + +// String returns the human-friendly version of this dependency. +func (d *VaultTokenQuery) String() string { + return "vault.token" +} + +// Type returns the type of this dependency. 
+func (d *VaultTokenQuery) Type() Type { + return TypeVault +} diff --git a/vendor/github.com/hashicorp/consul-template/dependency/vault_write.go b/vendor/github.com/hashicorp/consul-template/dependency/vault_write.go new file mode 100644 index 000000000..c2841712f --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/dependency/vault_write.go @@ -0,0 +1,177 @@ +package dependency + +import ( + "crypto/sha1" + "fmt" + "io" + "log" + "net/url" + "sort" + "strings" + "time" + + "github.com/hashicorp/vault/api" + "github.com/pkg/errors" +) + +var ( + // Ensure implements + _ Dependency = (*VaultWriteQuery)(nil) +) + +// VaultWriteQuery is the dependency to Vault for a secret +type VaultWriteQuery struct { + stopCh chan struct{} + sleepCh chan time.Duration + + path string + data map[string]interface{} + dataHash string + secret *Secret + + // vaultSecret is the actual Vault secret which we are renewing + vaultSecret *api.Secret +} + +// NewVaultWriteQuery creates a new datacenter dependency. 
+func NewVaultWriteQuery(s string, d map[string]interface{}) (*VaultWriteQuery, error) { + s = strings.TrimSpace(s) + s = strings.Trim(s, "/") + if s == "" { + return nil, fmt.Errorf("vault.write: invalid format: %q", s) + } + + return &VaultWriteQuery{ + stopCh: make(chan struct{}, 1), + sleepCh: make(chan time.Duration, 1), + path: s, + data: d, + dataHash: sha1Map(d), + }, nil +} + +// Fetch queries the Vault API +func (d *VaultWriteQuery) Fetch(clients *ClientSet, opts *QueryOptions, +) (interface{}, *ResponseMetadata, error) { + select { + case <-d.stopCh: + return nil, nil, ErrStopped + default: + } + select { + case dur := <-d.sleepCh: + time.Sleep(dur) + default: + } + + firstRun := d.secret == nil + + if !firstRun && vaultSecretRenewable(d.secret) { + err := renewSecret(clients, d) + if err != nil { + return nil, nil, errors.Wrap(err, d.String()) + } + } + + opts = opts.Merge(&QueryOptions{}) + vaultSecret, err := d.writeSecret(clients, opts) + if err != nil { + return nil, nil, errors.Wrap(err, d.String()) + } + + // vaultSecret == nil when writing to KVv1 engines + if vaultSecret == nil { + return respWithMetadata(d.secret) + } + + printVaultWarnings(d, vaultSecret.Warnings) + d.vaultSecret = vaultSecret + // cloned secret which will be exposed to the template + d.secret = transformSecret(vaultSecret) + + if !vaultSecretRenewable(d.secret) { + dur := leaseCheckWait(d.secret) + log.Printf("[TRACE] %s: non-renewable secret, set sleep for %s", d, dur) + d.sleepCh <- dur + } + + return respWithMetadata(d.secret) +} + +// meet renewer interface +func (d *VaultWriteQuery) stopChan() chan struct{} { + return d.stopCh +} + +func (d *VaultWriteQuery) secrets() (*Secret, *api.Secret) { + return d.secret, d.vaultSecret +} + +// CanShare returns if this dependency is shareable. +func (d *VaultWriteQuery) CanShare() bool { + return false +} + +// Stop halts the given dependency's fetch. 
+func (d *VaultWriteQuery) Stop() { + close(d.stopCh) +} + +// String returns the human-friendly version of this dependency. +func (d *VaultWriteQuery) String() string { + return fmt.Sprintf("vault.write(%s -> %s)", d.path, d.dataHash) +} + +// Type returns the type of this dependency. +func (d *VaultWriteQuery) Type() Type { + return TypeVault +} + +// sha1Map returns the sha1 hash of the data in the map. The reason this data is +// hashed is because it appears in the output and could contain sensitive +// information. +func sha1Map(m map[string]interface{}) string { + keys := make([]string, 0, len(m)) + for k, _ := range m { + keys = append(keys, k) + } + sort.Strings(keys) + + h := sha1.New() + for _, k := range keys { + io.WriteString(h, fmt.Sprintf("%s=%q", k, m[k])) + } + + return fmt.Sprintf("%.4x", h.Sum(nil)) +} + +func (d *VaultWriteQuery) printWarnings(warnings []string) { + for _, w := range warnings { + log.Printf("[WARN] %s: %s", d, w) + } +} + +func (d *VaultWriteQuery) writeSecret(clients *ClientSet, opts *QueryOptions) (*api.Secret, error) { + log.Printf("[TRACE] %s: PUT %s", d, &url.URL{ + Path: "/v1/" + d.path, + RawQuery: opts.String(), + }) + + data := d.data + + _, isv2, _ := isKVv2(clients.Vault(), d.path) + if isv2 { + data = map[string]interface{}{"data": d.data} + } + + vaultSecret, err := clients.Vault().Logical().Write(d.path, data) + if err != nil { + return nil, errors.Wrap(err, d.String()) + } + // vaultSecret is always nil when KVv1 engine (isv2==false) + if isv2 && vaultSecret == nil { + return nil, fmt.Errorf("no secret exists at %s", d.path) + } + + return vaultSecret, nil +} diff --git a/vendor/github.com/hashicorp/consul-template/manager/dedup.go b/vendor/github.com/hashicorp/consul-template/manager/dedup.go new file mode 100644 index 000000000..3f5f9a950 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/manager/dedup.go @@ -0,0 +1,512 @@ +package manager + +import ( + "bytes" + "compress/lzw" + "encoding/gob" + 
"fmt" + "log" + "path" + "sync" + "time" + + "github.com/mitchellh/hashstructure" + + "github.com/hashicorp/consul-template/config" + dep "github.com/hashicorp/consul-template/dependency" + "github.com/hashicorp/consul-template/template" + "github.com/hashicorp/consul-template/version" + consulapi "github.com/hashicorp/consul/api" +) + +var ( + // sessionCreateRetry is the amount of time we wait + // to recreate a session when lost. + sessionCreateRetry = 15 * time.Second + + // lockRetry is the interval on which we try to re-acquire locks + lockRetry = 10 * time.Second + + // listRetry is the interval on which we retry listing a data path + listRetry = 10 * time.Second + + // timeout passed through to consul api client Lock + // here to override in testing (see ./dedup_test.go) + lockWaitTime = 15 * time.Second +) + +const ( + templateNoDataStr = "__NO_DATA__" +) + +// templateData is GOB encoded share the dependency values +type templateData struct { + // Version is the version of Consul Template which created this template data. + // This is important because users may be running multiple versions of CT + // with the same templates. This provides a nicer upgrade path. + Version string + + // Data is the actual template data. + Data map[string]interface{} +} + +func templateNoData() []byte { + return []byte(templateNoDataStr) +} + +// DedupManager is used to de-duplicate which instance of Consul-Template +// is handling each template. For each template, a lock path is determined +// using the MD5 of the template. This path is used to elect a "leader" +// instance. +// +// The leader instance operations like usual, but any time a template is +// rendered, any of the data required for rendering is stored in the +// Consul KV store under the lock path. +// +// The follower instances depend on the leader to do the primary watching +// and rendering, and instead only watch the aggregated data in the KV. +// Followers wait for updates and re-render the template. 
+// +// If a template depends on 50 views, and is running on 50 machines, that +// would normally require 2500 blocking queries. Using deduplication, one +// instance has 50 view queries, plus 50 additional queries on the lock +// path for a total of 100. +// +type DedupManager struct { + // config is the deduplicate configuration + config *config.DedupConfig + + // clients is used to access the underlying clients + clients *dep.ClientSet + + // Brain is where we inject updates + brain *template.Brain + + // templates is the set of templates we are trying to dedup + templates []*template.Template + + // leader tracks if we are currently the leader + leader map[*template.Template]<-chan struct{} + leaderLock sync.RWMutex + + // lastWrite tracks the hash of the data paths + lastWrite map[*template.Template]uint64 + lastWriteLock sync.RWMutex + + // updateCh is used to indicate an update watched data + updateCh chan struct{} + + // wg is used to wait for a clean shutdown + wg sync.WaitGroup + + stop bool + stopCh chan struct{} + stopLock sync.Mutex +} + +// NewDedupManager creates a new Dedup manager +func NewDedupManager(config *config.DedupConfig, clients *dep.ClientSet, brain *template.Brain, templates []*template.Template) (*DedupManager, error) { + d := &DedupManager{ + config: config, + clients: clients, + brain: brain, + templates: templates, + leader: make(map[*template.Template]<-chan struct{}), + lastWrite: make(map[*template.Template]uint64), + updateCh: make(chan struct{}, 1), + stopCh: make(chan struct{}), + } + return d, nil +} + +// Start is used to start the de-duplication manager +func (d *DedupManager) Start() error { + log.Printf("[INFO] (dedup) starting de-duplication manager") + + client := d.clients.Consul() + go d.createSession(client) + + // Start to watch each template + for _, t := range d.templates { + go d.watchTemplate(client, t) + } + return nil +} + +// Stop is used to stop the de-duplication manager +func (d *DedupManager) Stop() error 
{ + d.stopLock.Lock() + defer d.stopLock.Unlock() + if d.stop { + return nil + } + + log.Printf("[INFO] (dedup) stopping de-duplication manager") + d.stop = true + close(d.stopCh) + d.wg.Wait() + return nil +} + +// createSession is used to create and maintain a session to Consul +func (d *DedupManager) createSession(client *consulapi.Client) { +START: + log.Printf("[INFO] (dedup) attempting to create session") + session := client.Session() + sessionCh := make(chan struct{}) + ttl := fmt.Sprintf("%.6fs", float64(*d.config.TTL)/float64(time.Second)) + se := &consulapi.SessionEntry{ + Name: "Consul-Template de-duplication", + Behavior: "delete", + TTL: ttl, + LockDelay: 1 * time.Millisecond, + } + id, _, err := session.Create(se, nil) + if err != nil { + log.Printf("[ERR] (dedup) failed to create session: %v", err) + goto WAIT + } + log.Printf("[INFO] (dedup) created session %s", id) + + // Attempt to lock each template + for _, t := range d.templates { + d.wg.Add(1) + go d.attemptLock(client, id, sessionCh, t) + } + + // Renew our session periodically + if err := session.RenewPeriodic("15s", id, nil, d.stopCh); err != nil { + log.Printf("[ERR] (dedup) failed to renew session: %v", err) + } + close(sessionCh) + d.wg.Wait() + +WAIT: + select { + case <-time.After(sessionCreateRetry): + goto START + case <-d.stopCh: + return + } +} + +// IsLeader checks if we are currently the leader instance +func (d *DedupManager) IsLeader(tmpl *template.Template) bool { + d.leaderLock.RLock() + defer d.leaderLock.RUnlock() + + lockCh, ok := d.leader[tmpl] + if !ok { + return false + } + select { + case <-lockCh: + return false + default: + return true + } +} + +// UpdateDeps is used to update the values of the dependencies for a template +func (d *DedupManager) UpdateDeps(t *template.Template, deps []dep.Dependency) error { + // Calculate the path to write updates to + dataPath := path.Join(*d.config.Prefix, t.ID(), "data") + + // Package up the dependency data + td := templateData{ 
+ Version: version.Version, + Data: make(map[string]interface{}), + } + for _, dp := range deps { + // Skip any dependencies that can't be shared + if !dp.CanShare() { + continue + } + + // Pull the current value from the brain + val, ok := d.brain.Recall(dp) + if ok { + td.Data[dp.String()] = val + } + } + + // Compute stable hash of the data. Note we don't compute this over the actual + // encoded value since gob encoding does not guarantee stable ordering for + // maps so spuriously returns a different hash most times. See + // https://github.com/hashicorp/consul-template/issues/1099. + hash, err := hashstructure.Hash(td, nil) + if err != nil { + return fmt.Errorf("calculating hash failed: %v", err) + } + d.lastWriteLock.RLock() + existing, ok := d.lastWrite[t] + d.lastWriteLock.RUnlock() + if ok && existing == hash { + log.Printf("[INFO] (dedup) de-duplicate data '%s' already current", + dataPath) + return nil + } + + // Encode via GOB and LZW compress + var buf bytes.Buffer + compress := lzw.NewWriter(&buf, lzw.LSB, 8) + enc := gob.NewEncoder(compress) + if err := enc.Encode(&td); err != nil { + return fmt.Errorf("encode failed: %v", err) + } + compress.Close() + + // Write the KV update + kvPair := consulapi.KVPair{ + Key: dataPath, + Value: buf.Bytes(), + Flags: consulapi.LockFlagValue, + } + client := d.clients.Consul() + if _, err := client.KV().Put(&kvPair, nil); err != nil { + return fmt.Errorf("failed to write '%s': %v", dataPath, err) + } + log.Printf("[INFO] (dedup) updated de-duplicate data '%s'", dataPath) + d.lastWriteLock.Lock() + d.lastWrite[t] = hash + d.lastWriteLock.Unlock() + return nil +} + +// UpdateCh returns a channel to watch for dependency updates +func (d *DedupManager) UpdateCh() <-chan struct{} { + return d.updateCh +} + +// setLeader sets if we are currently the leader instance +func (d *DedupManager) setLeader(tmpl *template.Template, lockCh <-chan struct{}) { + // Update the lock state + d.leaderLock.Lock() + if lockCh != nil { + 
d.leader[tmpl] = lockCh + } else { + delete(d.leader, tmpl) + } + d.leaderLock.Unlock() + + // Clear the lastWrite hash if we've lost leadership + if lockCh == nil { + d.lastWriteLock.Lock() + delete(d.lastWrite, tmpl) + d.lastWriteLock.Unlock() + } + + // Do an async notify of an update + select { + case d.updateCh <- struct{}{}: + default: + } +} + +func (d *DedupManager) watchTemplate(client *consulapi.Client, t *template.Template) { + log.Printf("[INFO] (dedup) starting watch for template hash %s", t.ID()) + path := path.Join(*d.config.Prefix, t.ID(), "data") + + // Determine if stale queries are allowed + var allowStale bool + if *d.config.MaxStale != 0 { + allowStale = true + } + + // Setup our query options + opts := &consulapi.QueryOptions{ + AllowStale: allowStale, + WaitTime: 60 * time.Second, + } + + var lastData []byte + var lastIndex uint64 + +START: + // Stop listening if we're stopped + select { + case <-d.stopCh: + return + default: + } + + // If we are current the leader, wait for leadership lost + d.leaderLock.RLock() + lockCh, ok := d.leader[t] + d.leaderLock.RUnlock() + if ok { + select { + case <-lockCh: + goto START + case <-d.stopCh: + return + } + } + + // Block for updates on the data key + log.Printf("[INFO] (dedup) listing data for template hash %s", t.ID()) + pair, meta, err := client.KV().Get(path, opts) + if err != nil { + log.Printf("[ERR] (dedup) failed to get '%s': %v", path, err) + select { + case <-time.After(listRetry): + goto START + case <-d.stopCh: + return + } + } + opts.WaitIndex = meta.LastIndex + + // Stop listening if we're stopped + select { + case <-d.stopCh: + return + default: + } + + // If we've exceeded the maximum staleness, retry without stale + if allowStale && meta.LastContact > *d.config.MaxStale { + allowStale = false + log.Printf("[DEBUG] (dedup) %s stale data (last contact exceeded max_stale)", path) + goto START + } + + // Re-enable stale queries if allowed + if *d.config.MaxStale > 0 { + allowStale = true 
+ } + + if meta.LastIndex == lastIndex { + log.Printf("[TRACE] (dedup) %s no new data (index was the same)", path) + goto START + } + + if meta.LastIndex < lastIndex { + log.Printf("[TRACE] (dedup) %s had a lower index, resetting", path) + lastIndex = 0 + goto START + } + lastIndex = meta.LastIndex + + var data []byte + if pair != nil { + data = pair.Value + } + if bytes.Equal(lastData, data) { + log.Printf("[TRACE] (dedup) %s no new data (contents were the same)", path) + goto START + } + lastData = data + + // If we are current the leader, wait for leadership lost + d.leaderLock.RLock() + lockCh, ok = d.leader[t] + d.leaderLock.RUnlock() + if ok { + select { + case <-lockCh: + goto START + case <-d.stopCh: + return + } + } + + // Parse the data file + if pair != nil && pair.Flags == consulapi.LockFlagValue && !bytes.Equal(pair.Value, templateNoData()) { + d.parseData(pair.Key, pair.Value) + } + goto START +} + +// parseData is used to update brain from a KV data pair +func (d *DedupManager) parseData(path string, raw []byte) { + // Setup the decompression and decoders + r := bytes.NewReader(raw) + decompress := lzw.NewReader(r, lzw.LSB, 8) + defer decompress.Close() + dec := gob.NewDecoder(decompress) + + // Decode the data + var td templateData + if err := dec.Decode(&td); err != nil { + log.Printf("[ERR] (dedup) failed to decode '%s': %v", + path, err) + return + } + if td.Version != version.Version { + log.Printf("[WARN] (dedup) created with different version (%s vs %s)", + td.Version, version.Version) + return + } + log.Printf("[INFO] (dedup) loading %d dependencies from '%s'", + len(td.Data), path) + + // Update the data in the brain + for hashCode, value := range td.Data { + d.brain.ForceSet(hashCode, value) + } + + // Trigger the updateCh + select { + case d.updateCh <- struct{}{}: + default: + } +} + +func (d *DedupManager) attemptLock(client *consulapi.Client, session string, sessionCh chan struct{}, t *template.Template) { + defer d.wg.Done() + for { + 
log.Printf("[INFO] (dedup) attempting lock for template hash %s", t.ID()) + basePath := path.Join(*d.config.Prefix, t.ID()) + lopts := &consulapi.LockOptions{ + Key: path.Join(basePath, "data"), + Value: templateNoData(), + Session: session, + MonitorRetries: 3, + MonitorRetryTime: 3 * time.Second, + LockWaitTime: lockWaitTime, + } + lock, err := client.LockOpts(lopts) + if err != nil { + log.Printf("[ERR] (dedup) failed to create lock '%s': %v", + lopts.Key, err) + return + } + + var retryCh <-chan time.Time + leaderCh, err := lock.Lock(sessionCh) + if err != nil { + log.Printf("[ERR] (dedup) failed to acquire lock '%s': %v", + lopts.Key, err) + retryCh = time.After(lockRetry) + } else { + log.Printf("[INFO] (dedup) acquired lock '%s'", lopts.Key) + d.setLeader(t, leaderCh) + } + + select { + case <-retryCh: + retryCh = nil + continue + case <-leaderCh: + log.Printf("[WARN] (dedup) lost lock ownership '%s'", lopts.Key) + d.setLeader(t, nil) + continue + case <-sessionCh: + log.Printf("[INFO] (dedup) releasing session '%s'", lopts.Key) + d.setLeader(t, nil) + _, err = client.Session().Destroy(session, nil) + if err != nil { + log.Printf("[ERROR] (dedup) failed destroying session '%s', %s", session, err) + } + return + case <-d.stopCh: + log.Printf("[INFO] (dedup) releasing lock '%s'", lopts.Key) + _, err = client.Session().Destroy(session, nil) + if err != nil { + log.Printf("[ERROR] (dedup) failed destroying session '%s', %s", session, err) + } + return + } + } +} diff --git a/vendor/github.com/hashicorp/consul-template/manager/errors.go b/vendor/github.com/hashicorp/consul-template/manager/errors.go new file mode 100644 index 000000000..dbb84c36e --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/manager/errors.go @@ -0,0 +1,31 @@ +package manager + +import "fmt" + +// ErrExitable is an interface that defines an integer ExitStatus() function. 
+type ErrExitable interface { + ExitStatus() int +} + +var _ error = new(ErrChildDied) +var _ ErrExitable = new(ErrChildDied) + +// ErrChildDied is the error returned when the child process prematurely dies. +type ErrChildDied struct { + code int +} + +// NewErrChildDied creates a new error with the given exit code. +func NewErrChildDied(c int) *ErrChildDied { + return &ErrChildDied{code: c} +} + +// Error implements the error interface. +func (e *ErrChildDied) Error() string { + return fmt.Sprintf("child process died with exit code %d", e.code) +} + +// ExitStatus implements the ErrExitable interface. +func (e *ErrChildDied) ExitStatus() int { + return e.code +} diff --git a/vendor/github.com/hashicorp/consul-template/manager/runner.go b/vendor/github.com/hashicorp/consul-template/manager/runner.go new file mode 100644 index 000000000..877f4bf94 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/manager/runner.go @@ -0,0 +1,1305 @@ +package manager + +import ( + "encoding/json" + "fmt" + "io" + "log" + "os" + "strconv" + "sync" + "time" + + "github.com/hashicorp/consul-template/child" + "github.com/hashicorp/consul-template/config" + dep "github.com/hashicorp/consul-template/dependency" + "github.com/hashicorp/consul-template/renderer" + "github.com/hashicorp/consul-template/template" + "github.com/hashicorp/consul-template/watch" + multierror "github.com/hashicorp/go-multierror" + shellwords "github.com/mattn/go-shellwords" + "github.com/pkg/errors" +) + +const ( + // saneViewLimit is the number of views that we consider "sane" before we + // warn the user that they might be DDoSing their Consul cluster. + saneViewLimit = 128 +) + +// Runner responsible rendering Templates and invoking Commands. +type Runner struct { + // ErrCh and DoneCh are channels where errors and finish notifications occur. + ErrCh chan error + DoneCh chan struct{} + + // config is the Config that created this Runner. 
It is used internally to + // construct other objects and pass data. + config *config.Config + + // signals sending output to STDOUT instead of to a file. + dry bool + + // outStream and errStream are the io.Writer streams where the runner will + // write information. These can be modified by calling SetOutStream and + // SetErrStream accordingly. + + // inStream is the ioReader where the runner will read information. + outStream, errStream io.Writer + inStream io.Reader + + // ctemplatesMap is a map of each template ID to the TemplateConfigs + // that made it. + ctemplatesMap map[string]config.TemplateConfigs + + // templates is the list of calculated templates. + templates []*template.Template + + // renderEvents is a mapping of a template ID to the render event. + renderEvents map[string]*RenderEvent + + // renderEventLock protects access into the renderEvents map + renderEventsLock sync.RWMutex + + // renderedCh is used to signal that a template has been rendered + renderedCh chan struct{} + + // renderEventCh is used to signal that there is a new render event. A + // render event doesn't necessarily mean that a template has been rendered, + // only that templates attempted to render and may have updated their + // dependency sets. + renderEventCh chan struct{} + + // dependencies is the list of dependencies this runner is watching. + dependencies map[string]dep.Dependency + + // dependenciesLock is a lock around touching the dependencies map. + dependenciesLock sync.Mutex + + // watcher is the watcher this runner is using. + watcher *watch.Watcher + + // brain is the internal storage database of returned dependency data. + brain *template.Brain + + // child is the child process under management. This may be nil if not running + // in exec mode. + child *child.Child + + // childLock is the internal lock around the child process. + childLock sync.RWMutex + + // quiescenceMap is the map of templates to their quiescence timers. 
+ // quiescenceCh is the channel where templates report returns from quiescence + // fires. + quiescenceMap map[string]*quiescence + quiescenceCh chan *template.Template + + // dedup is the deduplication manager if enabled + dedup *DedupManager + + // Env represents a custom set of environment variables to populate the + // template and command runtime with. These environment variables will be + // available in both the command's environment as well as the template's + // environment. + Env map[string]string + + // stopLock is the lock around checking if the runner can be stopped + stopLock sync.Mutex + + // stopped is a boolean of whether the runner is stopped + stopped bool +} + +// RenderEvent captures the time and events that occurred for a template +// rendering. +type RenderEvent struct { + // Missing is the list of dependencies that we do not yet have data for, but + // are contained in the watcher. This is different from unwatched dependencies, + // which includes dependencies the watcher has not yet started querying for + // data. + MissingDeps *dep.Set + + // Template is the template attempting to be rendered. + Template *template.Template + + // Contents is the raw, rendered contents from the template. + Contents []byte + + // TemplateConfigs is the list of template configs that correspond to this + // template. + TemplateConfigs []*config.TemplateConfig + + // Unwatched is the list of dependencies that are not present in the watcher. + // This value may change over time due to the n-pass evaluation. + UnwatchedDeps *dep.Set + + // UpdatedAt is the last time this render event was updated. + UpdatedAt time.Time + + // Used is the full list of dependencies seen in the template. Because of + // the n-pass evaluation, this number can change over time. The dependencies + // in this list may or may not have data. This just contains the list of all + // dependencies parsed out of the template with the current data. 
+ UsedDeps *dep.Set + + // WouldRender determines if the template would have been rendered. A template + // would have been rendered if all the dependencies are satisfied, but may + // not have actually rendered if the file was already present or if an error + // occurred when trying to write the file. + WouldRender bool + + // LastWouldRender marks the last time the template would have rendered. + LastWouldRender time.Time + + // DidRender determines if the Template was actually written to disk. In dry + // mode, this will always be false, since templates are not written to disk + // in dry mode. A template is only rendered to disk if all dependencies are + // satisfied and the template is not already in place with the same contents. + DidRender bool + + // LastDidRender marks the last time the template was written to disk. + LastDidRender time.Time + + // ForQuiescence determines if this event is returned early in the + // render loop due to quiescence. When evaluating if all templates have + // been rendered we need to know if the event is triggered by quiesence + // and if we can skip evaluating it as a render event for those purposes + ForQuiescence bool +} + +// NewRunner accepts a slice of TemplateConfigs and returns a pointer to the new +// Runner and any error that occurred during creation. +func NewRunner(config *config.Config, dry bool) (*Runner, error) { + log.Printf("[INFO] (runner) creating new runner (dry: %v, once: %v)", + dry, config.Once) + + runner := &Runner{ + config: config, + dry: dry, + } + + if err := runner.init(); err != nil { + return nil, err + } + + return runner, nil +} + +// Start begins the polling for this runner. Any errors that occur will cause +// this function to push an item onto the runner's error channel and the halt +// execution. This function is blocking and should be called as a goroutine. +func (r *Runner) Start() { + log.Printf("[INFO] (runner) starting") + + // Create the pid before doing anything. 
+ if err := r.storePid(); err != nil { + r.ErrCh <- err + return + } + + // Start the de-duplication manager + var dedupCh <-chan struct{} + if r.dedup != nil { + if err := r.dedup.Start(); err != nil { + r.ErrCh <- err + return + } + dedupCh = r.dedup.UpdateCh() + } + + // Setup the child process exit channel + var childExitCh <-chan int + + // Fire an initial run to parse all the templates and setup the first-pass + // dependencies. This also forces any templates that have no dependencies to + // be rendered immediately (since they are already renderable). + log.Printf("[DEBUG] (runner) running initial templates") + if err := r.Run(); err != nil { + r.ErrCh <- err + return + } + + for { + // Warn the user if they are watching too many dependencies. + if r.watcher.Size() > saneViewLimit { + log.Printf("[WARN] (runner) watching %d dependencies - watching this "+ + "many dependencies could DDoS your consul cluster", r.watcher.Size()) + } else { + log.Printf("[DEBUG] (runner) watching %d dependencies", r.watcher.Size()) + } + + if r.allTemplatesRendered() { + log.Printf("[DEBUG] (runner) all templates rendered") + // Enable quiescence for all templates if we have specified wait + // intervals. + NEXT_Q: + for _, t := range r.templates { + if _, ok := r.quiescenceMap[t.ID()]; ok { + continue NEXT_Q + } + + for _, c := range r.templateConfigsFor(t) { + if *c.Wait.Enabled { + log.Printf("[DEBUG] (runner) enabling template-specific "+ + "quiescence for %q", t.ID()) + r.quiescenceMap[t.ID()] = newQuiescence( + r.quiescenceCh, *c.Wait.Min, *c.Wait.Max, t) + continue NEXT_Q + } + } + + if *r.config.Wait.Enabled { + log.Printf("[DEBUG] (runner) enabling global quiescence for %q", + t.ID()) + r.quiescenceMap[t.ID()] = newQuiescence( + r.quiescenceCh, *r.config.Wait.Min, *r.config.Wait.Max, t) + continue NEXT_Q + } + } + + // If an exec command was given and a command is not currently running, + // spawn the child process for supervision. 
+ if config.StringPresent(r.config.Exec.Command) { + // Lock the child because we are about to check if it exists. + r.childLock.Lock() + + log.Printf("[TRACE] (runner) acquired child lock for command, spawning") + + if r.child == nil { + env := r.config.Exec.Env.Copy() + env.Custom = append(r.childEnv(), env.Custom...) + child, err := spawnChild(&spawnChildInput{ + Stdin: r.inStream, + Stdout: r.outStream, + Stderr: r.errStream, + Command: config.StringVal(r.config.Exec.Command), + Env: env.Env(), + ReloadSignal: config.SignalVal(r.config.Exec.ReloadSignal), + KillSignal: config.SignalVal(r.config.Exec.KillSignal), + KillTimeout: config.TimeDurationVal(r.config.Exec.KillTimeout), + Splay: config.TimeDurationVal(r.config.Exec.Splay), + }) + if err != nil { + r.ErrCh <- err + r.childLock.Unlock() + return + } + r.child = child + } + + // Unlock the child, we are done now. + r.childLock.Unlock() + + // It's possible that we didn't start a process, in which case no + // channel is returned. If we did get a new exitCh, that means a child + // was spawned, so we need to watch a new exitCh. It is also possible + // that during a run, the child process was restarted, which means a + // new exit channel should be used. + nexitCh := r.child.ExitCh() + if nexitCh != nil { + childExitCh = nexitCh + } + } + + // If we are running in once mode and all our templates are rendered, + // then we should exit here. + if r.config.Once { + log.Printf("[INFO] (runner) once mode and all templates rendered") + + if r.child != nil { + r.stopDedup() + r.stopWatcher() + + log.Printf("[INFO] (runner) waiting for child process to exit") + select { + case c := <-childExitCh: + log.Printf("[INFO] (runner) child process died") + r.ErrCh <- NewErrChildDied(c) + return + case <-r.DoneCh: + } + } + + r.Stop() + return + } + } + + OUTER: + select { + case view := <-r.watcher.DataCh(): + // Receive this update + r.Receive(view.Dependency(), view.Data()) + + // Drain all dependency data. 
Given a large number of dependencies, it is + // feasible that we have data for more than one of them. Instead of + // wasting CPU cycles rendering templates when we have more dependencies + // waiting to be added to the brain, we drain the entire buffered channel + // on the watcher and then reports when it is done receiving new data + // which the parent select listens for. + // + // Please see https://github.com/hashicorp/consul-template/issues/168 for + // more information about this optimization and the entire backstory. + for { + select { + case view := <-r.watcher.DataCh(): + r.Receive(view.Dependency(), view.Data()) + default: + break OUTER + } + } + + case <-dedupCh: + // We may get triggered by the de-duplication manager for either a change + // in leadership (acquired or lost lock), or an update of data for a template + // that we are watching. + log.Printf("[INFO] (runner) watcher triggered by de-duplication manager") + break OUTER + + case err := <-r.watcher.ErrCh(): + // Push the error back up the stack + log.Printf("[ERR] (runner) watcher reported error: %s", err) + r.ErrCh <- err + return + + case tmpl := <-r.quiescenceCh: + // Remove the quiescence for this template from the map. This will force + // the upcoming Run call to actually evaluate and render the template. + log.Printf("[DEBUG] (runner) received template %q from quiescence", tmpl.ID()) + delete(r.quiescenceMap, tmpl.ID()) + + case c := <-childExitCh: + log.Printf("[INFO] (runner) child process died") + r.ErrCh <- NewErrChildDied(c) + return + + case <-r.DoneCh: + log.Printf("[INFO] (runner) received finish") + return + } + + // If we got this far, that means we got new data or one of the timers + // fired, so attempt to re-render. + if err := r.Run(); err != nil { + r.ErrCh <- err + return + } + } +} + +// Stop halts the execution of this runner and its subprocesses. 
+func (r *Runner) Stop() { + r.internalStop(false) +} + +// StopImmediately behaves like Stop but won't wait for any splay on any child +// process it may be running. +func (r *Runner) StopImmediately() { + r.internalStop(true) +} + +// TemplateRenderedCh returns a channel that will be triggered when one or more +// templates are rendered. +func (r *Runner) TemplateRenderedCh() <-chan struct{} { + return r.renderedCh +} + +// RenderEventCh returns a channel that will be triggered when there is a new +// render event. +func (r *Runner) RenderEventCh() <-chan struct{} { + return r.renderEventCh +} + +// RenderEvents returns the render events for each template was rendered. The +// map is keyed by template ID. +func (r *Runner) RenderEvents() map[string]*RenderEvent { + r.renderEventsLock.RLock() + defer r.renderEventsLock.RUnlock() + + times := make(map[string]*RenderEvent, len(r.renderEvents)) + for k, v := range r.renderEvents { + times[k] = v + } + return times +} + +func (r *Runner) internalStop(immediately bool) { + r.stopLock.Lock() + defer r.stopLock.Unlock() + + if r.stopped { + return + } + + log.Printf("[INFO] (runner) stopping") + r.stopDedup() + r.stopWatcher() + r.stopChild(immediately) + + if err := r.deletePid(); err != nil { + log.Printf("[WARN] (runner) could not remove pid at %q: %s", + *r.config.PidFile, err) + } + + r.stopped = true + + close(r.DoneCh) +} + +func (r *Runner) stopDedup() { + if r.dedup != nil { + log.Printf("[DEBUG] (runner) stopping de-duplication manager") + r.dedup.Stop() + } +} + +func (r *Runner) stopWatcher() { + if r.watcher != nil { + log.Printf("[DEBUG] (runner) stopping watcher") + r.watcher.Stop() + } +} + +func (r *Runner) stopChild(immediately bool) { + r.childLock.RLock() + defer r.childLock.RUnlock() + + if r.child != nil { + if immediately { + log.Printf("[DEBUG] (runner) stopping child process immediately") + r.child.StopImmediately() + } else { + log.Printf("[DEBUG] (runner) stopping child process") + 
r.child.Stop() + } + } +} + +// Receive accepts a Dependency and data for that dep. This data is +// cached on the Runner. This data is then used to determine if a Template +// is "renderable" (i.e. all its Dependencies have been downloaded at least +// once). +func (r *Runner) Receive(d dep.Dependency, data interface{}) { + r.dependenciesLock.Lock() + defer r.dependenciesLock.Unlock() + + // Just because we received data, it does not mean that we are actually + // watching for that data. How is that possible you may ask? Well, this + // Runner's data channel is pooled, meaning it accepts multiple data views + // before actually blocking. Whilest this runner is performing a Run() and + // executing diffs, it may be possible that more data was pushed onto the + // data channel pool for a dependency that we no longer care about. + // + // Accepting this dependency would introduce stale data into the brain, and + // that is simply unacceptable. In fact, it is a fun little bug: + // + // https://github.com/hashicorp/consul-template/issues/198 + // + // and by "little" bug, I mean really big bug. + if _, ok := r.dependencies[d.String()]; ok { + log.Printf("[DEBUG] (runner) receiving dependency %s", d) + r.brain.Remember(d, data) + } +} + +// Signal sends a signal to the child process, if it exists. Any errors that +// occur are returned. +func (r *Runner) Signal(s os.Signal) error { + r.childLock.RLock() + defer r.childLock.RUnlock() + if r.child == nil { + return nil + } + return r.child.Signal(s) +} + +// Run iterates over each template in this Runner and conditionally executes +// the template rendering and command execution. +// +// The template is rendered atomically. If and only if the template render +// completes successfully, the optional commands will be executed, if given. +// Please note that all templates are rendered **and then** any commands are +// executed. 
+func (r *Runner) Run() error { + log.Printf("[DEBUG] (runner) initiating run") + + var newRenderEvent, wouldRenderAny, renderedAny bool + runCtx := &templateRunCtx{ + depsMap: make(map[string]dep.Dependency), + } + + for _, tmpl := range r.templates { + event, err := r.runTemplate(tmpl, runCtx) + if err != nil { + return err + } + + // If there was a render event store it + if event != nil { + r.renderEventsLock.Lock() + r.renderEvents[tmpl.ID()] = event + r.renderEventsLock.Unlock() + + // Record that there is at least one new render event + newRenderEvent = true + + // Record that at least one template would have been rendered. + if event.WouldRender { + wouldRenderAny = true + } + + // Record that at least one template was rendered. + if event.DidRender { + renderedAny = true + } + } + } + + // Check if we need to deliver any rendered signals + if wouldRenderAny || renderedAny { + // Send the signal that a template got rendered + select { + case r.renderedCh <- struct{}{}: + default: + } + } + + // Check if we need to deliver any event signals + if newRenderEvent { + select { + case r.renderEventCh <- struct{}{}: + default: + } + } + + // Perform the diff and update the known dependencies. + r.diffAndUpdateDeps(runCtx.depsMap) + + // Execute each command in sequence, collecting any errors that occur - this + // ensures all commands execute at least once. + var errs []error + for _, t := range runCtx.commands { + command := config.StringVal(t.Exec.Command) + log.Printf("[INFO] (runner) executing command %q from %s", command, t.Display()) + env := t.Exec.Env.Copy() + env.Custom = append(r.childEnv(), env.Custom...) 
+ if _, err := spawnChild(&spawnChildInput{ + Stdin: r.inStream, + Stdout: r.outStream, + Stderr: r.errStream, + Command: command, + Env: env.Env(), + Timeout: config.TimeDurationVal(t.Exec.Timeout), + ReloadSignal: config.SignalVal(t.Exec.ReloadSignal), + KillSignal: config.SignalVal(t.Exec.KillSignal), + KillTimeout: config.TimeDurationVal(t.Exec.KillTimeout), + Splay: config.TimeDurationVal(t.Exec.Splay), + }); err != nil { + s := fmt.Sprintf("failed to execute command %q from %s", command, t.Display()) + errs = append(errs, errors.Wrap(err, s)) + } + } + + // If we got this far and have a child process, we need to send the reload + // signal to the child process. + if renderedAny && r.child != nil { + r.childLock.RLock() + if err := r.child.Reload(); err != nil { + errs = append(errs, err) + } + r.childLock.RUnlock() + } + + // If any errors were returned, convert them to an ErrorList for human + // readability. + if len(errs) != 0 { + var result *multierror.Error + for _, err := range errs { + result = multierror.Append(result, err) + } + return result.ErrorOrNil() + } + + return nil +} + +type templateRunCtx struct { + // commands is the set of commands that will be executed after all templates + // have run. When adding to the commands, care should be taken not to + // duplicate any existing command from a previous template. + commands []*config.TemplateConfig + + // depsMap is the set of dependencies shared across all templates. + depsMap map[string]dep.Dependency +} + +// runTemplate is used to run a particular template. It takes as input the +// template to run and a shared run context that allows sharing of information +// between templates. The run returns a potentially nil render event and any +// error that occured. The render event is nil in the case that the template has +// been already rendered and is a once template or if there is an error. 
+func (r *Runner) runTemplate(tmpl *template.Template, runCtx *templateRunCtx) (*RenderEvent, error) { + log.Printf("[DEBUG] (runner) checking template %s", tmpl.ID()) + + // Grab the last event + r.renderEventsLock.RLock() + lastEvent := r.renderEvents[tmpl.ID()] + r.renderEventsLock.RUnlock() + + // Create the event + event := &RenderEvent{ + Template: tmpl, + TemplateConfigs: r.templateConfigsFor(tmpl), + } + + if lastEvent != nil { + event.LastWouldRender = lastEvent.LastWouldRender + event.LastDidRender = lastEvent.LastDidRender + } + + // Check if we are currently the leader instance + isLeader := true + if r.dedup != nil { + isLeader = r.dedup.IsLeader(tmpl) + } + + // If we are in once mode and this template was already rendered, move + // onto the next one. We do not want to re-render the template if we are + // in once mode, and we certainly do not want to re-run any commands. + if r.config.Once { + r.renderEventsLock.RLock() + event, ok := r.renderEvents[tmpl.ID()] + r.renderEventsLock.RUnlock() + if ok && (event.WouldRender || event.DidRender) { + log.Printf("[DEBUG] (runner) once mode and already rendered") + return nil, nil + } + } + + // Attempt to render the template, returning any missing dependencies and + // the rendered contents. If there are any missing dependencies, the + // contents cannot be rendered or trusted! + result, err := tmpl.Execute(&template.ExecuteInput{ + Brain: r.brain, + Env: r.childEnv(), + }) + if err != nil { + return nil, errors.Wrap(err, tmpl.Source()) + } + + // Grab the list of used and missing dependencies. + missing, used := result.Missing, result.Used + + // Add the dependency to the list of dependencies for this runner. + for _, d := range used.List() { + // If we've taken over leadership for a template, we may have data + // that is cached, but not have the watcher. We must treat this as + // missing so that we create the watcher and re-run the template. 
+ if isLeader && !r.watcher.Watching(d) { + missing.Add(d) + } + if _, ok := runCtx.depsMap[d.String()]; !ok { + runCtx.depsMap[d.String()] = d + } + } + + // Diff any missing dependencies the template reported with dependencies + // the watcher is watching. + unwatched := new(dep.Set) + for _, d := range missing.List() { + if !r.watcher.Watching(d) { + unwatched.Add(d) + } + } + + // Update the event with the new dependency information + event.MissingDeps = missing + event.UnwatchedDeps = unwatched + event.UsedDeps = used + event.UpdatedAt = time.Now().UTC() + + // If there are unwatched dependencies, start the watcher and exit since we + // won't have data. + if l := unwatched.Len(); l > 0 { + log.Printf("[DEBUG] (runner) was not watching %d dependencies", l) + for _, d := range unwatched.List() { + // If we are deduplicating, we must still handle non-sharable + // dependencies, since those will be ignored. + if isLeader || !d.CanShare() { + r.watcher.Add(d) + } + } + return event, nil + } + + // If the template is missing data for some dependencies then we are not + // ready to render and need to move on to the next one. + if l := missing.Len(); l > 0 { + log.Printf("[DEBUG] (runner) missing data for %d dependencies", l) + return event, nil + } + + // Trigger an update of the de-duplication manager + if r.dedup != nil && isLeader { + if err := r.dedup.UpdateDeps(tmpl, used.List()); err != nil { + log.Printf("[ERR] (runner) failed to update dependency data for de-duplication: %v", err) + } + } + + // If quiescence is activated, start/update the timers and loop back around. + // We do not want to render the templates yet. + if q, ok := r.quiescenceMap[tmpl.ID()]; ok { + q.tick() + // This event is being returned early for quiescence + event.ForQuiescence = true + return event, nil + } + + // For each template configuration that is tied to this template, attempt to + // render it to disk and accumulate commands for later use. 
+ for _, templateConfig := range r.templateConfigsFor(tmpl) { + log.Printf("[DEBUG] (runner) rendering %s", templateConfig.Display()) + + // Render the template, taking dry mode into account + result, err := renderer.Render(&renderer.RenderInput{ + Backup: config.BoolVal(templateConfig.Backup), + Contents: result.Output, + CreateDestDirs: config.BoolVal(templateConfig.CreateDestDirs), + Dry: r.dry, + DryStream: r.outStream, + Path: config.StringVal(templateConfig.Destination), + Perms: config.FileModeVal(templateConfig.Perms), + }) + if err != nil { + return nil, errors.Wrap(err, "error rendering "+templateConfig.Display()) + } + + renderTime := time.Now().UTC() + + // If we would have rendered this template (but we did not because the + // contents were the same or something), we should consider this template + // rendered even though the contents on disk have not been updated. We + // will not fire commands unless the template was _actually_ rendered to + // disk though. + if result.WouldRender { + // This event would have rendered + event.WouldRender = true + event.LastWouldRender = renderTime + } + + // If we _actually_ rendered the template to disk, we want to run the + // appropriate commands. + if result.DidRender { + log.Printf("[INFO] (runner) rendered %s", templateConfig.Display()) + + // This event did render + event.DidRender = true + event.LastDidRender = renderTime + + // Update the contents + event.Contents = result.Contents + + if !r.dry { + // If the template was rendered (changed) and we are not in dry-run mode, + // aggregate commands, ignoring previously known commands + // + // Future-self Q&A: Why not use a map for the commands instead of an + // array with an expensive lookup option? Well I'm glad you asked that + // future-self! One of the API promises is that commands are executed + // in the order in which they are provided in the TemplateConfig + // definitions. 
If we inserted commands into a map, we would lose that + // relative ordering and people would be unhappy. + // if config.StringPresent(ctemplate.Command) + if c := config.StringVal(templateConfig.Exec.Command); c != "" { + existing := findCommand(templateConfig, runCtx.commands) + if existing != nil { + log.Printf("[DEBUG] (runner) skipping command %q from %s (already appended from %s)", + c, templateConfig.Display(), existing.Display()) + } else { + log.Printf("[DEBUG] (runner) appending command %q from %s", + c, templateConfig.Display()) + runCtx.commands = append(runCtx.commands, templateConfig) + } + } + } + } + } + + return event, nil +} + +// init() creates the Runner's underlying data structures and returns an error +// if any problems occur. +func (r *Runner) init() error { + // Ensure default configuration values + r.config = config.DefaultConfig().Merge(r.config) + r.config.Finalize() + + // Print the final config for debugging + result, err := json.Marshal(r.config) + if err != nil { + return err + } + log.Printf("[DEBUG] (runner) final config: %s", result) + + // Create the clientset + clients, err := newClientSet(r.config) + if err != nil { + return fmt.Errorf("runner: %s", err) + } + + // Create the watcher + watcher, err := newWatcher(r.config, clients, r.config.Once) + if err != nil { + return fmt.Errorf("runner: %s", err) + } + r.watcher = watcher + + numTemplates := len(*r.config.Templates) + templates := make([]*template.Template, 0, numTemplates) + ctemplatesMap := make(map[string]config.TemplateConfigs) + + // Iterate over each TemplateConfig, creating a new Template resource for each + // entry. Templates are parsed and saved, and a map of templates to their + // config templates is kept so templates can lookup their commands and output + // destinations. 
+ for _, ctmpl := range *r.config.Templates { + tmpl, err := template.NewTemplate(&template.NewTemplateInput{ + Source: config.StringVal(ctmpl.Source), + Contents: config.StringVal(ctmpl.Contents), + ErrMissingKey: config.BoolVal(ctmpl.ErrMissingKey), + LeftDelim: config.StringVal(ctmpl.LeftDelim), + RightDelim: config.StringVal(ctmpl.RightDelim), + FunctionBlacklist: ctmpl.FunctionBlacklist, + SandboxPath: config.StringVal(ctmpl.SandboxPath), + }) + if err != nil { + return err + } + + if _, ok := ctemplatesMap[tmpl.ID()]; !ok { + templates = append(templates, tmpl) + } + + if _, ok := ctemplatesMap[tmpl.ID()]; !ok { + ctemplatesMap[tmpl.ID()] = make([]*config.TemplateConfig, 0, 1) + } + ctemplatesMap[tmpl.ID()] = append(ctemplatesMap[tmpl.ID()], ctmpl) + } + + // Convert the map of templates (which was only used to ensure uniqueness) + // back into an array of templates. + r.templates = templates + + r.renderEvents = make(map[string]*RenderEvent, numTemplates) + r.dependencies = make(map[string]dep.Dependency) + + r.renderedCh = make(chan struct{}, 1) + r.renderEventCh = make(chan struct{}, 1) + + r.ctemplatesMap = ctemplatesMap + r.inStream = os.Stdin + r.outStream = os.Stdout + r.errStream = os.Stderr + r.brain = template.NewBrain() + + r.ErrCh = make(chan error) + r.DoneCh = make(chan struct{}) + + r.quiescenceMap = make(map[string]*quiescence) + r.quiescenceCh = make(chan *template.Template) + + if *r.config.Dedup.Enabled { + if r.config.Once { + log.Printf("[INFO] (runner) disabling de-duplication in once mode") + } else { + r.dedup, err = NewDedupManager(r.config.Dedup, clients, r.brain, r.templates) + if err != nil { + return err + } + } + } + + return nil +} + +// diffAndUpdateDeps iterates through the current map of dependencies on this +// runner and stops the watcher for any deps that are no longer required. +// +// At the end of this function, the given depsMap is converted to a slice and +// stored on the runner. 
+func (r *Runner) diffAndUpdateDeps(depsMap map[string]dep.Dependency) { + r.dependenciesLock.Lock() + defer r.dependenciesLock.Unlock() + + // Diff and up the list of dependencies, stopping any unneeded watchers. + log.Printf("[DEBUG] (runner) diffing and updating dependencies") + + for key, d := range r.dependencies { + if _, ok := depsMap[key]; !ok { + log.Printf("[DEBUG] (runner) %s is no longer needed", d) + r.watcher.Remove(d) + r.brain.Forget(d) + } else { + log.Printf("[DEBUG] (runner) %s is still needed", d) + } + } + + r.dependencies = depsMap +} + +// TemplateConfigFor returns the TemplateConfig for the given Template +func (r *Runner) templateConfigsFor(tmpl *template.Template) []*config.TemplateConfig { + return r.ctemplatesMap[tmpl.ID()] +} + +// TemplateConfigMapping returns a mapping between the template ID and the set +// of TemplateConfig represented by the template ID +func (r *Runner) TemplateConfigMapping() map[string][]*config.TemplateConfig { + // this method is primarily used to support embedding consul-template + // in other applications (ex. Nomad) + m := make(map[string][]*config.TemplateConfig, len(r.ctemplatesMap)) + + for id, set := range r.ctemplatesMap { + ctmpls := make([]*config.TemplateConfig, len(set)) + m[id] = ctmpls + for i, ctmpl := range set { + ctmpls[i] = ctmpl + } + } + + return m +} + +// allTemplatesRendered returns true if all the templates in this Runner have +// been rendered at least one time. 
+func (r *Runner) allTemplatesRendered() bool { + r.renderEventsLock.RLock() + defer r.renderEventsLock.RUnlock() + + for _, tmpl := range r.templates { + event, rendered := r.renderEvents[tmpl.ID()] + if !rendered { + return false + } + + // Skip evaluation of events from quiescence as they will + // be default unrendered as we are still waiting for the + // specified period + if event.ForQuiescence { + continue + } + + // The template might already exist on disk with the exact contents, but + // we still want to count that as "rendered" [GH-1000]. + if !event.DidRender && !event.WouldRender { + return false + } + } + + return true +} + +// childEnv creates a map of environment variables for child processes to have +// access to configurations in Consul Template's configuration. +func (r *Runner) childEnv() []string { + var m = make(map[string]string) + + if config.StringPresent(r.config.Consul.Address) { + m["CONSUL_HTTP_ADDR"] = config.StringVal(r.config.Consul.Address) + } + + if config.BoolVal(r.config.Consul.Auth.Enabled) { + m["CONSUL_HTTP_AUTH"] = r.config.Consul.Auth.String() + } + + m["CONSUL_HTTP_SSL"] = strconv.FormatBool(config.BoolVal(r.config.Consul.SSL.Enabled)) + m["CONSUL_HTTP_SSL_VERIFY"] = strconv.FormatBool(config.BoolVal(r.config.Consul.SSL.Verify)) + + if config.StringPresent(r.config.Vault.Address) { + m["VAULT_ADDR"] = config.StringVal(r.config.Vault.Address) + } + + if !config.BoolVal(r.config.Vault.SSL.Verify) { + m["VAULT_SKIP_VERIFY"] = "true" + } + + if config.StringPresent(r.config.Vault.SSL.Cert) { + m["VAULT_CLIENT_CERT"] = config.StringVal(r.config.Vault.SSL.Cert) + } + + if config.StringPresent(r.config.Vault.SSL.Key) { + m["VAULT_CLIENT_KEY"] = config.StringVal(r.config.Vault.SSL.Key) + } + + if config.StringPresent(r.config.Vault.SSL.CaPath) { + m["VAULT_CAPATH"] = config.StringVal(r.config.Vault.SSL.CaPath) + } + + if config.StringPresent(r.config.Vault.SSL.CaCert) { + m["VAULT_CACERT"] = 
config.StringVal(r.config.Vault.SSL.CaCert) + } + + if config.StringPresent(r.config.Vault.SSL.ServerName) { + m["VAULT_TLS_SERVER_NAME"] = config.StringVal(r.config.Vault.SSL.ServerName) + } + + // Append runner-supplied env (this is supplied programmatically). + for k, v := range r.Env { + m[k] = v + } + + e := make([]string, 0, len(m)) + for k, v := range m { + e = append(e, k+"="+v) + } + return e +} + +// storePid is used to write out a PID file to disk. +func (r *Runner) storePid() error { + path := config.StringVal(r.config.PidFile) + if path == "" { + return nil + } + + log.Printf("[INFO] creating pid file at %q", path) + + f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0666) + if err != nil { + return fmt.Errorf("runner: could not open pid file: %s", err) + } + defer f.Close() + + pid := os.Getpid() + _, err = f.WriteString(fmt.Sprintf("%d", pid)) + if err != nil { + return fmt.Errorf("runner: could not write to pid file: %s", err) + } + return nil +} + +// deletePid is used to remove the PID on exit. +func (r *Runner) deletePid() error { + path := config.StringVal(r.config.PidFile) + if path == "" { + return nil + } + + log.Printf("[DEBUG] removing pid file at %q", path) + + stat, err := os.Stat(path) + if err != nil { + return fmt.Errorf("runner: could not remove pid file: %s", err) + } + if stat.IsDir() { + return fmt.Errorf("runner: specified pid file path is directory") + } + + err = os.Remove(path) + if err != nil { + return fmt.Errorf("runner: could not remove pid file: %s", err) + } + return nil +} + +// SetOutStream modifies runner output stream. Defaults to stdout. +func (r *Runner) SetOutStream(out io.Writer) { + r.outStream = out +} + +// SetErrStream modifies runner error stream. Defaults to stderr. +func (r *Runner) SetErrStream(err io.Writer) { + r.errStream = err +} + +// spawnChildInput is used as input to spawn a child process. 
+type spawnChildInput struct { + Stdin io.Reader + Stdout io.Writer + Stderr io.Writer + Command string + Timeout time.Duration + Env []string + ReloadSignal os.Signal + KillSignal os.Signal + KillTimeout time.Duration + Splay time.Duration +} + +// spawnChild spawns a child process with the given inputs and returns the +// resulting child. +func spawnChild(i *spawnChildInput) (*child.Child, error) { + p := shellwords.NewParser() + p.ParseEnv = true + p.ParseBacktick = true + args, err := p.Parse(i.Command) + if err != nil { + return nil, errors.Wrap(err, "failed parsing command") + } + + child, err := child.New(&child.NewInput{ + Stdin: i.Stdin, + Stdout: i.Stdout, + Stderr: i.Stderr, + Command: args[0], + Args: args[1:], + Env: i.Env, + Timeout: i.Timeout, + ReloadSignal: i.ReloadSignal, + KillSignal: i.KillSignal, + KillTimeout: i.KillTimeout, + Splay: i.Splay, + }) + if err != nil { + return nil, errors.Wrap(err, "error creating child") + } + + if err := child.Start(); err != nil { + return nil, errors.Wrap(err, "child") + } + return child, nil +} + +// quiescence is an internal representation of a single template's quiescence +// state. +type quiescence struct { + template *template.Template + min time.Duration + max time.Duration + ch chan *template.Template + timer *time.Timer + deadline time.Time +} + +// newQuiescence creates a new quiescence timer for the given template. +func newQuiescence(ch chan *template.Template, min, max time.Duration, t *template.Template) *quiescence { + return &quiescence{ + template: t, + min: min, + max: max, + ch: ch, + } +} + +// tick updates the minimum quiescence timer. +func (q *quiescence) tick() { + now := time.Now() + + // If this is the first tick, set up the timer and calculate the max + // deadline. 
+ if q.timer == nil { + q.timer = time.NewTimer(q.min) + go func() { + select { + case <-q.timer.C: + q.ch <- q.template + } + }() + + q.deadline = now.Add(q.max) + return + } + + // Snooze the timer for the min time, or snooze less if we are coming + // up against the max time. If the timer has already fired and the reset + // doesn't work that's ok because we guarantee that the channel gets our + // template which means that we are obsolete and a fresh quiescence will + // be set up. + if now.Add(q.min).Before(q.deadline) { + q.timer.Reset(q.min) + } else if dur := q.deadline.Sub(now); dur > 0 { + q.timer.Reset(dur) + } +} + +// findCommand searches the list of template configs for the given command and +// returns it if it exists. +func findCommand(c *config.TemplateConfig, templates []*config.TemplateConfig) *config.TemplateConfig { + needle := config.StringVal(c.Exec.Command) + for _, t := range templates { + if needle == config.StringVal(t.Exec.Command) { + return t + } + } + return nil +} + +// newClientSet creates a new client set from the given config. 
+func newClientSet(c *config.Config) (*dep.ClientSet, error) { + clients := dep.NewClientSet() + + if err := clients.CreateConsulClient(&dep.CreateConsulClientInput{ + Address: config.StringVal(c.Consul.Address), + Token: config.StringVal(c.Consul.Token), + AuthEnabled: config.BoolVal(c.Consul.Auth.Enabled), + AuthUsername: config.StringVal(c.Consul.Auth.Username), + AuthPassword: config.StringVal(c.Consul.Auth.Password), + SSLEnabled: config.BoolVal(c.Consul.SSL.Enabled), + SSLVerify: config.BoolVal(c.Consul.SSL.Verify), + SSLCert: config.StringVal(c.Consul.SSL.Cert), + SSLKey: config.StringVal(c.Consul.SSL.Key), + SSLCACert: config.StringVal(c.Consul.SSL.CaCert), + SSLCAPath: config.StringVal(c.Consul.SSL.CaPath), + ServerName: config.StringVal(c.Consul.SSL.ServerName), + TransportDialKeepAlive: config.TimeDurationVal(c.Consul.Transport.DialKeepAlive), + TransportDialTimeout: config.TimeDurationVal(c.Consul.Transport.DialTimeout), + TransportDisableKeepAlives: config.BoolVal(c.Consul.Transport.DisableKeepAlives), + TransportIdleConnTimeout: config.TimeDurationVal(c.Consul.Transport.IdleConnTimeout), + TransportMaxIdleConns: config.IntVal(c.Consul.Transport.MaxIdleConns), + TransportMaxIdleConnsPerHost: config.IntVal(c.Consul.Transport.MaxIdleConnsPerHost), + TransportTLSHandshakeTimeout: config.TimeDurationVal(c.Consul.Transport.TLSHandshakeTimeout), + }); err != nil { + return nil, fmt.Errorf("runner: %s", err) + } + + if err := clients.CreateVaultClient(&dep.CreateVaultClientInput{ + Address: config.StringVal(c.Vault.Address), + Namespace: config.StringVal(c.Vault.Namespace), + Token: config.StringVal(c.Vault.Token), + UnwrapToken: config.BoolVal(c.Vault.UnwrapToken), + SSLEnabled: config.BoolVal(c.Vault.SSL.Enabled), + SSLVerify: config.BoolVal(c.Vault.SSL.Verify), + SSLCert: config.StringVal(c.Vault.SSL.Cert), + SSLKey: config.StringVal(c.Vault.SSL.Key), + SSLCACert: config.StringVal(c.Vault.SSL.CaCert), + SSLCAPath: config.StringVal(c.Vault.SSL.CaPath), + 
ServerName: config.StringVal(c.Vault.SSL.ServerName), + TransportDialKeepAlive: config.TimeDurationVal(c.Vault.Transport.DialKeepAlive), + TransportDialTimeout: config.TimeDurationVal(c.Vault.Transport.DialTimeout), + TransportDisableKeepAlives: config.BoolVal(c.Vault.Transport.DisableKeepAlives), + TransportIdleConnTimeout: config.TimeDurationVal(c.Vault.Transport.IdleConnTimeout), + TransportMaxIdleConns: config.IntVal(c.Vault.Transport.MaxIdleConns), + TransportMaxIdleConnsPerHost: config.IntVal(c.Vault.Transport.MaxIdleConnsPerHost), + TransportTLSHandshakeTimeout: config.TimeDurationVal(c.Vault.Transport.TLSHandshakeTimeout), + }); err != nil { + return nil, fmt.Errorf("runner: %s", err) + } + + return clients, nil +} + +// newWatcher creates a new watcher. +func newWatcher(c *config.Config, clients *dep.ClientSet, once bool) (*watch.Watcher, error) { + log.Printf("[INFO] (runner) creating watcher") + + w, err := watch.NewWatcher(&watch.NewWatcherInput{ + Clients: clients, + MaxStale: config.TimeDurationVal(c.MaxStale), + Once: c.Once, + RenewVault: clients.Vault().Token() != "" && config.BoolVal(c.Vault.RenewToken), + VaultAgentTokenFile: config.StringVal(c.Vault.VaultAgentTokenFile), + RetryFuncConsul: watch.RetryFunc(c.Consul.Retry.RetryFunc()), + // TODO: Add a sane default retry - right now this only affects "local" + // dependencies like reading a file from disk. 
+ RetryFuncDefault: nil, + RetryFuncVault: watch.RetryFunc(c.Vault.Retry.RetryFunc()), + VaultGrace: config.TimeDurationVal(c.Vault.Grace), + VaultToken: clients.Vault().Token(), + }) + if err != nil { + return nil, errors.Wrap(err, "runner") + } + return w, nil +} diff --git a/vendor/github.com/hashicorp/consul-template/renderer/file_perms.go b/vendor/github.com/hashicorp/consul-template/renderer/file_perms.go new file mode 100644 index 000000000..d89b2f02c --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/renderer/file_perms.go @@ -0,0 +1,22 @@ +//+build !windows + +package renderer + +import ( + "os" + "syscall" +) + +func preserveFilePermissions(path string, fileInfo os.FileInfo) error { + sysInfo := fileInfo.Sys() + if sysInfo != nil { + stat, ok := sysInfo.(*syscall.Stat_t) + if ok { + if err := os.Chown(path, int(stat.Uid), int(stat.Gid)); err != nil { + return err + } + } + } + + return nil +} diff --git a/vendor/github.com/hashicorp/consul-template/renderer/file_perms_windows.go b/vendor/github.com/hashicorp/consul-template/renderer/file_perms_windows.go new file mode 100644 index 000000000..cae35cf51 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/renderer/file_perms_windows.go @@ -0,0 +1,9 @@ +//+build windows + +package renderer + +import "os" + +func preserveFilePermissions(path string, fileInfo os.FileInfo) error { + return nil +} diff --git a/vendor/github.com/hashicorp/consul-template/renderer/renderer.go b/vendor/github.com/hashicorp/consul-template/renderer/renderer.go new file mode 100644 index 000000000..59931c19e --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/renderer/renderer.go @@ -0,0 +1,182 @@ +package renderer + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "log" + "os" + "path/filepath" + + "github.com/pkg/errors" +) + +const ( + // DefaultFilePerms are the default file permissions for files rendered onto + // disk when a specific file permission has not already been specified. 
+ DefaultFilePerms = 0644 +) + +var ( + // ErrNoParentDir is the error returned with the parent directory is missing + // and the user disabled it. + ErrNoParentDir = errors.New("parent directory is missing") + + // ErrMissingDest is the error returned with the destination is empty. + ErrMissingDest = errors.New("missing destination") +) + +// RenderInput is used as input to the render function. +type RenderInput struct { + Backup bool + Contents []byte + CreateDestDirs bool + Dry bool + DryStream io.Writer + Path string + Perms os.FileMode +} + +// RenderResult is returned and stored. It contains the status of the render +// operation. +type RenderResult struct { + // DidRender indicates if the template rendered to disk. This will be false in + // the event of an error, but it will also be false in dry mode or when the + // template on disk matches the new result. + DidRender bool + + // WouldRender indicates if the template would have rendered to disk. This + // will return false in the event of an error, but will return true in dry + // mode or when the template on disk matches the new result. + WouldRender bool + + // Contents are the actual contents of the resulting template from the render + // operation. + Contents []byte +} + +// Render atomically renders a file contents to disk, returning a result of +// whether it would have rendered and actually did render. 
+func Render(i *RenderInput) (*RenderResult, error) { + existing, err := ioutil.ReadFile(i.Path) + if err != nil && !os.IsNotExist(err) { + return nil, errors.Wrap(err, "failed reading file") + } + + if bytes.Equal(existing, i.Contents) { + return &RenderResult{ + DidRender: false, + WouldRender: true, + Contents: existing, + }, nil + } + + if i.Dry { + fmt.Fprintf(i.DryStream, "> %s\n%s", i.Path, i.Contents) + } else { + if err := AtomicWrite(i.Path, i.CreateDestDirs, i.Contents, i.Perms, i.Backup); err != nil { + return nil, errors.Wrap(err, "failed writing file") + } + } + + return &RenderResult{ + DidRender: true, + WouldRender: true, + Contents: i.Contents, + }, nil +} + +// AtomicWrite accepts a destination path and the template contents. It writes +// the template contents to a TempFile on disk, returning if any errors occur. +// +// If the parent destination directory does not exist, it will be created +// automatically with permissions 0755. To use a different permission, create +// the directory first or use `chmod` in a Command. +// +// If the destination path exists, all attempts will be made to preserve the +// existing file permissions. If those permissions cannot be read, an error is +// returned. If the file does not exist, it will be created automatically with +// permissions 0644. To use a different permission, create the destination file +// first or use `chmod` in a Command. +// +// If no errors occur, the Tempfile is "renamed" (moved) to the destination +// path. 
+func AtomicWrite(path string, createDestDirs bool, contents []byte, perms os.FileMode, backup bool) error { + if path == "" { + return ErrMissingDest + } + + parent := filepath.Dir(path) + if _, err := os.Stat(parent); os.IsNotExist(err) { + if createDestDirs { + if err := os.MkdirAll(parent, 0755); err != nil { + return err + } + } else { + return ErrNoParentDir + } + } + + f, err := ioutil.TempFile(parent, "") + if err != nil { + return err + } + defer os.Remove(f.Name()) + + if _, err := f.Write(contents); err != nil { + return err + } + + if err := f.Sync(); err != nil { + return err + } + + if err := f.Close(); err != nil { + return err + } + + // If the user did not explicitly set permissions, attempt to lookup the + // current permissions on the file. If the file does not exist, fall back to + // the default. Otherwise, inherit the current permissions. + if perms == 0 { + currentInfo, err := os.Stat(path) + if err != nil { + if os.IsNotExist(err) { + perms = DefaultFilePerms + } else { + return err + } + } else { + perms = currentInfo.Mode() + + // The file exists, so try to preserve the ownership as well. + if err := preserveFilePermissions(f.Name(), currentInfo); err != nil { + log.Printf("[WARN] (runner) could not preserve file permissions for %q: %v", + f.Name(), err) + } + } + } + + if err := os.Chmod(f.Name(), perms); err != nil { + return err + } + + // If we got this far, it means we are about to save the file. Copy the + // current file so we have a backup. Note that os.Link preserves the Mode. 
+ if backup { + bak, old := path+".bak", path+".old.bak" + os.Rename(bak, old) // ignore error + if err := os.Link(path, bak); err != nil { + log.Printf("[WARN] (runner) could not backup %q: %v", path, err) + } else { + os.Remove(old) // ignore error + } + } + + if err := os.Rename(f.Name(), path); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/hashicorp/consul-template/signals/mapstructure.go b/vendor/github.com/hashicorp/consul-template/signals/mapstructure.go new file mode 100644 index 000000000..f21cbd5d6 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/signals/mapstructure.go @@ -0,0 +1,32 @@ +package signals + +import ( + "reflect" + + "github.com/mitchellh/mapstructure" +) + +// StringToSignalFunc parses a string as a signal based on the signal lookup +// table. If the user supplied an empty string or nil, a special "nil signal" +// is returned. Clients should check for this value and set the response back +// nil after mapstructure finishes parsing. 
+func StringToSignalFunc() mapstructure.DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + + if t.String() != "os.Signal" { + return data, nil + } + + if data == nil || data.(string) == "" { + return SIGNIL, nil + } + + return Parse(data.(string)) + } +} diff --git a/vendor/github.com/hashicorp/consul-template/signals/nil.go b/vendor/github.com/hashicorp/consul-template/signals/nil.go new file mode 100644 index 000000000..2c20645b3 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/signals/nil.go @@ -0,0 +1,7 @@ +package signals + +// NilSignal is a special signal that is blank or "nil" +type NilSignal int + +func (s *NilSignal) String() string { return "SIGNIL" } +func (s *NilSignal) Signal() {} diff --git a/vendor/github.com/hashicorp/consul-template/signals/signals.go b/vendor/github.com/hashicorp/consul-template/signals/signals.go new file mode 100644 index 000000000..dacc3e62c --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/signals/signals.go @@ -0,0 +1,35 @@ +package signals + +import ( + "fmt" + "os" + "sort" + "strings" +) + +// SIGNIL is the nil signal. +var SIGNIL os.Signal = new(NilSignal) + +// ValidSignals is the list of all valid signals. This is built at runtime +// because it is OS-dependent. +var ValidSignals []string + +func init() { + valid := make([]string, 0, len(SignalLookup)) + for k := range SignalLookup { + valid = append(valid, k) + } + sort.Strings(valid) + ValidSignals = valid +} + +// Parse parses the given string as a signal. If the signal is not found, +// an error is returned. 
+func Parse(s string) (os.Signal, error) { + sig, ok := SignalLookup[strings.ToUpper(s)] + if !ok { + return nil, fmt.Errorf("invalid signal %q - valid signals are %q", + s, ValidSignals) + } + return sig, nil +} diff --git a/vendor/github.com/hashicorp/consul-template/signals/signals_unix.go b/vendor/github.com/hashicorp/consul-template/signals/signals_unix.go new file mode 100644 index 000000000..0b614e93b --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/signals/signals_unix.go @@ -0,0 +1,40 @@ +// +build linux darwin freebsd openbsd solaris netbsd + +package signals + +import ( + "os" + "syscall" +) + +var SignalLookup = map[string]os.Signal{ + "SIGABRT": syscall.SIGABRT, + "SIGALRM": syscall.SIGALRM, + "SIGBUS": syscall.SIGBUS, + "SIGCHLD": syscall.SIGCHLD, + "SIGCONT": syscall.SIGCONT, + "SIGFPE": syscall.SIGFPE, + "SIGHUP": syscall.SIGHUP, + "SIGILL": syscall.SIGILL, + "SIGINT": syscall.SIGINT, + "SIGIO": syscall.SIGIO, + "SIGIOT": syscall.SIGIOT, + "SIGKILL": syscall.SIGKILL, + "SIGPIPE": syscall.SIGPIPE, + "SIGPROF": syscall.SIGPROF, + "SIGQUIT": syscall.SIGQUIT, + "SIGSEGV": syscall.SIGSEGV, + "SIGSTOP": syscall.SIGSTOP, + "SIGSYS": syscall.SIGSYS, + "SIGTERM": syscall.SIGTERM, + "SIGTRAP": syscall.SIGTRAP, + "SIGTSTP": syscall.SIGTSTP, + "SIGTTIN": syscall.SIGTTIN, + "SIGTTOU": syscall.SIGTTOU, + "SIGURG": syscall.SIGURG, + "SIGUSR1": syscall.SIGUSR1, + "SIGUSR2": syscall.SIGUSR2, + "SIGWINCH": syscall.SIGWINCH, + "SIGXCPU": syscall.SIGXCPU, + "SIGXFSZ": syscall.SIGXFSZ, +} diff --git a/vendor/github.com/hashicorp/consul-template/signals/signals_windows.go b/vendor/github.com/hashicorp/consul-template/signals/signals_windows.go new file mode 100644 index 000000000..e1204a67d --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/signals/signals_windows.go @@ -0,0 +1,24 @@ +// +build windows + +package signals + +import ( + "os" + "syscall" +) + +var SignalLookup = map[string]os.Signal{ + "SIGABRT": syscall.SIGABRT, + "SIGALRM": 
syscall.SIGALRM, + "SIGBUS": syscall.SIGBUS, + "SIGFPE": syscall.SIGFPE, + "SIGHUP": syscall.SIGHUP, + "SIGILL": syscall.SIGILL, + "SIGINT": syscall.SIGINT, + "SIGKILL": syscall.SIGKILL, + "SIGPIPE": syscall.SIGPIPE, + "SIGQUIT": syscall.SIGQUIT, + "SIGSEGV": syscall.SIGSEGV, + "SIGTERM": syscall.SIGTERM, + "SIGTRAP": syscall.SIGTRAP, +} diff --git a/vendor/github.com/hashicorp/consul-template/template/brain.go b/vendor/github.com/hashicorp/consul-template/template/brain.go new file mode 100644 index 000000000..149fc4f9f --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/template/brain.go @@ -0,0 +1,74 @@ +package template + +import ( + "sync" + + dep "github.com/hashicorp/consul-template/dependency" +) + +// Brain is what Template uses to determine the values that are +// available for template parsing. +type Brain struct { + sync.RWMutex + + // data is the map of individual dependencies and the most recent data for + // that dependency. + data map[string]interface{} + + // receivedData is an internal tracker of which dependencies have stored data + // in the brain. + receivedData map[string]struct{} +} + +// NewBrain creates a new Brain with empty values for each +// of the key structs. +func NewBrain() *Brain { + return &Brain{ + data: make(map[string]interface{}), + receivedData: make(map[string]struct{}), + } +} + +// Remember accepts a dependency and the data to store associated with that +// dep. This function converts the given data to a proper type and stores +// it interally. +func (b *Brain) Remember(d dep.Dependency, data interface{}) { + b.Lock() + defer b.Unlock() + + b.data[d.String()] = data + b.receivedData[d.String()] = struct{}{} +} + +// Recall gets the current value for the given dependency in the Brain. +func (b *Brain) Recall(d dep.Dependency) (interface{}, bool) { + b.RLock() + defer b.RUnlock() + + // If we have not received data for this dependency, return now. 
+ if _, ok := b.receivedData[d.String()]; !ok { + return nil, false + } + + return b.data[d.String()], true +} + +// ForceSet is used to force set the value of a dependency +// for a given hash code +func (b *Brain) ForceSet(hashCode string, data interface{}) { + b.Lock() + defer b.Unlock() + + b.data[hashCode] = data + b.receivedData[hashCode] = struct{}{} +} + +// Forget accepts a dependency and removes all associated data with this +// dependency. It also resets the "receivedData" internal map. +func (b *Brain) Forget(d dep.Dependency) { + b.Lock() + defer b.Unlock() + + delete(b.data, d.String()) + delete(b.receivedData, d.String()) +} diff --git a/vendor/github.com/hashicorp/consul-template/template/funcs.go b/vendor/github.com/hashicorp/consul-template/template/funcs.go new file mode 100644 index 000000000..2114279ca --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/template/funcs.go @@ -0,0 +1,1322 @@ +package template + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "text/template" + "time" + + "github.com/BurntSushi/toml" + dep "github.com/hashicorp/consul-template/dependency" + socktmpl "github.com/hashicorp/go-sockaddr/template" + "github.com/pkg/errors" + yaml "gopkg.in/yaml.v2" +) + +// now is function that represents the current time in UTC. This is here +// primarily for the tests to override times. +var now = func() time.Time { return time.Now().UTC() } + +// datacentersFunc returns or accumulates datacenter dependencies. 
+func datacentersFunc(b *Brain, used, missing *dep.Set) func(ignore ...bool) ([]string, error) { + return func(i ...bool) ([]string, error) { + result := []string{} + + var ignore bool + switch len(i) { + case 0: + ignore = false + case 1: + ignore = i[0] + default: + return result, fmt.Errorf("datacenters: wrong number of arguments, expected 0 or 1"+ + ", but got %d", len(i)) + } + + d, err := dep.NewCatalogDatacentersQuery(ignore) + if err != nil { + return result, err + } + + used.Add(d) + + if value, ok := b.Recall(d); ok { + return value.([]string), nil + } + + missing.Add(d) + + return result, nil + } +} + +// envFunc returns a function which checks the value of an environment variable. +// Invokers can specify their own environment, which takes precedences over any +// real environment variables +func envFunc(env []string) func(string) (string, error) { + return func(s string) (string, error) { + for _, e := range env { + split := strings.SplitN(e, "=", 2) + k, v := split[0], split[1] + if k == s { + return v, nil + } + } + return os.Getenv(s), nil + } +} + +// executeTemplateFunc executes the given template in the context of the +// parent. If an argument is specified, it will be used as the context instead. +// This can be used for nested template definitions. +func executeTemplateFunc(t *template.Template) func(string, ...interface{}) (string, error) { + return func(s string, data ...interface{}) (string, error) { + var dot interface{} + switch len(data) { + case 0: + dot = nil + case 1: + dot = data[0] + default: + return "", fmt.Errorf("executeTemplate: wrong number of arguments, expected 1 or 2"+ + ", but got %d", len(data)+1) + } + var b bytes.Buffer + if err := t.ExecuteTemplate(&b, s, dot); err != nil { + return "", err + } + return b.String(), nil + } +} + +// fileFunc returns or accumulates file dependencies. 
+func fileFunc(b *Brain, used, missing *dep.Set, sandboxPath string) func(string) (string, error) { + return func(s string) (string, error) { + if len(s) == 0 { + return "", nil + } + err := pathInSandbox(sandboxPath, s) + if err != nil { + return "", err + } + d, err := dep.NewFileQuery(s) + if err != nil { + return "", err + } + + used.Add(d) + + if value, ok := b.Recall(d); ok { + if value == nil { + return "", nil + } + return value.(string), nil + } + + missing.Add(d) + + return "", nil + } +} + +// keyFunc returns or accumulates key dependencies. +func keyFunc(b *Brain, used, missing *dep.Set) func(string) (string, error) { + return func(s string) (string, error) { + if len(s) == 0 { + return "", nil + } + + d, err := dep.NewKVGetQuery(s) + if err != nil { + return "", err + } + d.EnableBlocking() + + used.Add(d) + + if value, ok := b.Recall(d); ok { + if value == nil { + return "", nil + } + return value.(string), nil + } + + missing.Add(d) + + return "", nil + } +} + +// keyExistsFunc returns true if a key exists, false otherwise. +func keyExistsFunc(b *Brain, used, missing *dep.Set) func(string) (bool, error) { + return func(s string) (bool, error) { + if len(s) == 0 { + return false, nil + } + + d, err := dep.NewKVGetQuery(s) + if err != nil { + return false, err + } + + used.Add(d) + + if value, ok := b.Recall(d); ok { + return value != nil, nil + } + + missing.Add(d) + + return false, nil + } +} + +// keyWithDefaultFunc returns or accumulates key dependencies that have a +// default value. 
+func keyWithDefaultFunc(b *Brain, used, missing *dep.Set) func(string, string) (string, error) { + return func(s, def string) (string, error) { + if len(s) == 0 { + return def, nil + } + + d, err := dep.NewKVGetQuery(s) + if err != nil { + return "", err + } + + used.Add(d) + + if value, ok := b.Recall(d); ok { + if value == nil || value.(string) == "" { + return def, nil + } + return value.(string), nil + } + + missing.Add(d) + + return def, nil + } +} + +func safeLsFunc(b *Brain, used, missing *dep.Set) func(string) ([]*dep.KeyPair, error) { + // call lsFunc but explicitly mark that empty data set returned on monitored KV prefix is NOT safe + return lsFunc(b, used, missing, false) +} + +// lsFunc returns or accumulates keyPrefix dependencies. +func lsFunc(b *Brain, used, missing *dep.Set, emptyIsSafe bool) func(string) ([]*dep.KeyPair, error) { + return func(s string) ([]*dep.KeyPair, error) { + result := []*dep.KeyPair{} + + if len(s) == 0 { + return result, nil + } + + d, err := dep.NewKVListQuery(s) + if err != nil { + return result, err + } + + used.Add(d) + + // Only return non-empty top-level keys + if value, ok := b.Recall(d); ok { + for _, pair := range value.([]*dep.KeyPair) { + if pair.Key != "" && !strings.Contains(pair.Key, "/") { + result = append(result, pair) + } + } + + if len(result) == 0 { + if emptyIsSafe { + // Operator used potentially unsafe ls function in the template instead of the safeLs + return result, nil + } + } else { + // non empty result is good so we just return the data + return result, nil + } + + // If we reach this part of the code result is completely empty as value returned no KV pairs + // Operator selected to use safeLs on the specific KV prefix so we will refuse to render template + // by marking d as missing + } + + // b.Recall either returned an error or safeLs entered unsafe case + missing.Add(d) + + return result, nil + } +} + +// nodeFunc returns or accumulates catalog node dependency. 
+func nodeFunc(b *Brain, used, missing *dep.Set) func(...string) (*dep.CatalogNode, error) { + return func(s ...string) (*dep.CatalogNode, error) { + + d, err := dep.NewCatalogNodeQuery(strings.Join(s, "")) + if err != nil { + return nil, err + } + + used.Add(d) + + if value, ok := b.Recall(d); ok { + return value.(*dep.CatalogNode), nil + } + + missing.Add(d) + + return nil, nil + } +} + +// nodesFunc returns or accumulates catalog node dependencies. +func nodesFunc(b *Brain, used, missing *dep.Set) func(...string) ([]*dep.Node, error) { + return func(s ...string) ([]*dep.Node, error) { + result := []*dep.Node{} + + d, err := dep.NewCatalogNodesQuery(strings.Join(s, "")) + if err != nil { + return nil, err + } + + used.Add(d) + + if value, ok := b.Recall(d); ok { + return value.([]*dep.Node), nil + } + + missing.Add(d) + + return result, nil + } +} + +// secretFunc returns or accumulates secret dependencies from Vault. +func secretFunc(b *Brain, used, missing *dep.Set) func(...string) (*dep.Secret, error) { + return func(s ...string) (*dep.Secret, error) { + var result *dep.Secret + + if len(s) == 0 { + return result, nil + } + + // TODO: Refactor into separate template functions + path, rest := s[0], s[1:] + data := make(map[string]interface{}) + for _, str := range rest { + parts := strings.SplitN(str, "=", 2) + if len(parts) != 2 { + return result, fmt.Errorf("not k=v pair %q", str) + } + + k, v := strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]) + data[k] = v + } + + var d dep.Dependency + var err error + + if len(rest) == 0 { + d, err = dep.NewVaultReadQuery(path) + } else { + d, err = dep.NewVaultWriteQuery(path, data) + } + + if err != nil { + return nil, err + } + + used.Add(d) + + if value, ok := b.Recall(d); ok { + result = value.(*dep.Secret) + return result, nil + } + + missing.Add(d) + + return result, nil + } +} + +// secretsFunc returns or accumulates a list of secret dependencies from Vault. 
+func secretsFunc(b *Brain, used, missing *dep.Set) func(string) ([]string, error) { + return func(s string) ([]string, error) { + var result []string + + if len(s) == 0 { + return result, nil + } + + d, err := dep.NewVaultListQuery(s) + if err != nil { + return nil, err + } + + used.Add(d) + + if value, ok := b.Recall(d); ok { + result = value.([]string) + return result, nil + } + + missing.Add(d) + + return result, nil + } +} + +// byMeta returns Services grouped by one or many ServiceMeta fields. +func byMeta(meta string, services []*dep.HealthService) (groups map[string][]*dep.HealthService, err error) { + re := regexp.MustCompile("[^a-zA-Z0-9_-]") + normalize := func(x string) string { + return re.ReplaceAllString(x, "_") + } + getOrDefault := func(m map[string]string, key string) string { + realKey := strings.TrimSuffix(key, "|int") + if val, ok := m[realKey]; ok { + if val != "" { + return val + } + } + if strings.HasSuffix(key, "|int") { + return "0" + } + return fmt.Sprintf("_no_%s_", realKey) + } + + metas := strings.Split(meta, ",") + + groups = make(map[string][]*dep.HealthService) + + for _, s := range services { + sm := s.ServiceMeta + keyParts := []string{} + for _, meta := range metas { + value := getOrDefault(sm, meta) + if strings.HasSuffix(meta, "|int") { + value = getOrDefault(sm, meta) + i, err := strconv.Atoi(value) + if err != nil { + return nil, errors.Wrap(err, fmt.Sprintf("cannot parse %v as number ", value)) + } + value = fmt.Sprintf("%05d", i) + } + keyParts = append(keyParts, normalize(value)) + } + key := strings.Join(keyParts, "_") + groups[key] = append(groups[key], s) + } + + return groups, nil +} + +// serviceFunc returns or accumulates health service dependencies. 
+func serviceFunc(b *Brain, used, missing *dep.Set) func(...string) ([]*dep.HealthService, error) { + return func(s ...string) ([]*dep.HealthService, error) { + result := []*dep.HealthService{} + + if len(s) == 0 || s[0] == "" { + return result, nil + } + + d, err := dep.NewHealthServiceQuery(strings.Join(s, "|")) + if err != nil { + return nil, err + } + + used.Add(d) + + if value, ok := b.Recall(d); ok { + return value.([]*dep.HealthService), nil + } + + missing.Add(d) + + return result, nil + } +} + +// servicesFunc returns or accumulates catalog services dependencies. +func servicesFunc(b *Brain, used, missing *dep.Set) func(...string) ([]*dep.CatalogSnippet, error) { + return func(s ...string) ([]*dep.CatalogSnippet, error) { + result := []*dep.CatalogSnippet{} + + d, err := dep.NewCatalogServicesQuery(strings.Join(s, "")) + if err != nil { + return nil, err + } + + used.Add(d) + + if value, ok := b.Recall(d); ok { + return value.([]*dep.CatalogSnippet), nil + } + + missing.Add(d) + + return result, nil + } +} + +func safeTreeFunc(b *Brain, used, missing *dep.Set) func(string) ([]*dep.KeyPair, error) { + // call treeFunc but explicitly mark that empty data set returned on monitored KV prefix is NOT safe + return treeFunc(b, used, missing, false) +} + +// treeFunc returns or accumulates keyPrefix dependencies. 
+func treeFunc(b *Brain, used, missing *dep.Set, emptyIsSafe bool) func(string) ([]*dep.KeyPair, error) { + return func(s string) ([]*dep.KeyPair, error) { + result := []*dep.KeyPair{} + + if len(s) == 0 { + return result, nil + } + + d, err := dep.NewKVListQuery(s) + if err != nil { + return result, err + } + + used.Add(d) + + // Only return non-empty top-level keys + if value, ok := b.Recall(d); ok { + for _, pair := range value.([]*dep.KeyPair) { + parts := strings.Split(pair.Key, "/") + if parts[len(parts)-1] != "" { + result = append(result, pair) + } + } + + if len(result) == 0 { + if emptyIsSafe { + // Operator used potentially unsafe tree function in the template instead of the safeTree + return result, nil + } + } else { + // non empty result is good so we just return the data + return result, nil + } + + // If we reach this part of the code result is completely empty as value returned no KV pairs + // Operator selected to use safeTree on the specific KV prefix so we will refuse to render template + // by marking d as missing + } + + // b.Recall either returned an error or safeTree entered unsafe case + missing.Add(d) + + return result, nil + } +} + +// base64Decode decodes the given string as a base64 string, returning an error +// if it fails. +func base64Decode(s string) (string, error) { + v, err := base64.StdEncoding.DecodeString(s) + if err != nil { + return "", errors.Wrap(err, "base64Decode") + } + return string(v), nil +} + +// base64Encode encodes the given value into a string represented as base64. +func base64Encode(s string) (string, error) { + return base64.StdEncoding.EncodeToString([]byte(s)), nil +} + +// base64URLDecode decodes the given string as a URL-safe base64 string. +func base64URLDecode(s string) (string, error) { + v, err := base64.URLEncoding.DecodeString(s) + if err != nil { + return "", errors.Wrap(err, "base64URLDecode") + } + return string(v), nil +} + +// base64URLEncode encodes the given string to be URL-safe. 
+func base64URLEncode(s string) (string, error) { + return base64.URLEncoding.EncodeToString([]byte(s)), nil +} + +// byKey accepts a slice of KV pairs and returns a map of the top-level +// key to all its subkeys. For example: +// +// elasticsearch/a //=> "1" +// elasticsearch/b //=> "2" +// redis/a/b //=> "3" +// +// Passing the result from Consul through byTag would yield: +// +// map[string]map[string]string{ +// "elasticsearch": &dep.KeyPair{"a": "1"}, &dep.KeyPair{"b": "2"}, +// "redis": &dep.KeyPair{"a/b": "3"} +// } +// +// Note that the top-most key is stripped from the Key value. Keys that have no +// prefix after stripping are removed from the list. +func byKey(pairs []*dep.KeyPair) (map[string]map[string]*dep.KeyPair, error) { + m := make(map[string]map[string]*dep.KeyPair) + for _, pair := range pairs { + parts := strings.Split(pair.Key, "/") + top := parts[0] + key := strings.Join(parts[1:], "/") + + if key == "" { + // Do not add a key if it has no prefix after stripping. + continue + } + + if _, ok := m[top]; !ok { + m[top] = make(map[string]*dep.KeyPair) + } + + newPair := *pair + newPair.Key = key + m[top][key] = &newPair + } + + return m, nil +} + +// byTag is a template func that takes the provided services and +// produces a map based on Service tags. +// +// The map key is a string representing the service tag. The map value is a +// slice of Services which have the tag assigned. 
+func byTag(in interface{}) (map[string][]interface{}, error) { + m := make(map[string][]interface{}) + + switch typed := in.(type) { + case nil: + case []*dep.CatalogSnippet: + for _, s := range typed { + for _, t := range s.Tags { + m[t] = append(m[t], s) + } + } + case []*dep.CatalogService: + for _, s := range typed { + for _, t := range s.ServiceTags { + m[t] = append(m[t], s) + } + } + case []*dep.HealthService: + for _, s := range typed { + for _, t := range s.Tags { + m[t] = append(m[t], s) + } + } + default: + return nil, fmt.Errorf("byTag: wrong argument type %T", in) + } + + return m, nil +} + +// contains is a function that have reverse arguments of "in" and is designed to +// be used as a pipe instead of a function: +// +// {{ l | contains "thing" }} +// +func contains(v, l interface{}) (bool, error) { + return in(l, v) +} + +// containsSomeFunc returns functions to implement each of the following: +// +// 1. containsAll - true if (∀x ∈ v then x ∈ l); false otherwise +// 2. containsAny - true if (∃x ∈ v such that x ∈ l); false otherwise +// 3. containsNone - true if (∀x ∈ v then x ∉ l); false otherwise +// 2. containsNotAll - true if (∃x ∈ v such that x ∉ l); false otherwise +// +// ret_true - return true at end of loop for none/all; false for any/notall +// invert - invert block test for all/notall +func containsSomeFunc(retTrue, invert bool) func([]interface{}, interface{}) (bool, error) { + return func(v []interface{}, l interface{}) (bool, error) { + for i := 0; i < len(v); i++ { + if ok, _ := in(l, v[i]); ok != invert { + return !retTrue, nil + } + } + return retTrue, nil + } +} + +// explode is used to expand a list of keypairs into a deeply-nested hash. 
+func explode(pairs []*dep.KeyPair) (map[string]interface{}, error) { + m := make(map[string]interface{}) + for _, pair := range pairs { + if err := explodeHelper(m, pair.Key, pair.Value, pair.Key); err != nil { + return nil, errors.Wrap(err, "explode") + } + } + return m, nil +} + +// explodeHelper is a recursive helper for explode and explodeMap +func explodeHelper(m map[string]interface{}, k string, v interface{}, p string) error { + if strings.Contains(k, "/") { + parts := strings.Split(k, "/") + top := parts[0] + key := strings.Join(parts[1:], "/") + + if _, ok := m[top]; !ok { + m[top] = make(map[string]interface{}) + } + nest, ok := m[top].(map[string]interface{}) + if !ok { + return fmt.Errorf("not a map: %q: %q already has value %q", p, top, m[top]) + } + return explodeHelper(nest, key, v, k) + } + + if k != "" { + m[k] = v + } + + return nil +} + +// explodeMap turns a single-level map into a deeply-nested hash. +func explodeMap(mapIn map[string]interface{}) (map[string]interface{}, error) { + mapOut := make(map[string]interface{}) + + var keys []string + for k := range mapIn { + keys = append(keys, k) + } + sort.Strings(keys) + + for i := range keys { + if err := explodeHelper(mapOut, keys[i], mapIn[keys[i]], keys[i]); err != nil { + return nil, errors.Wrap(err, "explodeMap") + } + } + return mapOut, nil +} + +// in searches for a given value in a given interface. +func in(l, v interface{}) (bool, error) { + lv := reflect.ValueOf(l) + vv := reflect.ValueOf(v) + + switch lv.Kind() { + case reflect.Array, reflect.Slice: + // if the slice contains 'interface' elements, then the element needs to be extracted directly to examine its type, + // otherwise it will just resolve to 'interface'. 
+ var interfaceSlice []interface{} + if reflect.TypeOf(l).Elem().Kind() == reflect.Interface { + interfaceSlice = l.([]interface{}) + } + + for i := 0; i < lv.Len(); i++ { + var lvv reflect.Value + if interfaceSlice != nil { + lvv = reflect.ValueOf(interfaceSlice[i]) + } else { + lvv = lv.Index(i) + } + + switch lvv.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + switch vv.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + if vv.Int() == lvv.Int() { + return true, nil + } + } + case reflect.Float32, reflect.Float64: + switch vv.Kind() { + case reflect.Float32, reflect.Float64: + if vv.Float() == lvv.Float() { + return true, nil + } + } + case reflect.String: + if vv.Type() == lvv.Type() && vv.String() == lvv.String() { + return true, nil + } + } + } + case reflect.String: + if vv.Type() == lv.Type() && strings.Contains(lv.String(), vv.String()) { + return true, nil + } + } + + return false, nil +} + +// Indent prefixes each line of a string with the specified number of spaces +func indent(spaces int, s string) (string, error) { + if spaces < 0 { + return "", fmt.Errorf("indent value must be a positive integer") + } + var output, prefix []byte + var sp bool + var size int + prefix = []byte(strings.Repeat(" ", spaces)) + sp = true + for _, c := range []byte(s) { + if sp && c != '\n' { + output = append(output, prefix...) + size += spaces + } + output = append(output, c) + sp = c == '\n' + size++ + } + return string(output[:size]), nil +} + +// loop accepts varying parameters and differs its behavior. If given one +// parameter, loop will return a goroutine that begins at 0 and loops until the +// given int, increasing the index by 1 each iteration. If given two parameters, +// loop will return a goroutine that begins at the first parameter and loops +// up to but not including the second parameter. 
+// +// // Prints 0 1 2 3 4 +// for _, i := range loop(5) { +// print(i) +// } +// +// // Prints 5 6 7 +// for _, i := range loop(5, 8) { +// print(i) +// } +// +func loop(ifaces ...interface{}) (<-chan int64, error) { + + to64 := func(i interface{}) (int64, error) { + v := reflect.ValueOf(i) + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32: + return int64(v.Int()), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32: + return int64(v.Uint()), nil + case reflect.String: + return parseInt(v.String()) + } + return 0, fmt.Errorf("loop: bad argument type: %T", i) + } + + var i1, i2 interface{} + switch len(ifaces) { + case 1: + i1, i2 = 0, ifaces[0] + case 2: + i1, i2 = ifaces[0], ifaces[1] + default: + return nil, fmt.Errorf("loop: wrong number of arguments, expected "+ + "1 or 2, but got %d", len(ifaces)) + } + + start, err := to64(i1) + if err != nil { + return nil, err + } + stop, err := to64(i2) + if err != nil { + return nil, err + } + + ch := make(chan int64) + + go func() { + for i := start; i < stop; i++ { + ch <- i + } + close(ch) + }() + + return ch, nil +} + +// join is a version of strings.Join that can be piped +func join(sep string, a []string) (string, error) { + return strings.Join(a, sep), nil +} + +// TrimSpace is a version of strings.TrimSpace that can be piped +func trimSpace(s string) (string, error) { + return strings.TrimSpace(s), nil +} + +// parseBool parses a string into a boolean +func parseBool(s string) (bool, error) { + if s == "" { + return false, nil + } + + result, err := strconv.ParseBool(s) + if err != nil { + return false, errors.Wrap(err, "parseBool") + } + return result, nil +} + +// parseFloat parses a string into a base 10 float +func parseFloat(s string) (float64, error) { + if s == "" { + return 0.0, nil + } + + result, err := strconv.ParseFloat(s, 10) + if err != nil { + return 0, errors.Wrap(err, "parseFloat") + } + return result, nil +} + +// parseInt parses a string into 
a base 10 int +func parseInt(s string) (int64, error) { + if s == "" { + return 0, nil + } + + result, err := strconv.ParseInt(s, 10, 64) + if err != nil { + return 0, errors.Wrap(err, "parseInt") + } + return result, nil +} + +// parseJSON returns a structure for valid JSON +func parseJSON(s string) (interface{}, error) { + if s == "" { + return map[string]interface{}{}, nil + } + + var data interface{} + if err := json.Unmarshal([]byte(s), &data); err != nil { + return nil, err + } + return data, nil +} + +// parseUint parses a string into a base 10 int +func parseUint(s string) (uint64, error) { + if s == "" { + return 0, nil + } + + result, err := strconv.ParseUint(s, 10, 64) + if err != nil { + return 0, errors.Wrap(err, "parseUint") + } + return result, nil +} + +// plugin executes a subprocess as the given command string. It is assumed the +// resulting command returns JSON which is then parsed and returned as the +// value for use in the template. +func plugin(name string, args ...string) (string, error) { + if name == "" { + return "", nil + } + + stdout, stderr := new(bytes.Buffer), new(bytes.Buffer) + + // Strip and trim each arg or else some plugins get confused with the newline + // characters + jsons := make([]string, 0, len(args)) + for _, arg := range args { + if v := strings.TrimSpace(arg); v != "" { + jsons = append(jsons, v) + } + } + + cmd := exec.Command(name, jsons...) 
+ cmd.Stdout = stdout + cmd.Stderr = stderr + if err := cmd.Start(); err != nil { + return "", fmt.Errorf("exec %q: %s\n\nstdout:\n\n%s\n\nstderr:\n\n%s", + name, err, stdout.Bytes(), stderr.Bytes()) + } + + done := make(chan error, 1) + go func() { + done <- cmd.Wait() + }() + + select { + case <-time.After(30 * time.Second): + if cmd.Process != nil { + if err := cmd.Process.Kill(); err != nil { + return "", fmt.Errorf("exec %q: failed to kill", name) + } + } + <-done // Allow the goroutine to exit + return "", fmt.Errorf("exec %q: did not finish in 30s", name) + case err := <-done: + if err != nil { + return "", fmt.Errorf("exec %q: %s\n\nstdout:\n\n%s\n\nstderr:\n\n%s", + name, err, stdout.Bytes(), stderr.Bytes()) + } + } + + return strings.TrimSpace(stdout.String()), nil +} + +// replaceAll replaces all occurrences of a value in a string with the given +// replacement value. +func replaceAll(f, t, s string) (string, error) { + return strings.Replace(s, f, t, -1), nil +} + +// regexReplaceAll replaces all occurrences of a regular expression with +// the given replacement value. +func regexReplaceAll(re, pl, s string) (string, error) { + compiled, err := regexp.Compile(re) + if err != nil { + return "", err + } + return compiled.ReplaceAllString(s, pl), nil +} + +// regexMatch returns true or false if the string matches +// the given regular expression +func regexMatch(re, s string) (bool, error) { + compiled, err := regexp.Compile(re) + if err != nil { + return false, err + } + return compiled.MatchString(s), nil +} + +// split is a version of strings.Split that can be piped +func split(sep, s string) ([]string, error) { + s = strings.TrimSpace(s) + if s == "" { + return []string{}, nil + } + return strings.Split(s, sep), nil +} + +// timestamp returns the current UNIX timestamp in UTC. If an argument is +// specified, it will be used to format the timestamp. 
+func timestamp(s ...string) (string, error) { + switch len(s) { + case 0: + return now().Format(time.RFC3339), nil + case 1: + if s[0] == "unix" { + return strconv.FormatInt(now().Unix(), 10), nil + } + return now().Format(s[0]), nil + default: + return "", fmt.Errorf("timestamp: wrong number of arguments, expected 0 or 1"+ + ", but got %d", len(s)) + } +} + +// toLower converts the given string (usually by a pipe) to lowercase. +func toLower(s string) (string, error) { + return strings.ToLower(s), nil +} + +// toJSON converts the given structure into a deeply nested JSON string. +func toJSON(i interface{}) (string, error) { + result, err := json.Marshal(i) + if err != nil { + return "", errors.Wrap(err, "toJSON") + } + return string(bytes.TrimSpace(result)), err +} + +// toJSONPretty converts the given structure into a deeply nested pretty JSON +// string. +func toJSONPretty(m map[string]interface{}) (string, error) { + result, err := json.MarshalIndent(m, "", " ") + if err != nil { + return "", errors.Wrap(err, "toJSONPretty") + } + return string(bytes.TrimSpace(result)), err +} + +// toTitle converts the given string (usually by a pipe) to titlecase. +func toTitle(s string) (string, error) { + return strings.Title(s), nil +} + +// toUpper converts the given string (usually by a pipe) to uppercase. +func toUpper(s string) (string, error) { + return strings.ToUpper(s), nil +} + +// toYAML converts the given structure into a deeply nested YAML string. +func toYAML(m map[string]interface{}) (string, error) { + result, err := yaml.Marshal(m) + if err != nil { + return "", errors.Wrap(err, "toYAML") + } + return string(bytes.TrimSpace(result)), nil +} + +// toTOML converts the given structure into a deeply nested TOML string. 
+func toTOML(m map[string]interface{}) (string, error) { + buf := bytes.NewBuffer([]byte{}) + enc := toml.NewEncoder(buf) + if err := enc.Encode(m); err != nil { + return "", errors.Wrap(err, "toTOML") + } + result, err := ioutil.ReadAll(buf) + if err != nil { + return "", errors.Wrap(err, "toTOML") + } + return string(bytes.TrimSpace(result)), nil +} + +// add returns the sum of a and b. +func add(b, a interface{}) (interface{}, error) { + av := reflect.ValueOf(a) + bv := reflect.ValueOf(b) + + switch av.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + switch bv.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return av.Int() + bv.Int(), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return av.Int() + int64(bv.Uint()), nil + case reflect.Float32, reflect.Float64: + return float64(av.Int()) + bv.Float(), nil + default: + return nil, fmt.Errorf("add: unknown type for %q (%T)", bv, b) + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + switch bv.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return int64(av.Uint()) + bv.Int(), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return av.Uint() + bv.Uint(), nil + case reflect.Float32, reflect.Float64: + return float64(av.Uint()) + bv.Float(), nil + default: + return nil, fmt.Errorf("add: unknown type for %q (%T)", bv, b) + } + case reflect.Float32, reflect.Float64: + switch bv.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return av.Float() + float64(bv.Int()), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return av.Float() + float64(bv.Uint()), nil + case reflect.Float32, reflect.Float64: + return av.Float() + bv.Float(), nil + default: + return nil, fmt.Errorf("add: unknown type for %q 
(%T)", bv, b) + } + default: + return nil, fmt.Errorf("add: unknown type for %q (%T)", av, a) + } +} + +// subtract returns the difference of b from a. +func subtract(b, a interface{}) (interface{}, error) { + av := reflect.ValueOf(a) + bv := reflect.ValueOf(b) + + switch av.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + switch bv.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return av.Int() - bv.Int(), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return av.Int() - int64(bv.Uint()), nil + case reflect.Float32, reflect.Float64: + return float64(av.Int()) - bv.Float(), nil + default: + return nil, fmt.Errorf("subtract: unknown type for %q (%T)", bv, b) + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + switch bv.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return int64(av.Uint()) - bv.Int(), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return av.Uint() - bv.Uint(), nil + case reflect.Float32, reflect.Float64: + return float64(av.Uint()) - bv.Float(), nil + default: + return nil, fmt.Errorf("subtract: unknown type for %q (%T)", bv, b) + } + case reflect.Float32, reflect.Float64: + switch bv.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return av.Float() - float64(bv.Int()), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return av.Float() - float64(bv.Uint()), nil + case reflect.Float32, reflect.Float64: + return av.Float() - bv.Float(), nil + default: + return nil, fmt.Errorf("subtract: unknown type for %q (%T)", bv, b) + } + default: + return nil, fmt.Errorf("subtract: unknown type for %q (%T)", av, a) + } +} + +// multiply returns the product of a and b. 
+func multiply(b, a interface{}) (interface{}, error) { + av := reflect.ValueOf(a) + bv := reflect.ValueOf(b) + + switch av.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + switch bv.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return av.Int() * bv.Int(), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return av.Int() * int64(bv.Uint()), nil + case reflect.Float32, reflect.Float64: + return float64(av.Int()) * bv.Float(), nil + default: + return nil, fmt.Errorf("multiply: unknown type for %q (%T)", bv, b) + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + switch bv.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return int64(av.Uint()) * bv.Int(), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return av.Uint() * bv.Uint(), nil + case reflect.Float32, reflect.Float64: + return float64(av.Uint()) * bv.Float(), nil + default: + return nil, fmt.Errorf("multiply: unknown type for %q (%T)", bv, b) + } + case reflect.Float32, reflect.Float64: + switch bv.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return av.Float() * float64(bv.Int()), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return av.Float() * float64(bv.Uint()), nil + case reflect.Float32, reflect.Float64: + return av.Float() * bv.Float(), nil + default: + return nil, fmt.Errorf("multiply: unknown type for %q (%T)", bv, b) + } + default: + return nil, fmt.Errorf("multiply: unknown type for %q (%T)", av, a) + } +} + +// divide returns the division of b from a. 
+func divide(b, a interface{}) (interface{}, error) { + av := reflect.ValueOf(a) + bv := reflect.ValueOf(b) + + switch av.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + switch bv.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return av.Int() / bv.Int(), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return av.Int() / int64(bv.Uint()), nil + case reflect.Float32, reflect.Float64: + return float64(av.Int()) / bv.Float(), nil + default: + return nil, fmt.Errorf("divide: unknown type for %q (%T)", bv, b) + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + switch bv.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return int64(av.Uint()) / bv.Int(), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return av.Uint() / bv.Uint(), nil + case reflect.Float32, reflect.Float64: + return float64(av.Uint()) / bv.Float(), nil + default: + return nil, fmt.Errorf("divide: unknown type for %q (%T)", bv, b) + } + case reflect.Float32, reflect.Float64: + switch bv.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return av.Float() / float64(bv.Int()), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return av.Float() / float64(bv.Uint()), nil + case reflect.Float32, reflect.Float64: + return av.Float() / bv.Float(), nil + default: + return nil, fmt.Errorf("divide: unknown type for %q (%T)", bv, b) + } + default: + return nil, fmt.Errorf("divide: unknown type for %q (%T)", av, a) + } +} + +// modulo returns the modulo of b from a. 
+func modulo(b, a interface{}) (interface{}, error) { + av := reflect.ValueOf(a) + bv := reflect.ValueOf(b) + + switch av.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + switch bv.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return av.Int() % bv.Int(), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return av.Int() % int64(bv.Uint()), nil + default: + return nil, fmt.Errorf("modulo: unknown type for %q (%T)", bv, b) + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + switch bv.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return int64(av.Uint()) % bv.Int(), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return av.Uint() % bv.Uint(), nil + default: + return nil, fmt.Errorf("modulo: unknown type for %q (%T)", bv, b) + } + default: + return nil, fmt.Errorf("modulo: unknown type for %q (%T)", av, a) + } +} + +// blacklisted always returns an error, to be used in place of blacklisted template functions +func blacklisted(...string) (string, error) { + return "", errors.New("function is disabled") +} + +// pathInSandbox returns an error if the provided path doesn't fall within the +// sandbox or if the file can't be evaluated (missing, invalid symlink, etc.) 
+func pathInSandbox(sandbox, path string) error { + if sandbox != "" { + s, err := filepath.EvalSymlinks(path) + if err != nil { + return err + } + s, err = filepath.Rel(sandbox, s) + if err != nil { + return err + } + if strings.HasPrefix(s, "..") { + return fmt.Errorf("'%s' is outside of sandbox", path) + } + } + return nil +} + +// sockaddr wraps go-sockaddr templating +func sockaddr(args ...string) (string, error) { + t := fmt.Sprintf("{{ %s }} ", strings.Join(args, " ")) + k, err := socktmpl.Parse(t) + if err != nil { + return "", err + } + return k, nil +} diff --git a/vendor/github.com/hashicorp/consul-template/template/scratch.go b/vendor/github.com/hashicorp/consul-template/template/scratch.go new file mode 100644 index 000000000..c3d959dc8 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/template/scratch.go @@ -0,0 +1,125 @@ +package template + +import ( + "fmt" + "sort" + "sync" +) + +// Scratch is a wrapper around a map which is used by the template. +type Scratch struct { + once sync.Once + sync.RWMutex + values map[string]interface{} +} + +// Key returns a boolean indicating whether the given key exists in the map. +func (s *Scratch) Key(k string) bool { + s.RLock() + defer s.RUnlock() + _, ok := s.values[k] + return ok +} + +// Get returns a value previously set by Add or Set +func (s *Scratch) Get(k string) interface{} { + s.RLock() + defer s.RUnlock() + return s.values[k] +} + +// Set stores the value v at the key k. It will overwrite an existing value +// if present. +func (s *Scratch) Set(k string, v interface{}) string { + s.init() + + s.Lock() + defer s.Unlock() + s.values[k] = v + return "" +} + +// SetX behaves the same as Set, except it will not overwrite existing keys if +// already present. +func (s *Scratch) SetX(k string, v interface{}) string { + s.init() + + s.Lock() + defer s.Unlock() + if _, ok := s.values[k]; !ok { + s.values[k] = v + } + return "" +} + +// MapSet stores the value v into a key mk in the map named k. 
+func (s *Scratch) MapSet(k, mk string, v interface{}) (string, error) { + s.init() + + s.Lock() + defer s.Unlock() + return s.mapSet(k, mk, v, true) +} + +// MapSetX behaves the same as MapSet, except it will not overwrite the map +// key if it already exists. +func (s *Scratch) MapSetX(k, mk string, v interface{}) (string, error) { + s.init() + + s.Lock() + defer s.Unlock() + return s.mapSet(k, mk, v, false) +} + +// mapSet is sets the value in the map, overwriting if o is true. This function +// does not perform locking; callers should lock before invoking. +func (s *Scratch) mapSet(k, mk string, v interface{}, o bool) (string, error) { + if _, ok := s.values[k]; !ok { + s.values[k] = make(map[string]interface{}) + } + + typed, ok := s.values[k].(map[string]interface{}) + if !ok { + return "", fmt.Errorf("%q is not a map", k) + } + + if _, ok := typed[mk]; o || !ok { + typed[mk] = v + } + return "", nil +} + +// MapValues returns the list of values in the map sorted by key. +func (s *Scratch) MapValues(k string) ([]interface{}, error) { + s.init() + + s.Lock() + defer s.Unlock() + if s.values == nil { + return nil, nil + } + + typed, ok := s.values[k].(map[string]interface{}) + if !ok { + return nil, nil + } + + keys := make([]string, 0, len(typed)) + for k := range typed { + keys = append(keys, k) + } + sort.Strings(keys) + + sorted := make([]interface{}, len(keys)) + for i, k := range keys { + sorted[i] = typed[k] + } + return sorted, nil +} + +// init initializes the scratch. 
+func (s *Scratch) init() { + if s.values == nil { + s.values = make(map[string]interface{}) + } +} diff --git a/vendor/github.com/hashicorp/consul-template/template/template.go b/vendor/github.com/hashicorp/consul-template/template/template.go new file mode 100644 index 000000000..36da55183 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/template/template.go @@ -0,0 +1,303 @@ +package template + +import ( + "bytes" + "crypto/md5" + "encoding/hex" + "io/ioutil" + "text/template" + + "github.com/pkg/errors" + + dep "github.com/hashicorp/consul-template/dependency" +) + +var ( + // ErrTemplateContentsAndSource is the error returned when a template + // specifies both a "source" and "content" argument, which is not valid. + ErrTemplateContentsAndSource = errors.New("template: cannot specify both 'source' and 'content'") + + // ErrTemplateMissingContentsAndSource is the error returned when a template + // does not specify either a "source" or "content" argument, which is not + // valid. + ErrTemplateMissingContentsAndSource = errors.New("template: must specify exactly one of 'source' or 'content'") +) + +// Template is the internal representation of an individual template to process. +// The template retains the relationship between it's contents and is +// responsible for it's own execution. +type Template struct { + // contents is the string contents for the template. It is either given + // during template creation or read from disk when initialized. + contents string + + // source is the original location of the template. This may be undefined if + // the template was dynamically defined. + source string + + // leftDelim and rightDelim are the template delimiters. + leftDelim string + rightDelim string + + // hexMD5 stores the hex version of the MD5 + hexMD5 string + + // errMissingKey causes the template processing to exit immediately if a map + // is indexed with a key that does not exist. 
+ errMissingKey bool + + // functionBlacklist are functions not permitted to be executed + // when we render this template + functionBlacklist []string + + // sandboxPath adds a prefix to any path provided to the `file` function + // and causes an error if a relative path tries to traverse outside that + // prefix. + sandboxPath string +} + +// NewTemplateInput is used as input when creating the template. +type NewTemplateInput struct { + // Source is the location on disk to the file. + Source string + + // Contents are the raw template contents. + Contents string + + // ErrMissingKey causes the template parser to exit immediately with an error + // when a map is indexed with a key that does not exist. + ErrMissingKey bool + + // LeftDelim and RightDelim are the template delimiters. + LeftDelim string + RightDelim string + + // FunctionBlacklist are functions not permitted to be executed + // when we render this template + FunctionBlacklist []string + + // SandboxPath adds a prefix to any path provided to the `file` function + // and causes an error if a relative path tries to traverse outside that + // prefix. + SandboxPath string +} + +// NewTemplate creates and parses a new Consul Template template at the given +// path. If the template does not exist, an error is returned. During +// initialization, the template is read and is parsed for dependencies. Any +// errors that occur are returned. 
+func NewTemplate(i *NewTemplateInput) (*Template, error) { + if i == nil { + i = &NewTemplateInput{} + } + + // Validate that we are either given the path or the explicit contents + if i.Source != "" && i.Contents != "" { + return nil, ErrTemplateContentsAndSource + } else if i.Source == "" && i.Contents == "" { + return nil, ErrTemplateMissingContentsAndSource + } + + var t Template + t.source = i.Source + t.contents = i.Contents + t.leftDelim = i.LeftDelim + t.rightDelim = i.RightDelim + t.errMissingKey = i.ErrMissingKey + t.functionBlacklist = i.FunctionBlacklist + t.sandboxPath = i.SandboxPath + + if i.Source != "" { + contents, err := ioutil.ReadFile(i.Source) + if err != nil { + return nil, errors.Wrap(err, "failed to read template") + } + t.contents = string(contents) + } + + // Compute the MD5, encode as hex + hash := md5.Sum([]byte(t.contents)) + t.hexMD5 = hex.EncodeToString(hash[:]) + + return &t, nil +} + +// ID returns the identifier for this template. +func (t *Template) ID() string { + return t.hexMD5 +} + +// Contents returns the raw contents of the template. +func (t *Template) Contents() string { + return t.contents +} + +// Source returns the filepath source of this template. +func (t *Template) Source() string { + if t.source == "" { + return "(dynamic)" + } + return t.source +} + +// ExecuteInput is used as input to the template's execute function. +type ExecuteInput struct { + // Brain is the brain where data for the template is stored. + Brain *Brain + + // Env is a custom environment provided to the template for envvar resolution. + // Values specified here will take precedence over any values in the + // environment when using the `env` function. + Env []string +} + +// ExecuteResult is the result of the template execution. +type ExecuteResult struct { + // Used is the set of dependencies that were used. + Used *dep.Set + + // Missing is the set of dependencies that were missing. + Missing *dep.Set + + // Output is the rendered result. 
+ Output []byte +} + +// Execute evaluates this template in the provided context. +func (t *Template) Execute(i *ExecuteInput) (*ExecuteResult, error) { + if i == nil { + i = &ExecuteInput{} + } + + var used, missing dep.Set + + tmpl := template.New("") + tmpl.Delims(t.leftDelim, t.rightDelim) + + tmpl.Funcs(funcMap(&funcMapInput{ + t: tmpl, + brain: i.Brain, + env: i.Env, + used: &used, + missing: &missing, + functionBlacklist: t.functionBlacklist, + sandboxPath: t.sandboxPath, + })) + + if t.errMissingKey { + tmpl.Option("missingkey=error") + } else { + tmpl.Option("missingkey=zero") + } + + tmpl, err := tmpl.Parse(t.contents) + if err != nil { + return nil, errors.Wrap(err, "parse") + } + + // Execute the template into the writer + var b bytes.Buffer + if err := tmpl.Execute(&b, nil); err != nil { + return nil, errors.Wrap(err, "execute") + } + + return &ExecuteResult{ + Used: &used, + Missing: &missing, + Output: b.Bytes(), + }, nil +} + +// funcMapInput is input to the funcMap, which builds the template functions. +type funcMapInput struct { + t *template.Template + brain *Brain + env []string + functionBlacklist []string + sandboxPath string + used *dep.Set + missing *dep.Set +} + +// funcMap is the map of template functions to their respective functions. 
+func funcMap(i *funcMapInput) template.FuncMap { + var scratch Scratch + + r := template.FuncMap{ + // API functions + "datacenters": datacentersFunc(i.brain, i.used, i.missing), + "file": fileFunc(i.brain, i.used, i.missing, i.sandboxPath), + "key": keyFunc(i.brain, i.used, i.missing), + "keyExists": keyExistsFunc(i.brain, i.used, i.missing), + "keyOrDefault": keyWithDefaultFunc(i.brain, i.used, i.missing), + "ls": lsFunc(i.brain, i.used, i.missing, true), + "safeLs": safeLsFunc(i.brain, i.used, i.missing), + "node": nodeFunc(i.brain, i.used, i.missing), + "nodes": nodesFunc(i.brain, i.used, i.missing), + "secret": secretFunc(i.brain, i.used, i.missing), + "secrets": secretsFunc(i.brain, i.used, i.missing), + "service": serviceFunc(i.brain, i.used, i.missing), + "services": servicesFunc(i.brain, i.used, i.missing), + "tree": treeFunc(i.brain, i.used, i.missing, true), + "safeTree": safeTreeFunc(i.brain, i.used, i.missing), + + // Scratch + "scratch": func() *Scratch { return &scratch }, + + // Helper functions + "base64Decode": base64Decode, + "base64Encode": base64Encode, + "base64URLDecode": base64URLDecode, + "base64URLEncode": base64URLEncode, + "byKey": byKey, + "byTag": byTag, + "contains": contains, + "containsAll": containsSomeFunc(true, true), + "containsAny": containsSomeFunc(false, false), + "containsNone": containsSomeFunc(true, false), + "containsNotAll": containsSomeFunc(false, true), + "env": envFunc(i.env), + "executeTemplate": executeTemplateFunc(i.t), + "explode": explode, + "explodeMap": explodeMap, + "in": in, + "indent": indent, + "loop": loop, + "join": join, + "trimSpace": trimSpace, + "parseBool": parseBool, + "parseFloat": parseFloat, + "parseInt": parseInt, + "parseJSON": parseJSON, + "parseUint": parseUint, + "plugin": plugin, + "regexReplaceAll": regexReplaceAll, + "regexMatch": regexMatch, + "replaceAll": replaceAll, + "timestamp": timestamp, + "toLower": toLower, + "toJSON": toJSON, + "toJSONPretty": toJSONPretty, + "toTitle": 
toTitle, + "toTOML": toTOML, + "toUpper": toUpper, + "toYAML": toYAML, + "split": split, + "byMeta": byMeta, + "sockaddr": sockaddr, + // Math functions + "add": add, + "subtract": subtract, + "multiply": multiply, + "divide": divide, + "modulo": modulo, + } + + for _, bf := range i.functionBlacklist { + if _, ok := r[bf]; ok { + r[bf] = blacklisted + } + } + + return r +} diff --git a/vendor/github.com/hashicorp/consul-template/version/version.go b/vendor/github.com/hashicorp/consul-template/version/version.go new file mode 100644 index 000000000..c0d0a6919 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/version/version.go @@ -0,0 +1,12 @@ +package version + +import "fmt" + +const Version = "0.22.0" + +var ( + Name string + GitCommit string + + HumanVersion = fmt.Sprintf("%s v%s (%s)", Name, Version, GitCommit) +) diff --git a/vendor/github.com/hashicorp/consul-template/watch/view.go b/vendor/github.com/hashicorp/consul-template/watch/view.go new file mode 100644 index 000000000..bcef6c0b2 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/watch/view.go @@ -0,0 +1,308 @@ +package watch + +import ( + "fmt" + "log" + "math/rand" + "reflect" + "sync" + "time" + + dep "github.com/hashicorp/consul-template/dependency" +) + +const ( + // The amount of time to do a blocking query for + defaultWaitTime = 60 * time.Second +) + +// View is a representation of a Dependency and the most recent data it has +// received from Consul. +type View struct { + // dependency is the dependency that is associated with this View + dependency dep.Dependency + + // clients is the list of clients to communicate upstream. This is passed + // directly to the dependency. + clients *dep.ClientSet + + // data is the most-recently-received data from Consul for this View. It is + // accompanied by a series of locks and booleans to ensure consistency. 
+ dataLock sync.RWMutex + data interface{} + receivedData bool + lastIndex uint64 + + // maxStale is the maximum amount of time to allow a query to be stale. + maxStale time.Duration + + // once determines if this view should receive data exactly once. + once bool + + // retryFunc is the function to invoke on failure to determine if a retry + // should be attempted. + retryFunc RetryFunc + + // stopCh is used to stop polling on this View + stopCh chan struct{} + + // vaultGrace is the grace period between a lease and the max TTL for which + // Consul Template will generate a new secret instead of renewing an existing + // one. + vaultGrace time.Duration +} + +// NewViewInput is used as input to the NewView function. +type NewViewInput struct { + // Dependency is the dependency to associate with the new view. + Dependency dep.Dependency + + // Clients is the list of clients to communicate upstream. This is passed + // directly to the dependency. + Clients *dep.ClientSet + + // MaxStale is the maximum amount a time a query response is allowed to be + // stale before forcing a read from the leader. + MaxStale time.Duration + + // Once indicates this view should poll for data exactly one time. + Once bool + + // RetryFunc is a function which dictates how this view should retry on + // upstream errors. + RetryFunc RetryFunc + + // VaultGrace is the grace period between a lease and the max TTL for which + // Consul Template will generate a new secret instead of renewing an existing + // one. + VaultGrace time.Duration +} + +// NewView constructs a new view with the given inputs. +func NewView(i *NewViewInput) (*View, error) { + return &View{ + dependency: i.Dependency, + clients: i.Clients, + maxStale: i.MaxStale, + once: i.Once, + retryFunc: i.RetryFunc, + stopCh: make(chan struct{}, 1), + vaultGrace: i.VaultGrace, + }, nil +} + +// Dependency returns the dependency attached to this View. 
func (v *View) Dependency() dep.Dependency {
	return v.dependency
}

// Data returns the most-recently-received data from Consul for this View.
func (v *View) Data() interface{} {
	v.dataLock.RLock()
	defer v.dataLock.RUnlock()
	return v.data
}

// DataAndLastIndex returns the most-recently-received data from Consul for
// this view, along with the last index. This is atomic so you will get the
// index that goes with the data you are fetching.
func (v *View) DataAndLastIndex() (interface{}, uint64) {
	v.dataLock.RLock()
	defer v.dataLock.RUnlock()
	return v.data, v.lastIndex
}

// poll queries the Consul instance for data using the fetch function, but also
// accounts for interrupts on the interrupt channel. This allows the poll
// function to be fired in a goroutine, but then halted even if the fetch
// function is in the middle of a blocking query.
func (v *View) poll(viewCh chan<- *View, errCh chan<- error) {
	// retries counts consecutive failures; it is reset on any successful
	// contact or data receipt, so backoff only grows across true failures.
	var retries int

	for {
		// Each iteration launches one fetch. The three channels are
		// buffered (size 1) so fetch can signal without blocking even if
		// poll has already moved on.
		doneCh := make(chan struct{}, 1)
		successCh := make(chan struct{}, 1)
		fetchErrCh := make(chan error, 1)
		go v.fetch(doneCh, successCh, fetchErrCh)

	WAIT:
		select {
		case <-doneCh:
			// Reset the retry to avoid exponentially incrementing retries
			// when we have some successful requests.
			retries = 0

			log.Printf("[TRACE] (view) %s received data", v.dependency)
			select {
			case <-v.stopCh:
				return
			case viewCh <- v:
			}

			// If we are operating in once mode, do not loop - we received
			// data at least once which is the API promise here.
			if v.once {
				return
			}
		case <-successCh:
			// We successfully received a non-error response from the server.
			// This does not mean we have data (that's doneCh's job), but
			// rather this just resets the counter indicating we communicated
			// successfully. For example, Consul may have an outage, but when
			// it returns, the view is unchanged. We have to reset the
			// counter retries, but not update the actual template.
			log.Printf("[TRACE] (view) %s successful contact, resetting retries", v.dependency)
			retries = 0
			// goto (rather than continue) keeps waiting on the SAME
			// in-flight fetch instead of starting a new one.
			goto WAIT
		case err := <-fetchErrCh:
			if v.retryFunc != nil {
				retry, sleep := v.retryFunc(retries)
				if retry {
					log.Printf("[WARN] (view) %s (retry attempt %d after %q)",
						err, retries+1, sleep)
					select {
					case <-time.After(sleep):
						retries++
						continue
					case <-v.stopCh:
						return
					}
				}
			}

			log.Printf("[ERR] (view) %s (exceeded maximum retries)", err)

			// Push the error back up to the watcher.
			select {
			case <-v.stopCh:
				return
			case errCh <- err:
				return
			}
		case <-v.stopCh:
			log.Printf("[TRACE] (view) %s stopping poll (received on view stopCh)", v.dependency)
			return
		}
	}
}

// fetch queries the Consul instance for the attached dependency. This API
// promises that either data will be written to doneCh or an error will be
// written to errCh. It is designed to be run in a goroutine that selects the
// result of doneCh and errCh. It is assumed that only one instance of fetch
// is running per View and therefore no locking or mutexes are used.
func (v *View) fetch(doneCh, successCh chan<- struct{}, errCh chan<- error) {
	log.Printf("[TRACE] (view) %s starting fetch", v.dependency)

	// Stale reads are only permitted when a max-staleness bound was set.
	var allowStale bool
	if v.maxStale != 0 {
		allowStale = true
	}

	for {
		// If the view was stopped, short-circuit this loop. This prevents a
		// bug where a view can get "lost" in the event Consul Template is
		// reloaded.
		select {
		case <-v.stopCh:
			return
		default:
		}

		start := time.Now() // for rateLimiter below

		// Blocking query: WaitIndex makes the server hold the request until
		// the index advances (or WaitTime elapses).
		data, rm, err := v.dependency.Fetch(v.clients, &dep.QueryOptions{
			AllowStale: allowStale,
			WaitTime:   defaultWaitTime,
			WaitIndex:  v.lastIndex,
			VaultGrace: v.vaultGrace,
		})
		if err != nil {
			if err == dep.ErrStopped {
				log.Printf("[TRACE] (view) %s reported stop", v.dependency)
			} else {
				errCh <- err
			}
			return
		}

		if rm == nil {
			errCh <- fmt.Errorf("received nil response metadata - this is a bug " +
				"and should be reported")
			return
		}

		// If we got this far, we received data successfully. That data might
		// not trigger a data update (because we could continue below), but
		// we need to inform the poller to reset the retry count.
		// Non-blocking send: poll may not be listening, and that is fine.
		log.Printf("[TRACE] (view) %s marking successful data response", v.dependency)
		select {
		case successCh <- struct{}{}:
		default:
		}

		// The stale response exceeded the allowed staleness: re-query the
		// leader once (allowStale=false), then restore stale reads below.
		if allowStale && rm.LastContact > v.maxStale {
			allowStale = false
			log.Printf("[TRACE] (view) %s stale data (last contact exceeded max_stale)", v.dependency)
			continue
		}

		if v.maxStale != 0 {
			allowStale = true
		}

		// rateLimiter is defined elsewhere in this file; presumably it
		// spaces out rapid re-queries (NOTE(review): confirm semantics of
		// the > 1 threshold against its definition).
		if dur := rateLimiter(start); dur > 1 {
			time.Sleep(dur)
		}

		if rm.LastIndex == v.lastIndex {
			log.Printf("[TRACE] (view) %s no new data (index was the same)", v.dependency)
			continue
		}

		v.dataLock.Lock()
		if rm.LastIndex < v.lastIndex {
			// An index moving backwards means the server restarted or lost
			// state; reset and re-fetch from scratch.
			log.Printf("[TRACE] (view) %s had a lower index, resetting", v.dependency)
			v.lastIndex = 0
			v.dataLock.Unlock()
			continue
		}
		v.lastIndex = rm.LastIndex

		if v.receivedData && reflect.DeepEqual(data, v.data) {
			log.Printf("[TRACE] (view) %s no new data (contents were the same)", v.dependency)
			v.dataLock.Unlock()
			continue
		}

		if data == nil && rm.Block {
			log.Printf("[TRACE] (view) %s asked for blocking query", v.dependency)
			v.dataLock.Unlock()
			continue
		}

		v.data = data
		v.receivedData = true
		v.dataLock.Unlock()

		// Signal the poller that fresh data is available, then exit; poll
		// starts a new fetch for the next round.
		close(doneCh)
		return
	}
}

const
minDelayBetweenUpdates = time.Millisecond * 100 + +// return a duration to sleep to limit the frequency of upstream calls +func rateLimiter(start time.Time) time.Duration { + remaining := minDelayBetweenUpdates - time.Since(start) + if remaining > 0 { + dither := time.Duration(rand.Int63n(20000000)) // 0-20ms + return remaining + dither + } + return 0 +} + +// stop halts polling of this view. +func (v *View) stop() { + v.dependency.Stop() + close(v.stopCh) +} diff --git a/vendor/github.com/hashicorp/consul-template/watch/watcher.go b/vendor/github.com/hashicorp/consul-template/watch/watcher.go new file mode 100644 index 000000000..fcbaa3521 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/watch/watcher.go @@ -0,0 +1,253 @@ +package watch + +import ( + "log" + "sync" + "time" + + dep "github.com/hashicorp/consul-template/dependency" + "github.com/pkg/errors" +) + +// dataBufferSize is the default number of views to process in a batch. +const dataBufferSize = 2048 + +type RetryFunc func(int) (bool, time.Duration) + +// Watcher is a top-level manager for views that poll Consul for data. +type Watcher struct { + sync.Mutex + + // clients is the collection of API clients to talk to upstreams. + clients *dep.ClientSet + + // dataCh is the chan where Views will be published. + dataCh chan *View + + // errCh is the chan where any errors will be published. + errCh chan error + + // depViewMap is a map of Templates to Views. Templates are keyed by + // their string. + depViewMap map[string]*View + + // maxStale specifies the maximum staleness of a query response. + maxStale time.Duration + + // once signals if this watcher should tell views to retrieve data exactly + // one time instead of polling infinitely. + once bool + + // retryFuncs specifies the different ways to retry based on the upstream. 
+ retryFuncConsul RetryFunc + retryFuncDefault RetryFunc + retryFuncVault RetryFunc + + // vaultGrace is the grace period between a lease and the max TTL for which + // Consul Template will generate a new secret instead of renewing an existing + // one. + vaultGrace time.Duration +} + +type NewWatcherInput struct { + // Clients is the client set to communicate with upstreams. + Clients *dep.ClientSet + + // MaxStale is the maximum staleness of a query. + MaxStale time.Duration + + // Once specifies this watcher should tell views to poll exactly once. + Once bool + + // RenewVault indicates if this watcher should renew Vault tokens. + RenewVault bool + + // VaultToken is the vault token to renew. + VaultToken string + + // VaultAgentTokenFile is the path to Vault Agent token file + VaultAgentTokenFile string + + // RetryFuncs specify the different ways to retry based on the upstream. + RetryFuncConsul RetryFunc + RetryFuncDefault RetryFunc + RetryFuncVault RetryFunc + + // VaultGrace is the grace period between a lease and the max TTL for which + // Consul Template will generate a new secret instead of renewing an existing + // one. + VaultGrace time.Duration +} + +// NewWatcher creates a new watcher using the given API client. 
+func NewWatcher(i *NewWatcherInput) (*Watcher, error) { + w := &Watcher{ + clients: i.Clients, + depViewMap: make(map[string]*View), + dataCh: make(chan *View, dataBufferSize), + errCh: make(chan error), + maxStale: i.MaxStale, + once: i.Once, + retryFuncConsul: i.RetryFuncConsul, + retryFuncDefault: i.RetryFuncDefault, + retryFuncVault: i.RetryFuncVault, + vaultGrace: i.VaultGrace, + } + + // Start a watcher for the Vault renew if that config was specified + if i.RenewVault { + vt, err := dep.NewVaultTokenQuery(i.VaultToken) + if err != nil { + return nil, errors.Wrap(err, "watcher") + } + if _, err := w.Add(vt); err != nil { + return nil, errors.Wrap(err, "watcher") + } + } + + if len(i.VaultAgentTokenFile) > 0 { + vag, err := dep.NewVaultAgentTokenQuery(i.VaultAgentTokenFile) + if err != nil { + return nil, errors.Wrap(err, "watcher") + } + if _, err := w.Add(vag); err != nil { + return nil, errors.Wrap(err, "watcher") + } + } + + return w, nil +} + +// DataCh returns a read-only channel of Views which is populated when a view +// receives data from its upstream. +func (w *Watcher) DataCh() <-chan *View { + return w.dataCh +} + +// ErrCh returns a read-only channel of errors returned by the upstream. +func (w *Watcher) ErrCh() <-chan error { + return w.errCh +} + +// Add adds the given dependency to the list of monitored dependencies +// and start the associated view. If the dependency already exists, no action is +// taken. +// +// If the Dependency already existed, it this function will return false. If the +// view was successfully created, it will return true. If an error occurs while +// creating the view, it will be returned here (but future errors returned by +// the view will happen on the channel). 
+func (w *Watcher) Add(d dep.Dependency) (bool, error) { + w.Lock() + defer w.Unlock() + + log.Printf("[DEBUG] (watcher) adding %s", d) + + if _, ok := w.depViewMap[d.String()]; ok { + log.Printf("[TRACE] (watcher) %s already exists, skipping", d) + return false, nil + } + + // Choose the correct retry function based off of the dependency's type. + var retryFunc RetryFunc + switch d.Type() { + case dep.TypeConsul: + retryFunc = w.retryFuncConsul + case dep.TypeVault: + retryFunc = w.retryFuncVault + default: + retryFunc = w.retryFuncDefault + } + + v, err := NewView(&NewViewInput{ + Dependency: d, + Clients: w.clients, + MaxStale: w.maxStale, + Once: w.once, + RetryFunc: retryFunc, + VaultGrace: w.vaultGrace, + }) + if err != nil { + return false, errors.Wrap(err, "watcher") + } + + log.Printf("[TRACE] (watcher) %s starting", d) + + w.depViewMap[d.String()] = v + go v.poll(w.dataCh, w.errCh) + + return true, nil +} + +// Watching determines if the given dependency is being watched. +func (w *Watcher) Watching(d dep.Dependency) bool { + w.Lock() + defer w.Unlock() + + _, ok := w.depViewMap[d.String()] + return ok +} + +// ForceWatching is used to force setting the internal state of watching +// a dependency. This is only used for unit testing purposes. +func (w *Watcher) ForceWatching(d dep.Dependency, enabled bool) { + w.Lock() + defer w.Unlock() + + if enabled { + w.depViewMap[d.String()] = nil + } else { + delete(w.depViewMap, d.String()) + } +} + +// Remove removes the given dependency from the list and stops the +// associated View. If a View for the given dependency does not exist, this +// function will return false. If the View does exist, this function will return +// true upon successful deletion. 
+func (w *Watcher) Remove(d dep.Dependency) bool { + w.Lock() + defer w.Unlock() + + log.Printf("[DEBUG] (watcher) removing %s", d) + + if view, ok := w.depViewMap[d.String()]; ok { + log.Printf("[TRACE] (watcher) actually removing %s", d) + view.stop() + delete(w.depViewMap, d.String()) + return true + } + + log.Printf("[TRACE] (watcher) %s did not exist, skipping", d) + return false +} + +// Size returns the number of views this watcher is watching. +func (w *Watcher) Size() int { + w.Lock() + defer w.Unlock() + return len(w.depViewMap) +} + +// Stop halts this watcher and any currently polling views immediately. If a +// view was in the middle of a poll, no data will be returned. +func (w *Watcher) Stop() { + w.Lock() + defer w.Unlock() + + log.Printf("[DEBUG] (watcher) stopping all views") + + for _, view := range w.depViewMap { + if view == nil { + continue + } + log.Printf("[TRACE] (watcher) stopping %s", view.Dependency()) + view.stop() + } + + // Reset the map to have no views + w.depViewMap = make(map[string]*View) + + // Close any idle TCP connections + w.clients.Stop() +} diff --git a/vendor/github.com/hashicorp/consul/api/acl.go b/vendor/github.com/hashicorp/consul/api/acl.go index 53a052363..124409ff2 100644 --- a/vendor/github.com/hashicorp/consul/api/acl.go +++ b/vendor/github.com/hashicorp/consul/api/acl.go @@ -4,7 +4,10 @@ import ( "fmt" "io" "io/ioutil" + "net/url" "time" + + "github.com/mitchellh/mapstructure" ) const ( @@ -19,18 +22,26 @@ type ACLTokenPolicyLink struct { ID string Name string } +type ACLTokenRoleLink struct { + ID string + Name string +} // ACLToken represents an ACL Token type ACLToken struct { - CreateIndex uint64 - ModifyIndex uint64 - AccessorID string - SecretID string - Description string - Policies []*ACLTokenPolicyLink - Local bool - CreateTime time.Time `json:",omitempty"` - Hash []byte `json:",omitempty"` + CreateIndex uint64 + ModifyIndex uint64 + AccessorID string + SecretID string + Description string + Policies 
[]*ACLTokenPolicyLink `json:",omitempty"` + Roles []*ACLTokenRoleLink `json:",omitempty"` + ServiceIdentities []*ACLServiceIdentity `json:",omitempty"` + Local bool + ExpirationTTL time.Duration `json:",omitempty"` + ExpirationTime *time.Time `json:",omitempty"` + CreateTime time.Time `json:",omitempty"` + Hash []byte `json:",omitempty"` // DEPRECATED (ACL-Legacy-Compat) // Rules will only be present for legacy tokens returned via the new APIs @@ -38,15 +49,18 @@ type ACLToken struct { } type ACLTokenListEntry struct { - CreateIndex uint64 - ModifyIndex uint64 - AccessorID string - Description string - Policies []*ACLTokenPolicyLink - Local bool - CreateTime time.Time - Hash []byte - Legacy bool + CreateIndex uint64 + ModifyIndex uint64 + AccessorID string + Description string + Policies []*ACLTokenPolicyLink `json:",omitempty"` + Roles []*ACLTokenRoleLink `json:",omitempty"` + ServiceIdentities []*ACLServiceIdentity `json:",omitempty"` + Local bool + ExpirationTime *time.Time `json:",omitempty"` + CreateTime time.Time + Hash []byte + Legacy bool } // ACLEntry is used to represent a legacy ACL token @@ -67,11 +81,20 @@ type ACLReplicationStatus struct { SourceDatacenter string ReplicationType string ReplicatedIndex uint64 + ReplicatedRoleIndex uint64 ReplicatedTokenIndex uint64 LastSuccess time.Time LastError time.Time } +// ACLServiceIdentity represents a high-level grant of all necessary privileges +// to assume the identity of the named Service in the Catalog and within +// Connect. +type ACLServiceIdentity struct { + ServiceName string + Datacenters []string `json:",omitempty"` +} + // ACLPolicy represents an ACL Policy. type ACLPolicy struct { ID string @@ -94,6 +117,113 @@ type ACLPolicyListEntry struct { ModifyIndex uint64 } +type ACLRolePolicyLink struct { + ID string + Name string +} + +// ACLRole represents an ACL Role. 
+type ACLRole struct { + ID string + Name string + Description string + Policies []*ACLRolePolicyLink `json:",omitempty"` + ServiceIdentities []*ACLServiceIdentity `json:",omitempty"` + Hash []byte + CreateIndex uint64 + ModifyIndex uint64 +} + +// BindingRuleBindType is the type of binding rule mechanism used. +type BindingRuleBindType string + +const ( + // BindingRuleBindTypeService binds to a service identity with the given name. + BindingRuleBindTypeService BindingRuleBindType = "service" + + // BindingRuleBindTypeRole binds to pre-existing roles with the given name. + BindingRuleBindTypeRole BindingRuleBindType = "role" +) + +type ACLBindingRule struct { + ID string + Description string + AuthMethod string + Selector string + BindType BindingRuleBindType + BindName string + + CreateIndex uint64 + ModifyIndex uint64 +} + +type ACLAuthMethod struct { + Name string + Type string + Description string + + // Configuration is arbitrary configuration for the auth method. This + // should only contain primitive values and containers (such as lists and + // maps). + Config map[string]interface{} + + CreateIndex uint64 + ModifyIndex uint64 +} + +type ACLAuthMethodListEntry struct { + Name string + Type string + Description string + CreateIndex uint64 + ModifyIndex uint64 +} + +// ParseKubernetesAuthMethodConfig takes a raw config map and returns a parsed +// KubernetesAuthMethodConfig. +func ParseKubernetesAuthMethodConfig(raw map[string]interface{}) (*KubernetesAuthMethodConfig, error) { + var config KubernetesAuthMethodConfig + decodeConf := &mapstructure.DecoderConfig{ + Result: &config, + WeaklyTypedInput: true, + } + + decoder, err := mapstructure.NewDecoder(decodeConf) + if err != nil { + return nil, err + } + + if err := decoder.Decode(raw); err != nil { + return nil, fmt.Errorf("error decoding config: %s", err) + } + + return &config, nil +} + +// KubernetesAuthMethodConfig is the config for the built-in Consul auth method +// for Kubernetes. 
+type KubernetesAuthMethodConfig struct { + Host string `json:",omitempty"` + CACert string `json:",omitempty"` + ServiceAccountJWT string `json:",omitempty"` +} + +// RenderToConfig converts this into a map[string]interface{} suitable for use +// in the ACLAuthMethod.Config field. +func (c *KubernetesAuthMethodConfig) RenderToConfig() map[string]interface{} { + return map[string]interface{}{ + "Host": c.Host, + "CACert": c.CACert, + "ServiceAccountJWT": c.ServiceAccountJWT, + } +} + +type ACLLoginParams struct { + AuthMethod string + BearerToken string + Meta map[string]string `json:",omitempty"` +} + // ACL can be used to query the ACL endpoints type ACL struct { c *Client @@ -266,17 +396,9 @@ func (a *ACL) Replication(q *QueryOptions) (*ACLReplicationStatus, *QueryMeta, e return entries, qm, nil } -// TokenCreate creates a new ACL token. It requires that the AccessorID and SecretID fields -// of the ACLToken structure to be empty as these will be filled in by Consul. +// TokenCreate creates a new ACL token. If either the AccessorID or SecretID fields +// of the ACLToken structure are empty they will be filled in by Consul. 
func (a *ACL) TokenCreate(token *ACLToken, q *WriteOptions) (*ACLToken, *WriteMeta, error) { - if token.AccessorID != "" { - return nil, nil, fmt.Errorf("Cannot specify an AccessorID in Token Creation") - } - - if token.SecretID != "" { - return nil, nil, fmt.Errorf("Cannot specify a SecretID in Token Creation") - } - r := a.c.newRequest("PUT", "/v1/acl/token") r.setWriteOptions(q) r.obj = token @@ -437,7 +559,6 @@ func (a *ACL) PolicyCreate(policy *ACLPolicy, q *WriteOptions) (*ACLPolicy, *Wri if policy.ID != "" { return nil, nil, fmt.Errorf("Cannot specify an ID in Policy Creation") } - r := a.c.newRequest("PUT", "/v1/acl/policy") r.setWriteOptions(q) r.obj = policy @@ -460,7 +581,7 @@ func (a *ACL) PolicyCreate(policy *ACLPolicy, q *WriteOptions) (*ACLPolicy, *Wri // existing policy ID func (a *ACL) PolicyUpdate(policy *ACLPolicy, q *WriteOptions) (*ACLPolicy, *WriteMeta, error) { if policy.ID == "" { - return nil, nil, fmt.Errorf("Must specify an ID in Policy Creation") + return nil, nil, fmt.Errorf("Must specify an ID in Policy Update") } r := a.c.newRequest("PUT", "/v1/acl/policy/"+policy.ID) @@ -586,3 +707,410 @@ func (a *ACL) RulesTranslateToken(tokenID string) (string, error) { return string(ruleBytes), nil } + +// RoleCreate will create a new role. It is not allowed for the role parameters +// ID field to be set as this will be generated by Consul while processing the request. +func (a *ACL) RoleCreate(role *ACLRole, q *WriteOptions) (*ACLRole, *WriteMeta, error) { + if role.ID != "" { + return nil, nil, fmt.Errorf("Cannot specify an ID in Role Creation") + } + + r := a.c.newRequest("PUT", "/v1/acl/role") + r.setWriteOptions(q) + r.obj = role + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + var out ACLRole + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + + return &out, wm, nil +} + +// RoleUpdate updates a role. 
The ID field of the role parameter must be set to an +// existing role ID +func (a *ACL) RoleUpdate(role *ACLRole, q *WriteOptions) (*ACLRole, *WriteMeta, error) { + if role.ID == "" { + return nil, nil, fmt.Errorf("Must specify an ID in Role Update") + } + + r := a.c.newRequest("PUT", "/v1/acl/role/"+role.ID) + r.setWriteOptions(q) + r.obj = role + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + var out ACLRole + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + + return &out, wm, nil +} + +// RoleDelete deletes a role given its ID. +func (a *ACL) RoleDelete(roleID string, q *WriteOptions) (*WriteMeta, error) { + r := a.c.newRequest("DELETE", "/v1/acl/role/"+roleID) + r.setWriteOptions(q) + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, err + } + resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + return wm, nil +} + +// RoleRead retrieves the role details (by ID). Returns nil if not found. +func (a *ACL) RoleRead(roleID string, q *QueryOptions) (*ACLRole, *QueryMeta, error) { + r := a.c.newRequest("GET", "/v1/acl/role/"+roleID) + r.setQueryOptions(q) + found, rtt, resp, err := requireNotFoundOrOK(a.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + if !found { + return nil, qm, nil + } + + var out ACLRole + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + + return &out, qm, nil +} + +// RoleReadByName retrieves the role details (by name). Returns nil if not found. 
+func (a *ACL) RoleReadByName(roleName string, q *QueryOptions) (*ACLRole, *QueryMeta, error) { + r := a.c.newRequest("GET", "/v1/acl/role/name/"+url.QueryEscape(roleName)) + r.setQueryOptions(q) + found, rtt, resp, err := requireNotFoundOrOK(a.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + if !found { + return nil, qm, nil + } + + var out ACLRole + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + + return &out, qm, nil +} + +// RoleList retrieves a listing of all roles. The listing does not include some +// metadata for the role as those should be retrieved by subsequent calls to +// RoleRead. +func (a *ACL) RoleList(q *QueryOptions) ([]*ACLRole, *QueryMeta, error) { + r := a.c.newRequest("GET", "/v1/acl/roles") + r.setQueryOptions(q) + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var entries []*ACLRole + if err := decodeBody(resp, &entries); err != nil { + return nil, nil, err + } + return entries, qm, nil +} + +// AuthMethodCreate will create a new auth method. +func (a *ACL) AuthMethodCreate(method *ACLAuthMethod, q *WriteOptions) (*ACLAuthMethod, *WriteMeta, error) { + if method.Name == "" { + return nil, nil, fmt.Errorf("Must specify a Name in Auth Method Creation") + } + + r := a.c.newRequest("PUT", "/v1/acl/auth-method") + r.setWriteOptions(q) + r.obj = method + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + var out ACLAuthMethod + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + + return &out, wm, nil +} + +// AuthMethodUpdate updates an auth method. 
+func (a *ACL) AuthMethodUpdate(method *ACLAuthMethod, q *WriteOptions) (*ACLAuthMethod, *WriteMeta, error) { + if method.Name == "" { + return nil, nil, fmt.Errorf("Must specify a Name in Auth Method Update") + } + + r := a.c.newRequest("PUT", "/v1/acl/auth-method/"+url.QueryEscape(method.Name)) + r.setWriteOptions(q) + r.obj = method + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + var out ACLAuthMethod + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + + return &out, wm, nil +} + +// AuthMethodDelete deletes an auth method given its Name. +func (a *ACL) AuthMethodDelete(methodName string, q *WriteOptions) (*WriteMeta, error) { + if methodName == "" { + return nil, fmt.Errorf("Must specify a Name in Auth Method Delete") + } + + r := a.c.newRequest("DELETE", "/v1/acl/auth-method/"+url.QueryEscape(methodName)) + r.setWriteOptions(q) + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, err + } + resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + return wm, nil +} + +// AuthMethodRead retrieves the auth method. Returns nil if not found. +func (a *ACL) AuthMethodRead(methodName string, q *QueryOptions) (*ACLAuthMethod, *QueryMeta, error) { + if methodName == "" { + return nil, nil, fmt.Errorf("Must specify a Name in Auth Method Read") + } + + r := a.c.newRequest("GET", "/v1/acl/auth-method/"+url.QueryEscape(methodName)) + r.setQueryOptions(q) + found, rtt, resp, err := requireNotFoundOrOK(a.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + if !found { + return nil, qm, nil + } + + var out ACLAuthMethod + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + + return &out, qm, nil +} + +// AuthMethodList retrieves a listing of all auth methods. 
The listing does not +// include some metadata for the auth method as those should be retrieved by +// subsequent calls to AuthMethodRead. +func (a *ACL) AuthMethodList(q *QueryOptions) ([]*ACLAuthMethodListEntry, *QueryMeta, error) { + r := a.c.newRequest("GET", "/v1/acl/auth-methods") + r.setQueryOptions(q) + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var entries []*ACLAuthMethodListEntry + if err := decodeBody(resp, &entries); err != nil { + return nil, nil, err + } + return entries, qm, nil +} + +// BindingRuleCreate will create a new binding rule. It is not allowed for the +// binding rule parameter's ID field to be set as this will be generated by +// Consul while processing the request. +func (a *ACL) BindingRuleCreate(rule *ACLBindingRule, q *WriteOptions) (*ACLBindingRule, *WriteMeta, error) { + if rule.ID != "" { + return nil, nil, fmt.Errorf("Cannot specify an ID in Binding Rule Creation") + } + + r := a.c.newRequest("PUT", "/v1/acl/binding-rule") + r.setWriteOptions(q) + r.obj = rule + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + var out ACLBindingRule + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + + return &out, wm, nil +} + +// BindingRuleUpdate updates a binding rule. The ID field of the role binding +// rule parameter must be set to an existing binding rule ID. 
+func (a *ACL) BindingRuleUpdate(rule *ACLBindingRule, q *WriteOptions) (*ACLBindingRule, *WriteMeta, error) { + if rule.ID == "" { + return nil, nil, fmt.Errorf("Must specify an ID in Binding Rule Update") + } + + r := a.c.newRequest("PUT", "/v1/acl/binding-rule/"+rule.ID) + r.setWriteOptions(q) + r.obj = rule + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + var out ACLBindingRule + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + + return &out, wm, nil +} + +// BindingRuleDelete deletes a binding rule given its ID. +func (a *ACL) BindingRuleDelete(bindingRuleID string, q *WriteOptions) (*WriteMeta, error) { + r := a.c.newRequest("DELETE", "/v1/acl/binding-rule/"+bindingRuleID) + r.setWriteOptions(q) + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, err + } + resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + return wm, nil +} + +// BindingRuleRead retrieves the binding rule details. Returns nil if not found. +func (a *ACL) BindingRuleRead(bindingRuleID string, q *QueryOptions) (*ACLBindingRule, *QueryMeta, error) { + r := a.c.newRequest("GET", "/v1/acl/binding-rule/"+bindingRuleID) + r.setQueryOptions(q) + found, rtt, resp, err := requireNotFoundOrOK(a.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + if !found { + return nil, qm, nil + } + + var out ACLBindingRule + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + + return &out, qm, nil +} + +// BindingRuleList retrieves a listing of all binding rules. 
+func (a *ACL) BindingRuleList(methodName string, q *QueryOptions) ([]*ACLBindingRule, *QueryMeta, error) { + r := a.c.newRequest("GET", "/v1/acl/binding-rules") + if methodName != "" { + r.params.Set("authmethod", methodName) + } + r.setQueryOptions(q) + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var entries []*ACLBindingRule + if err := decodeBody(resp, &entries); err != nil { + return nil, nil, err + } + return entries, qm, nil +} + +// Login is used to exchange auth method credentials for a newly-minted Consul Token. +func (a *ACL) Login(auth *ACLLoginParams, q *WriteOptions) (*ACLToken, *WriteMeta, error) { + r := a.c.newRequest("POST", "/v1/acl/login") + r.setWriteOptions(q) + r.obj = auth + + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + var out ACLToken + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return &out, wm, nil +} + +// Logout is used to destroy a Consul Token created via Login(). 
+func (a *ACL) Logout(q *WriteOptions) (*WriteMeta, error) { + r := a.c.newRequest("POST", "/v1/acl/logout") + r.setWriteOptions(q) + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, err + } + resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + return wm, nil +} diff --git a/vendor/github.com/hashicorp/consul/api/agent.go b/vendor/github.com/hashicorp/consul/api/agent.go index 412b37df5..04043ba84 100644 --- a/vendor/github.com/hashicorp/consul/api/agent.go +++ b/vendor/github.com/hashicorp/consul/api/agent.go @@ -84,11 +84,11 @@ type AgentService struct { Address string Weights AgentWeights EnableTagOverride bool - CreateIndex uint64 `json:",omitempty"` - ModifyIndex uint64 `json:",omitempty"` - ContentHash string `json:",omitempty"` + CreateIndex uint64 `json:",omitempty" bexpr:"-"` + ModifyIndex uint64 `json:",omitempty" bexpr:"-"` + ContentHash string `json:",omitempty" bexpr:"-"` // DEPRECATED (ProxyDestination) - remove this field - ProxyDestination string `json:",omitempty"` + ProxyDestination string `json:",omitempty" bexpr:"-"` Proxy *AgentServiceConnectProxyConfig `json:",omitempty"` Connect *AgentServiceConnect `json:",omitempty"` } @@ -103,8 +103,8 @@ type AgentServiceChecksInfo struct { // AgentServiceConnect represents the Connect configuration of a service. 
type AgentServiceConnect struct { Native bool `json:",omitempty"` - Proxy *AgentServiceConnectProxy `json:",omitempty"` - SidecarService *AgentServiceRegistration `json:",omitempty"` + Proxy *AgentServiceConnectProxy `json:",omitempty" bexpr:"-"` + SidecarService *AgentServiceRegistration `json:",omitempty" bexpr:"-"` } // AgentServiceConnectProxy represents the Connect Proxy configuration of a @@ -112,7 +112,7 @@ type AgentServiceConnect struct { type AgentServiceConnectProxy struct { ExecMode ProxyExecMode `json:",omitempty"` Command []string `json:",omitempty"` - Config map[string]interface{} `json:",omitempty"` + Config map[string]interface{} `json:",omitempty" bexpr:"-"` Upstreams []Upstream `json:",omitempty"` } @@ -123,7 +123,7 @@ type AgentServiceConnectProxyConfig struct { DestinationServiceID string `json:",omitempty"` LocalServiceAddress string `json:",omitempty"` LocalServicePort int `json:",omitempty"` - Config map[string]interface{} `json:",omitempty"` + Config map[string]interface{} `json:",omitempty" bexpr:"-"` Upstreams []Upstream } @@ -278,9 +278,9 @@ type ConnectProxyConfig struct { ContentHash string // DEPRECATED(managed-proxies) - this struct is re-used for sidecar configs // but they don't need ExecMode or Command - ExecMode ProxyExecMode `json:",omitempty"` - Command []string `json:",omitempty"` - Config map[string]interface{} + ExecMode ProxyExecMode `json:",omitempty"` + Command []string `json:",omitempty"` + Config map[string]interface{} `bexpr:"-"` Upstreams []Upstream } @@ -292,7 +292,7 @@ type Upstream struct { Datacenter string `json:",omitempty"` LocalBindAddress string `json:",omitempty"` LocalBindPort int `json:",omitempty"` - Config map[string]interface{} `json:",omitempty"` + Config map[string]interface{} `json:",omitempty" bexpr:"-"` } // Agent can be used to query the Agent endpoints @@ -387,7 +387,14 @@ func (a *Agent) NodeName() (string, error) { // Checks returns the locally registered checks func (a *Agent) Checks() 
(map[string]*AgentCheck, error) { + return a.ChecksWithFilter("") +} + +// ChecksWithFilter returns a subset of the locally registered checks that match +// the given filter expression +func (a *Agent) ChecksWithFilter(filter string) (map[string]*AgentCheck, error) { r := a.c.newRequest("GET", "/v1/agent/checks") + r.filterQuery(filter) _, resp, err := requireOK(a.c.doRequest(r)) if err != nil { return nil, err @@ -403,7 +410,14 @@ func (a *Agent) Checks() (map[string]*AgentCheck, error) { // Services returns the locally registered services func (a *Agent) Services() (map[string]*AgentService, error) { + return a.ServicesWithFilter("") +} + +// ServicesWithFilter returns a subset of the locally registered services that match +// the given filter expression +func (a *Agent) ServicesWithFilter(filter string) (map[string]*AgentService, error) { r := a.c.newRequest("GET", "/v1/agent/services") + r.filterQuery(filter) _, resp, err := requireOK(a.c.doRequest(r)) if err != nil { return nil, err diff --git a/vendor/github.com/hashicorp/consul/api/api.go b/vendor/github.com/hashicorp/consul/api/api.go index 39a0ad3e1..4b17ff6cd 100644 --- a/vendor/github.com/hashicorp/consul/api/api.go +++ b/vendor/github.com/hashicorp/consul/api/api.go @@ -30,6 +30,10 @@ const ( // the HTTP token. HTTPTokenEnvName = "CONSUL_HTTP_TOKEN" + // HTTPTokenFileEnvName defines an environment variable name which sets + // the HTTP token file. + HTTPTokenFileEnvName = "CONSUL_HTTP_TOKEN_FILE" + // HTTPAuthEnvName defines an environment variable name which sets // the HTTP authentication header. HTTPAuthEnvName = "CONSUL_HTTP_AUTH" @@ -146,6 +150,10 @@ type QueryOptions struct { // ctx is an optional context pass through to the underlying HTTP // request layer. Use Context() and WithContext() to manage this. ctx context.Context + + // Filter requests filtering data prior to it being returned. The string + // is a go-bexpr compatible expression. 
+ Filter string } func (o *QueryOptions) Context() context.Context { @@ -276,6 +284,10 @@ type Config struct { // which overrides the agent's default token. Token string + // TokenFile is a file containing the current token to use for this client. + // If provided it is read once at startup and never again. + TokenFile string + TLSConfig TLSConfig } @@ -339,6 +351,10 @@ func defaultConfig(transportFn func() *http.Transport) *Config { config.Address = addr } + if tokenFile := os.Getenv(HTTPTokenFileEnvName); tokenFile != "" { + config.TokenFile = tokenFile + } + if token := os.Getenv(HTTPTokenEnvName); token != "" { config.Token = token } @@ -445,6 +461,7 @@ func (c *Config) GenerateEnv() []string { env = append(env, fmt.Sprintf("%s=%s", HTTPAddrEnvName, c.Address), fmt.Sprintf("%s=%s", HTTPTokenEnvName, c.Token), + fmt.Sprintf("%s=%s", HTTPTokenFileEnvName, c.TokenFile), fmt.Sprintf("%s=%t", HTTPSSLEnvName, c.Scheme == "https"), fmt.Sprintf("%s=%s", HTTPCAFile, c.TLSConfig.CAFile), fmt.Sprintf("%s=%s", HTTPCAPath, c.TLSConfig.CAPath), @@ -537,6 +554,19 @@ func NewClient(config *Config) (*Client, error) { config.Address = parts[1] } + // If the TokenFile is set, always use that, even if a Token is configured. + // This is because when TokenFile is set it is read into the Token field. + // We want any derived clients to have to re-read the token file. 
+ if config.TokenFile != "" { + data, err := ioutil.ReadFile(config.TokenFile) + if err != nil { + return nil, fmt.Errorf("Error loading token file: %s", err) + } + + if token := strings.TrimSpace(string(data)); token != "" { + config.Token = token + } + } if config.Token == "" { config.Token = defConfig.Token } @@ -614,6 +644,9 @@ func (r *request) setQueryOptions(q *QueryOptions) { if q.Near != "" { r.params.Set("near", q.Near) } + if q.Filter != "" { + r.params.Set("filter", q.Filter) + } if len(q.NodeMeta) > 0 { for key, value := range q.NodeMeta { r.params.Add("node-meta", key+":"+value) @@ -813,6 +846,8 @@ func (c *Client) write(endpoint string, in, out interface{}, q *WriteOptions) (* } // parseQueryMeta is used to help parse query meta-data +// +// TODO(rb): bug? the error from this function is never handled func parseQueryMeta(resp *http.Response, q *QueryMeta) error { header := resp.Header @@ -890,10 +925,42 @@ func requireOK(d time.Duration, resp *http.Response, e error) (time.Duration, *h return d, nil, e } if resp.StatusCode != 200 { - var buf bytes.Buffer - io.Copy(&buf, resp.Body) - resp.Body.Close() - return d, nil, fmt.Errorf("Unexpected response code: %d (%s)", resp.StatusCode, buf.Bytes()) + return d, nil, generateUnexpectedResponseCodeError(resp) } return d, resp, nil } + +func (req *request) filterQuery(filter string) { + if filter == "" { + return + } + + req.params.Set("filter", filter) +} + +// generateUnexpectedResponseCodeError consumes the rest of the body, closes +// the body stream and generates an error indicating the status code was +// unexpected. 
+func generateUnexpectedResponseCodeError(resp *http.Response) error { + var buf bytes.Buffer + io.Copy(&buf, resp.Body) + resp.Body.Close() + return fmt.Errorf("Unexpected response code: %d (%s)", resp.StatusCode, buf.Bytes()) +} + +func requireNotFoundOrOK(d time.Duration, resp *http.Response, e error) (bool, time.Duration, *http.Response, error) { + if e != nil { + if resp != nil { + resp.Body.Close() + } + return false, d, nil, e + } + switch resp.StatusCode { + case 200: + return true, d, resp, nil + case 404: + return false, d, resp, nil + default: + return false, d, nil, generateUnexpectedResponseCodeError(resp) + } +} diff --git a/vendor/github.com/hashicorp/consul/api/config_entry.go b/vendor/github.com/hashicorp/consul/api/config_entry.go new file mode 100644 index 000000000..0c18963fd --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/config_entry.go @@ -0,0 +1,255 @@ +package api + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "strconv" + "strings" + + "github.com/mitchellh/mapstructure" +) + +const ( + ServiceDefaults string = "service-defaults" + ProxyDefaults string = "proxy-defaults" + ProxyConfigGlobal string = "global" +) + +type ConfigEntry interface { + GetKind() string + GetName() string + GetCreateIndex() uint64 + GetModifyIndex() uint64 +} + +type ServiceConfigEntry struct { + Kind string + Name string + Protocol string + CreateIndex uint64 + ModifyIndex uint64 +} + +func (s *ServiceConfigEntry) GetKind() string { + return s.Kind +} + +func (s *ServiceConfigEntry) GetName() string { + return s.Name +} + +func (s *ServiceConfigEntry) GetCreateIndex() uint64 { + return s.CreateIndex +} + +func (s *ServiceConfigEntry) GetModifyIndex() uint64 { + return s.ModifyIndex +} + +type ProxyConfigEntry struct { + Kind string + Name string + Config map[string]interface{} + CreateIndex uint64 + ModifyIndex uint64 +} + +func (p *ProxyConfigEntry) GetKind() string { + return p.Kind +} + +func (p *ProxyConfigEntry) GetName() string { + return 
p.Name +} + +func (p *ProxyConfigEntry) GetCreateIndex() uint64 { + return p.CreateIndex +} + +func (p *ProxyConfigEntry) GetModifyIndex() uint64 { + return p.ModifyIndex +} + +type rawEntryListResponse struct { + kind string + Entries []map[string]interface{} +} + +func makeConfigEntry(kind, name string) (ConfigEntry, error) { + switch kind { + case ServiceDefaults: + return &ServiceConfigEntry{Name: name}, nil + case ProxyDefaults: + return &ProxyConfigEntry{Name: name}, nil + default: + return nil, fmt.Errorf("invalid config entry kind: %s", kind) + } +} + +func DecodeConfigEntry(raw map[string]interface{}) (ConfigEntry, error) { + var entry ConfigEntry + + kindVal, ok := raw["Kind"] + if !ok { + kindVal, ok = raw["kind"] + } + if !ok { + return nil, fmt.Errorf("Payload does not contain a kind/Kind key at the top level") + } + + if kindStr, ok := kindVal.(string); ok { + newEntry, err := makeConfigEntry(kindStr, "") + if err != nil { + return nil, err + } + entry = newEntry + } else { + return nil, fmt.Errorf("Kind value in payload is not a string") + } + + decodeConf := &mapstructure.DecoderConfig{ + DecodeHook: mapstructure.StringToTimeDurationHookFunc(), + Result: &entry, + WeaklyTypedInput: true, + } + + decoder, err := mapstructure.NewDecoder(decodeConf) + if err != nil { + return nil, err + } + + return entry, decoder.Decode(raw) +} + +func DecodeConfigEntryFromJSON(data []byte) (ConfigEntry, error) { + var raw map[string]interface{} + if err := json.Unmarshal(data, &raw); err != nil { + return nil, err + } + + return DecodeConfigEntry(raw) +} + +// Config can be used to query the Config endpoints +type ConfigEntries struct { + c *Client +} + +// Config returns a handle to the Config endpoints +func (c *Client) ConfigEntries() *ConfigEntries { + return &ConfigEntries{c} +} + +func (conf *ConfigEntries) Get(kind string, name string, q *QueryOptions) (ConfigEntry, *QueryMeta, error) { + if kind == "" || name == "" { + return nil, nil, fmt.Errorf("Both kind 
and name parameters must not be empty") + } + + entry, err := makeConfigEntry(kind, name) + if err != nil { + return nil, nil, err + } + + r := conf.c.newRequest("GET", fmt.Sprintf("/v1/config/%s/%s", kind, name)) + r.setQueryOptions(q) + rtt, resp, err := requireOK(conf.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + if err := decodeBody(resp, entry); err != nil { + return nil, nil, err + } + + return entry, qm, nil +} + +func (conf *ConfigEntries) List(kind string, q *QueryOptions) ([]ConfigEntry, *QueryMeta, error) { + if kind == "" { + return nil, nil, fmt.Errorf("The kind parameter must not be empty") + } + + r := conf.c.newRequest("GET", fmt.Sprintf("/v1/config/%s", kind)) + r.setQueryOptions(q) + rtt, resp, err := requireOK(conf.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var raw []map[string]interface{} + if err := decodeBody(resp, &raw); err != nil { + return nil, nil, err + } + + var entries []ConfigEntry + for _, rawEntry := range raw { + entry, err := DecodeConfigEntry(rawEntry) + if err != nil { + return nil, nil, err + } + entries = append(entries, entry) + } + + return entries, qm, nil +} + +func (conf *ConfigEntries) Set(entry ConfigEntry, w *WriteOptions) (bool, *WriteMeta, error) { + return conf.set(entry, nil, w) +} + +func (conf *ConfigEntries) CAS(entry ConfigEntry, index uint64, w *WriteOptions) (bool, *WriteMeta, error) { + return conf.set(entry, map[string]string{"cas": strconv.FormatUint(index, 10)}, w) +} + +func (conf *ConfigEntries) set(entry ConfigEntry, params map[string]string, w *WriteOptions) (bool, *WriteMeta, error) { + r := conf.c.newRequest("PUT", "/v1/config") + r.setWriteOptions(w) + for param, value := range params { + r.params.Set(param, value) + } + r.obj = entry + rtt, resp, err := 
requireOK(conf.c.doRequest(r)) + if err != nil { + return false, nil, err + } + defer resp.Body.Close() + + var buf bytes.Buffer + if _, err := io.Copy(&buf, resp.Body); err != nil { + return false, nil, fmt.Errorf("Failed to read response: %v", err) + } + res := strings.Contains(buf.String(), "true") + + wm := &WriteMeta{RequestTime: rtt} + return res, wm, nil +} + +func (conf *ConfigEntries) Delete(kind string, name string, w *WriteOptions) (*WriteMeta, error) { + if kind == "" || name == "" { + return nil, fmt.Errorf("Both kind and name parameters must not be empty") + } + + r := conf.c.newRequest("DELETE", fmt.Sprintf("/v1/config/%s/%s", kind, name)) + r.setWriteOptions(w) + rtt, resp, err := requireOK(conf.c.doRequest(r)) + if err != nil { + return nil, err + } + resp.Body.Close() + wm := &WriteMeta{RequestTime: rtt} + return wm, nil +} diff --git a/vendor/github.com/hashicorp/consul/api/go.mod b/vendor/github.com/hashicorp/consul/api/go.mod index 25f931c55..e19821891 100644 --- a/vendor/github.com/hashicorp/consul/api/go.mod +++ b/vendor/github.com/hashicorp/consul/api/go.mod @@ -5,7 +5,7 @@ go 1.12 replace github.com/hashicorp/consul/sdk => ../sdk require ( - github.com/hashicorp/consul/sdk v0.1.0 + github.com/hashicorp/consul/sdk v0.1.1 github.com/hashicorp/go-cleanhttp v0.5.1 github.com/hashicorp/go-rootcerts v1.0.0 github.com/hashicorp/go-uuid v1.0.1 diff --git a/vendor/github.com/hashicorp/go-sockaddr/template/GNUmakefile b/vendor/github.com/hashicorp/go-sockaddr/template/GNUmakefile new file mode 100644 index 000000000..ce1e274e4 --- /dev/null +++ b/vendor/github.com/hashicorp/go-sockaddr/template/GNUmakefile @@ -0,0 +1,2 @@ +test:: + go test diff --git a/vendor/github.com/hashicorp/go-sockaddr/template/README.md b/vendor/github.com/hashicorp/go-sockaddr/template/README.md new file mode 100644 index 000000000..c40905af7 --- /dev/null +++ b/vendor/github.com/hashicorp/go-sockaddr/template/README.md @@ -0,0 +1,6 @@ +# sockaddr/template + +sockaddr's 
template library. See +the +[sockaddr/template](https://godoc.org/github.com/hashicorp/go-sockaddr/template) +docs for details on how to use this template. diff --git a/vendor/github.com/hashicorp/go-sockaddr/template/doc.go b/vendor/github.com/hashicorp/go-sockaddr/template/doc.go new file mode 100644 index 000000000..8cc6730a4 --- /dev/null +++ b/vendor/github.com/hashicorp/go-sockaddr/template/doc.go @@ -0,0 +1,311 @@ +/* + +Package sockaddr/template provides a text/template interface the SockAddr helper +functions. The primary entry point into the sockaddr/template package is +through its Parse() call. For example: + + import ( + "fmt" + + template "github.com/hashicorp/go-sockaddr/template" + ) + + results, err := template.Parse(`{{ GetPrivateIP }}`) + if err != nil { + fmt.Errorf("Unable to find a private IP address: %v", err) + } + fmt.Printf("My Private IP address is: %s\n", results) + +Below is a list of builtin template functions and details re: their usage. It +is possible to add additional functions by calling ParseIfAddrsTemplate +directly. + +In general, the calling convention for this template library is to seed a list +of initial interfaces via one of the Get*Interfaces() calls, then filter, sort, +and extract the necessary attributes for use as string input. This template +interface is primarily geared toward resolving specific values that are only +available at runtime, but can be defined as a heuristic for execution when a +config file is parsed. + +All functions, unless noted otherwise, return an array of IfAddr structs making +it possible to `sort`, `filter`, `limit`, seek (via the `offset` function), or +`unique` the list. To extract useful string information, the `attr` and `join` +functions return a single string value. See below for details. + +Important note: see the +https://github.com/hashicorp/go-sockaddr/tree/master/cmd/sockaddr utility for +more examples and for a CLI utility to experiment with the template syntax. 
+ +`GetAllInterfaces` - Returns an exhaustive set of IfAddr structs available on +the host. `GetAllInterfaces` is the initial input and accessible as the initial +"dot" in the pipeline. + +Example: + + {{ GetAllInterfaces }} + + +`GetDefaultInterfaces` - Returns one IfAddr for every IP that is on the +interface containing the default route for the host. + +Example: + + {{ GetDefaultInterfaces }} + +`GetPrivateInterfaces` - Returns one IfAddr for every forwardable IP address +that is included in RFC 6890 and whose interface is marked as up. NOTE: RFC 6890 is a more exhaustive +version of RFC1918 because it spans IPv4 and IPv6, however, RFC6890 does permit the +inclusion of likely undesired addresses such as multicast, therefore our version +of "private" also filters out non-forwardable addresses. + +Example: + + {{ GetPrivateInterfaces | sort "default" | join "address" " " }} + + +`GetPublicInterfaces` - Returns a list of IfAddr structs whos IPs are +forwardable, do not match RFC 6890, and whose interface is marked up. + +Example: + + {{ GetPublicInterfaces | sort "default" | join "name" " " }} + + +`GetPrivateIP` - Helper function that returns a string of the first IP address +from GetPrivateInterfaces. + +Example: + + {{ GetPrivateIP }} + + +`GetPrivateIPs` - Helper function that returns a string of the all private IP +addresses on the host. + +Example: + + {{ GetPrivateIPs }} + + +`GetPublicIP` - Helper function that returns a string of the first IP from +GetPublicInterfaces. + +Example: + + {{ GetPublicIP }} + +`GetPublicIPs` - Helper function that returns a space-delimited string of the +all public IP addresses on the host. + +Example: + + {{ GetPrivateIPs }} + + +`GetInterfaceIP` - Helper function that returns a string of the first IP from +the named interface. + +Example: + + {{ GetInterfaceIP "en0" }} + + + +`GetInterfaceIPs` - Helper function that returns a space-delimited list of all +IPs on a given interface. 
+ +Example: + + {{ GetInterfaceIPs "en0" }} + + +`sort` - Sorts the IfAddrs result based on its arguments. `sort` takes one +argument, a list of ways to sort its IfAddrs argument. The list of sort +criteria is comma separated (`,`): + - `address`, `+address`: Ascending sort of IfAddrs by Address + - `-address`: Descending sort of IfAddrs by Address + - `default`, `+default`: Ascending sort of IfAddrs, IfAddr with a default route first + - `-default`: Descending sort of IfAddrs, IfAttr with default route last + - `name`, `+name`: Ascending sort of IfAddrs by lexical ordering of interface name + - `-name`: Descending sort of IfAddrs by lexical ordering of interface name + - `port`, `+port`: Ascending sort of IfAddrs by port number + - `-port`: Descending sort of IfAddrs by port number + - `private`, `+private`: Ascending sort of IfAddrs with private addresses first + - `-private`: Descending sort IfAddrs with private addresses last + - `size`, `+size`: Ascending sort of IfAddrs by their network size as determined + by their netmask (larger networks first) + - `-size`: Descending sort of IfAddrs by their network size as determined by their + netmask (smaller networks first) + - `type`, `+type`: Ascending sort of IfAddrs by the type of the IfAddr (Unix, + IPv4, then IPv6) + - `-type`: Descending sort of IfAddrs by the type of the IfAddr (IPv6, IPv4, Unix) + +Example: + + {{ GetPrivateInterfaces | sort "default,-type,size,+address" }} + + +`exclude` and `include`: Filters IfAddrs based on the selector criteria and its +arguments. Both `exclude` and `include` take two arguments. The list of +available filtering criteria is: + - "address": Filter IfAddrs based on a regexp matching the string representation + of the address + - "flag","flags": Filter IfAddrs based on the list of flags specified. Multiple + flags can be passed together using the pipe character (`|`) to create an inclusive + bitmask of flags. The list of flags is included below. 
+ - "name": Filter IfAddrs based on a regexp matching the interface name. + - "network": Filter IfAddrs based on whether a netowkr is included in a given + CIDR. More than one CIDR can be passed in if each network is separated by + the pipe character (`|`). + - "port": Filter IfAddrs based on an exact match of the port number (number must + be expressed as a string) + - "rfc", "rfcs": Filter IfAddrs based on the matching RFC. If more than one RFC + is specified, the list of RFCs can be joined together using the pipe character (`|`). + - "size": Filter IfAddrs based on the exact match of the mask size. + - "type": Filter IfAddrs based on their SockAddr type. Multiple types can be + specified together by using the pipe character (`|`). Valid types include: + `ip`, `ipv4`, `ipv6`, and `unix`. + +Example: + + {{ GetPrivateInterfaces | exclude "type" "IPv6" }} + + +`unique`: Removes duplicate entries from the IfAddrs list, assuming the list has +already been sorted. `unique` only takes one argument: + - "address": Removes duplicates with the same address + - "name": Removes duplicates with the same interface names + +Example: + + {{ GetAllInterfaces | sort "default,-type,address" | unique "name" }} + + +`limit`: Reduces the size of the list to the specified value. + +Example: + + {{ GetPrivateInterfaces | limit 1 }} + + +`offset`: Seeks into the list by the specified value. A negative value can be +used to seek from the end of the list. + +Example: + + {{ GetPrivateInterfaces | offset "-2" | limit 1 }} + + +`math`: Perform a "math" operation on each member of the list and return new +values. `math` takes two arguments, the attribute to operate on and the +operation's value. + +Supported operations include: + + - `address`: Adds the value, a positive or negative value expressed as a + decimal string, to the address. The sign is required. This value is + allowed to over or underflow networks (e.g. 127.255.255.255 `"address" "+1"` + will return "128.0.0.0"). 
Addresses will wrap at IPv4 or IPv6 boundaries. + - `network`: Add the value, a positive or negative value expressed as a + decimal string, to the network address. The sign is required. Positive + values are added to the network address. Negative values are subtracted + from the network's broadcast address (e.g. 127.0.0.1 `"network" "-1"` will + return "127.255.255.255"). Values that overflow the network size will + safely wrap. + - `mask`: Applies the given network mask to the address. The network mask is + expressed as a decimal value (e.g. network mask "24" corresponds to + `255.255.255.0`). After applying the network mask, the network mask of the + resulting address will be either the applied network mask or the network mask + of the input address depending on which network is larger + (e.g. 192.168.10.20/24 `"mask" "16"` will return "192.168.0.0/16" but + 192.168.10.20/24 `"mask" "28"` will return "192.168.10.16/24"). + +Example: + + {{ GetPrivateInterfaces | include "type" "IP" | math "address" "+256" | attr "address" }} + {{ GetPrivateInterfaces | include "type" "IP" | math "address" "-256" | attr "address" }} + {{ GetPrivateInterfaces | include "type" "IP" | math "network" "+2" | attr "address" }} + {{ GetPrivateInterfaces | include "type" "IP" | math "network" "-2" | attr "address" }} + {{ GetPrivateInterfaces | include "type" "IP" | math "mask" "24" | attr "address" }} + {{ GetPrivateInterfaces | include "flags" "forwardable|up" | include "type" "IPv4" | math "network" "+2" | attr "address" }} + + +`attr`: Extracts a single attribute of the first member of the list and returns +it as a string. `attr` takes a single attribute name. The list of available +attributes is type-specific and shared between `join`. See below for a list of +supported attributes. + +Example: + + {{ GetAllInterfaces | exclude "flags" "up" | attr "address" }} + + +`Attr`: Extracts a single attribute from an `IfAttr` and in every other way +performs the same as the `attr`. 
+ +Example: + + {{ with $ifAddrs := GetAllInterfaces | include "type" "IP" | sort "+type,+address" -}} + {{- range $ifAddrs -}} + {{- Attr "address" . }} -- {{ Attr "network" . }}/{{ Attr "size" . -}} + {{- end -}} + {{- end }} + + +`join`: Similar to `attr`, `join` extracts all matching attributes of the list +and returns them as a string joined by the separator, the second argument to +`join`. The list of available attributes is type-specific and shared between +`join`. + +Example: + + {{ GetAllInterfaces | include "flags" "forwardable" | join "address" " " }} + + +`exclude` and `include` flags: + - `broadcast` + - `down`: Is the interface down? + - `forwardable`: Is the IP forwardable? + - `global unicast` + - `interface-local multicast` + - `link-local multicast` + - `link-local unicast` + - `loopback` + - `multicast` + - `point-to-point` + - `unspecified`: Is the IfAddr the IPv6 unspecified address? + - `up`: Is the interface up? + + +Attributes for `attr`, `Attr`, and `join`: + +SockAddr Type: + - `string` + - `type` + +IPAddr Type: + - `address` + - `binary` + - `first_usable` + - `hex` + - `host` + - `last_usable` + - `mask_bits` + - `netmask` + - `network` + - `octets`: Decimal values per byte + - `port` + - `size`: Number of hosts in the network + +IPv4Addr Type: + - `broadcast` + - `uint32`: unsigned integer representation of the value + +IPv6Addr Type: + - `uint128`: unsigned integer representation of the value + +UnixSock Type: + - `path` + +*/ +package template diff --git a/vendor/github.com/hashicorp/go-sockaddr/template/template.go b/vendor/github.com/hashicorp/go-sockaddr/template/template.go new file mode 100644 index 000000000..bbed51361 --- /dev/null +++ b/vendor/github.com/hashicorp/go-sockaddr/template/template.go @@ -0,0 +1,155 @@ +package template + +import ( + "bytes" + "fmt" + "text/template" + + "github.com/hashicorp/errwrap" + sockaddr "github.com/hashicorp/go-sockaddr" +) + +var ( + // SourceFuncs is a map of all top-level functions 
that generate + // sockaddr data types. + SourceFuncs template.FuncMap + + // SortFuncs is a map of all functions used in sorting + SortFuncs template.FuncMap + + // FilterFuncs is a map of all functions used in sorting + FilterFuncs template.FuncMap + + // HelperFuncs is a map of all functions used in sorting + HelperFuncs template.FuncMap +) + +func init() { + SourceFuncs = template.FuncMap{ + // GetAllInterfaces - Returns an exhaustive set of IfAddr + // structs available on the host. `GetAllInterfaces` is the + // initial input and accessible as the initial "dot" in the + // pipeline. + "GetAllInterfaces": sockaddr.GetAllInterfaces, + + // GetDefaultInterfaces - Returns one IfAddr for every IP that + // is on the interface containing the default route for the + // host. + "GetDefaultInterfaces": sockaddr.GetDefaultInterfaces, + + // GetPrivateInterfaces - Returns one IfAddr for every IP that + // matches RFC 6890, are attached to the interface with the + // default route, and are forwardable IP addresses. NOTE: RFC + // 6890 is a more exhaustive version of RFC1918 because it spans + // IPv4 and IPv6, however it doespermit the inclusion of likely + // undesired addresses such as multicast, therefore our + // definition of a "private" address also excludes + // non-forwardable IP addresses (as defined by the IETF). + "GetPrivateInterfaces": sockaddr.GetPrivateInterfaces, + + // GetPublicInterfaces - Returns a list of IfAddr that do not + // match RFC 6890, are attached to the default route, and are + // forwardable. 
+ "GetPublicInterfaces": sockaddr.GetPublicInterfaces, + } + + SortFuncs = template.FuncMap{ + "sort": sockaddr.SortIfBy, + } + + FilterFuncs = template.FuncMap{ + "exclude": sockaddr.ExcludeIfs, + "include": sockaddr.IncludeIfs, + } + + HelperFuncs = template.FuncMap{ + // Misc functions that operate on IfAddrs inputs + "attr": Attr, + "join": sockaddr.JoinIfAddrs, + "limit": sockaddr.LimitIfAddrs, + "offset": sockaddr.OffsetIfAddrs, + "unique": sockaddr.UniqueIfAddrsBy, + + // Misc math functions that operate on a single IfAddr input + "math": sockaddr.IfAddrsMath, + + // Return a Private RFC 6890 IP address string that is attached + // to the default route and a forwardable address. + "GetPrivateIP": sockaddr.GetPrivateIP, + + // Return all Private RFC 6890 IP addresses as a space-delimited string of + // IP addresses. Addresses returned do not have to be on the interface with + // a default route. + "GetPrivateIPs": sockaddr.GetPrivateIPs, + + // Return a Public RFC 6890 IP address string that is attached + // to the default route and a forwardable address. + "GetPublicIP": sockaddr.GetPublicIP, + + // Return allPublic RFC 6890 IP addresses as a space-delimited string of IP + // addresses. Addresses returned do not have to be on the interface with a + // default route. + "GetPublicIPs": sockaddr.GetPublicIPs, + + // Return the first IP address of the named interface, sorted by + // the largest network size. + "GetInterfaceIP": sockaddr.GetInterfaceIP, + + // Return all IP addresses on the named interface, sorted by the largest + // network size. + "GetInterfaceIPs": sockaddr.GetInterfaceIPs, + } +} + +// Attr returns the attribute from the ifAddrRaw argument. If the argument is +// an IfAddrs, only the first element will be evaluated for resolution. 
+func Attr(selectorName string, ifAddrsRaw interface{}) (string, error) { + switch v := ifAddrsRaw.(type) { + case sockaddr.IfAddr: + return sockaddr.IfAttr(selectorName, v) + case sockaddr.IfAddrs: + return sockaddr.IfAttrs(selectorName, v) + default: + return "", fmt.Errorf("unable to obtain attribute %s from type %T (%v)", selectorName, ifAddrsRaw, ifAddrsRaw) + } +} + +// Parse parses input as template input using the addresses available on the +// host, then returns the string output if there are no errors. +func Parse(input string) (string, error) { + addrs, err := sockaddr.GetAllInterfaces() + if err != nil { + return "", errwrap.Wrapf("unable to query interface addresses: {{err}}", err) + } + + return ParseIfAddrs(input, addrs) +} + +// ParseIfAddrs parses input as template input using the IfAddrs inputs, then +// returns the string output if there are no errors. +func ParseIfAddrs(input string, ifAddrs sockaddr.IfAddrs) (string, error) { + return ParseIfAddrsTemplate(input, ifAddrs, template.New("sockaddr.Parse")) +} + +// ParseIfAddrsTemplate parses input as template input using the IfAddrs inputs, +// then returns the string output if there are no errors. +func ParseIfAddrsTemplate(input string, ifAddrs sockaddr.IfAddrs, tmplIn *template.Template) (string, error) { + // Create a template, add the function map, and parse the text. + tmpl, err := tmplIn.Option("missingkey=error"). + Funcs(SourceFuncs). + Funcs(SortFuncs). + Funcs(FilterFuncs). + Funcs(HelperFuncs). 
+ Parse(input) + if err != nil { + return "", errwrap.Wrapf(fmt.Sprintf("unable to parse template %+q: {{err}}", input), err) + } + + var outWriter bytes.Buffer + err = tmpl.Execute(&outWriter, ifAddrs) + if err != nil { + return "", errwrap.Wrapf(fmt.Sprintf("unable to execute sockaddr input %+q: {{err}}", input), err) + } + + return outWriter.String(), nil +} diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/pointerutil/pointer.go b/vendor/github.com/hashicorp/vault/sdk/helper/pointerutil/pointer.go new file mode 100644 index 000000000..73952313f --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/helper/pointerutil/pointer.go @@ -0,0 +1,28 @@ +package pointerutil + +import ( + "os" + "time" +) + +// StringPtr returns a pointer to a string value +func StringPtr(s string) *string { + return &s +} + +// BoolPtr returns a pointer to a boolean value +func BoolPtr(b bool) *bool { + return &b +} + +// TimeDurationPtr returns a pointer to a time duration value +func TimeDurationPtr(duration string) *time.Duration { + d, _ := time.ParseDuration(duration) + + return &d +} + +// FileModePtr returns a pointer to the given os.FileMode +func FileModePtr(o os.FileMode) *os.FileMode { + return &o +} diff --git a/vendor/github.com/lib/pq/oid/gen.go b/vendor/github.com/lib/pq/oid/gen.go deleted file mode 100644 index 7c634cdc5..000000000 --- a/vendor/github.com/lib/pq/oid/gen.go +++ /dev/null @@ -1,93 +0,0 @@ -// +build ignore - -// Generate the table of OID values -// Run with 'go run gen.go'. -package main - -import ( - "database/sql" - "fmt" - "log" - "os" - "os/exec" - "strings" - - _ "github.com/lib/pq" -) - -// OID represent a postgres Object Identifier Type. -type OID struct { - ID int - Type string -} - -// Name returns an upper case version of the oid type. 
-func (o OID) Name() string { - return strings.ToUpper(o.Type) -} - -func main() { - datname := os.Getenv("PGDATABASE") - sslmode := os.Getenv("PGSSLMODE") - - if datname == "" { - os.Setenv("PGDATABASE", "pqgotest") - } - - if sslmode == "" { - os.Setenv("PGSSLMODE", "disable") - } - - db, err := sql.Open("postgres", "") - if err != nil { - log.Fatal(err) - } - rows, err := db.Query(` - SELECT typname, oid - FROM pg_type WHERE oid < 10000 - ORDER BY oid; - `) - if err != nil { - log.Fatal(err) - } - oids := make([]*OID, 0) - for rows.Next() { - var oid OID - if err = rows.Scan(&oid.Type, &oid.ID); err != nil { - log.Fatal(err) - } - oids = append(oids, &oid) - } - if err = rows.Err(); err != nil { - log.Fatal(err) - } - cmd := exec.Command("gofmt") - cmd.Stderr = os.Stderr - w, err := cmd.StdinPipe() - if err != nil { - log.Fatal(err) - } - f, err := os.Create("types.go") - if err != nil { - log.Fatal(err) - } - cmd.Stdout = f - err = cmd.Start() - if err != nil { - log.Fatal(err) - } - fmt.Fprintln(w, "// Code generated by gen.go. 
DO NOT EDIT.") - fmt.Fprintln(w, "\npackage oid") - fmt.Fprintln(w, "const (") - for _, oid := range oids { - fmt.Fprintf(w, "T_%s Oid = %d\n", oid.Type, oid.ID) - } - fmt.Fprintln(w, ")") - fmt.Fprintln(w, "var TypeName = map[Oid]string{") - for _, oid := range oids { - fmt.Fprintf(w, "T_%s: \"%s\",\n", oid.Type, oid.Name()) - } - fmt.Fprintln(w, "}") - w.Close() - cmd.Wait() -} diff --git a/vendor/github.com/mattn/go-shellwords/.travis.yml b/vendor/github.com/mattn/go-shellwords/.travis.yml new file mode 100644 index 000000000..16d1430aa --- /dev/null +++ b/vendor/github.com/mattn/go-shellwords/.travis.yml @@ -0,0 +1,8 @@ +language: go +go: + - tip +before_install: + - go get github.com/mattn/goveralls + - go get golang.org/x/tools/cmd/cover +script: + - $HOME/gopath/bin/goveralls -repotoken 2FMhp57u8LcstKL9B190fLTcEnBtAAiEL diff --git a/vendor/github.com/mattn/go-shellwords/LICENSE b/vendor/github.com/mattn/go-shellwords/LICENSE new file mode 100644 index 000000000..740fa9313 --- /dev/null +++ b/vendor/github.com/mattn/go-shellwords/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2017 Yasuhiro Matsumoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/mattn/go-shellwords/README.md b/vendor/github.com/mattn/go-shellwords/README.md new file mode 100644 index 000000000..b1d235c78 --- /dev/null +++ b/vendor/github.com/mattn/go-shellwords/README.md @@ -0,0 +1,47 @@ +# go-shellwords + +[![Coverage Status](https://coveralls.io/repos/mattn/go-shellwords/badge.png?branch=master)](https://coveralls.io/r/mattn/go-shellwords?branch=master) +[![Build Status](https://travis-ci.org/mattn/go-shellwords.svg?branch=master)](https://travis-ci.org/mattn/go-shellwords) + +Parse line as shell words. + +## Usage + +```go +args, err := shellwords.Parse("./foo --bar=baz") +// args should be ["./foo", "--bar=baz"] +``` + +```go +os.Setenv("FOO", "bar") +p := shellwords.NewParser() +p.ParseEnv = true +args, err := p.Parse("./foo $FOO") +// args should be ["./foo", "bar"] +``` + +```go +p := shellwords.NewParser() +p.ParseBacktick = true +args, err := p.Parse("./foo `echo $SHELL`") +// args should be ["./foo", "/bin/bash"] +``` + +```go +shellwords.ParseBacktick = true +p := shellwords.NewParser() +args, err := p.Parse("./foo `echo $SHELL`") +// args should be ["./foo", "/bin/bash"] +``` + +# Thanks + +This is based on cpan module [Parse::CommandLine](https://metacpan.org/pod/Parse::CommandLine). 
+ +# License + +under the MIT License: http://mattn.mit-license.org/2017 + +# Author + +Yasuhiro Matsumoto (a.k.a mattn) diff --git a/vendor/github.com/mattn/go-shellwords/go.mod b/vendor/github.com/mattn/go-shellwords/go.mod new file mode 100644 index 000000000..8d96dbd5f --- /dev/null +++ b/vendor/github.com/mattn/go-shellwords/go.mod @@ -0,0 +1 @@ +module github.com/mattn/go-shellwords diff --git a/vendor/github.com/mattn/go-shellwords/shellwords.go b/vendor/github.com/mattn/go-shellwords/shellwords.go new file mode 100644 index 000000000..41429d8f2 --- /dev/null +++ b/vendor/github.com/mattn/go-shellwords/shellwords.go @@ -0,0 +1,195 @@ +package shellwords + +import ( + "errors" + "os" + "regexp" + "strings" +) + +var ( + ParseEnv bool = false + ParseBacktick bool = false +) + +var envRe = regexp.MustCompile(`\$({[a-zA-Z0-9_]+}|[a-zA-Z0-9_]+)`) + +func isSpace(r rune) bool { + switch r { + case ' ', '\t', '\r', '\n': + return true + } + return false +} + +func replaceEnv(getenv func(string) string, s string) string { + if getenv == nil { + getenv = os.Getenv + } + + return envRe.ReplaceAllStringFunc(s, func(s string) string { + s = s[1:] + if s[0] == '{' { + s = s[1 : len(s)-1] + } + return getenv(s) + }) +} + +type Parser struct { + ParseEnv bool + ParseBacktick bool + Position int + + // If ParseEnv is true, use this for getenv. + // If nil, use os.Getenv. 
+ Getenv func(string) string +} + +func NewParser() *Parser { + return &Parser{ + ParseEnv: ParseEnv, + ParseBacktick: ParseBacktick, + Position: 0, + } +} + +func (p *Parser) Parse(line string) ([]string, error) { + args := []string{} + buf := "" + var escaped, doubleQuoted, singleQuoted, backQuote, dollarQuote bool + backtick := "" + + pos := -1 + got := false + +loop: + for i, r := range line { + if escaped { + buf += string(r) + escaped = false + continue + } + + if r == '\\' { + if singleQuoted { + buf += string(r) + } else { + escaped = true + } + continue + } + + if isSpace(r) { + if singleQuoted || doubleQuoted || backQuote || dollarQuote { + buf += string(r) + backtick += string(r) + } else if got { + if p.ParseEnv { + buf = replaceEnv(p.Getenv, buf) + } + args = append(args, buf) + buf = "" + got = false + } + continue + } + + switch r { + case '`': + if !singleQuoted && !doubleQuoted && !dollarQuote { + if p.ParseBacktick { + if backQuote { + out, err := shellRun(backtick) + if err != nil { + return nil, err + } + buf = out + } + backtick = "" + backQuote = !backQuote + continue + } + backtick = "" + backQuote = !backQuote + } + case ')': + if !singleQuoted && !doubleQuoted && !backQuote { + if p.ParseBacktick { + if dollarQuote { + out, err := shellRun(backtick) + if err != nil { + return nil, err + } + if r == ')' { + buf = buf[:len(buf)-len(backtick)-2] + out + } else { + buf = buf[:len(buf)-len(backtick)-1] + out + } + } + backtick = "" + dollarQuote = !dollarQuote + continue + } + backtick = "" + dollarQuote = !dollarQuote + } + case '(': + if !singleQuoted && !doubleQuoted && !backQuote { + if !dollarQuote && strings.HasSuffix(buf, "$") { + dollarQuote = true + buf += "(" + continue + } else { + return nil, errors.New("invalid command line string") + } + } + case '"': + if !singleQuoted && !dollarQuote { + doubleQuoted = !doubleQuoted + continue + } + case '\'': + if !doubleQuoted && !dollarQuote { + singleQuoted = !singleQuoted + continue + } + 
case ';', '&', '|', '<', '>': + if !(escaped || singleQuoted || doubleQuoted || backQuote) { + if r == '>' && len(buf) > 0 { + if c := buf[0]; '0' <= c && c <= '9' { + i -= 1 + got = false + } + } + pos = i + break loop + } + } + + got = true + buf += string(r) + if backQuote || dollarQuote { + backtick += string(r) + } + } + + if got { + if p.ParseEnv { + buf = replaceEnv(p.Getenv, buf) + } + args = append(args, buf) + } + + if escaped || singleQuoted || doubleQuoted || backQuote || dollarQuote { + return nil, errors.New("invalid command line string") + } + + p.Position = pos + + return args, nil +} + +func Parse(line string) ([]string, error) { + return NewParser().Parse(line) +} diff --git a/vendor/github.com/mattn/go-shellwords/util_go15.go b/vendor/github.com/mattn/go-shellwords/util_go15.go new file mode 100644 index 000000000..180f00f0b --- /dev/null +++ b/vendor/github.com/mattn/go-shellwords/util_go15.go @@ -0,0 +1,24 @@ +// +build !go1.6 + +package shellwords + +import ( + "os" + "os/exec" + "runtime" + "strings" +) + +func shellRun(line string) (string, error) { + var b []byte + var err error + if runtime.GOOS == "windows" { + b, err = exec.Command(os.Getenv("COMSPEC"), "/c", line).Output() + } else { + b, err = exec.Command(os.Getenv("SHELL"), "-c", line).Output() + } + if err != nil { + return "", err + } + return strings.TrimSpace(string(b)), nil +} diff --git a/vendor/github.com/mattn/go-shellwords/util_posix.go b/vendor/github.com/mattn/go-shellwords/util_posix.go new file mode 100644 index 000000000..eaf1011d6 --- /dev/null +++ b/vendor/github.com/mattn/go-shellwords/util_posix.go @@ -0,0 +1,22 @@ +// +build !windows,go1.6 + +package shellwords + +import ( + "errors" + "os" + "os/exec" + "strings" +) + +func shellRun(line string) (string, error) { + shell := os.Getenv("SHELL") + b, err := exec.Command(shell, "-c", line).Output() + if err != nil { + if eerr, ok := err.(*exec.ExitError); ok { + b = eerr.Stderr + } + return "", errors.New(err.Error() 
+ ":" + string(b)) + } + return strings.TrimSpace(string(b)), nil +} diff --git a/vendor/github.com/mattn/go-shellwords/util_windows.go b/vendor/github.com/mattn/go-shellwords/util_windows.go new file mode 100644 index 000000000..e46f89a1f --- /dev/null +++ b/vendor/github.com/mattn/go-shellwords/util_windows.go @@ -0,0 +1,22 @@ +// +build windows,go1.6 + +package shellwords + +import ( + "errors" + "os" + "os/exec" + "strings" +) + +func shellRun(line string) (string, error) { + shell := os.Getenv("COMSPEC") + b, err := exec.Command(shell, "/c", line).Output() + if err != nil { + if eerr, ok := err.(*exec.ExitError); ok { + b = eerr.Stderr + } + return "", errors.New(err.Error() + ":" + string(b)) + } + return strings.TrimSpace(string(b)), nil +} diff --git a/vendor/github.com/mitchellh/hashstructure/LICENSE b/vendor/github.com/mitchellh/hashstructure/LICENSE new file mode 100644 index 000000000..a3866a291 --- /dev/null +++ b/vendor/github.com/mitchellh/hashstructure/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/hashstructure/README.md b/vendor/github.com/mitchellh/hashstructure/README.md new file mode 100644 index 000000000..28ce45a3e --- /dev/null +++ b/vendor/github.com/mitchellh/hashstructure/README.md @@ -0,0 +1,65 @@ +# hashstructure [![GoDoc](https://godoc.org/github.com/mitchellh/hashstructure?status.svg)](https://godoc.org/github.com/mitchellh/hashstructure) + +hashstructure is a Go library for creating a unique hash value +for arbitrary values in Go. + +This can be used to key values in a hash (for use in a map, set, etc.) +that are complex. The most common use case is comparing two values without +sending data across the network, caching values locally (de-dup), and so on. + +## Features + + * Hash any arbitrary Go value, including complex types. + + * Tag a struct field to ignore it and not affect the hash value. + + * Tag a slice type struct field to treat it as a set where ordering + doesn't affect the hash code but the field itself is still taken into + account to create the hash value. + + * Optionally specify a custom hash function to optimize for speed, collision + avoidance for your data set, etc. + + * Optionally hash the output of `.String()` on structs that implement fmt.Stringer, + allowing effective hashing of time.Time + +## Installation + +Standard `go get`: + +``` +$ go get github.com/mitchellh/hashstructure +``` + +## Usage & Example + +For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/hashstructure). 
+ +A quick code example is shown below: + +```go +type ComplexStruct struct { + Name string + Age uint + Metadata map[string]interface{} +} + +v := ComplexStruct{ + Name: "mitchellh", + Age: 64, + Metadata: map[string]interface{}{ + "car": true, + "location": "California", + "siblings": []string{"Bob", "John"}, + }, +} + +hash, err := hashstructure.Hash(v, nil) +if err != nil { + panic(err) +} + +fmt.Printf("%d", hash) +// Output: +// 2307517237273902113 +``` diff --git a/vendor/github.com/mitchellh/hashstructure/go.mod b/vendor/github.com/mitchellh/hashstructure/go.mod new file mode 100644 index 000000000..966582aa9 --- /dev/null +++ b/vendor/github.com/mitchellh/hashstructure/go.mod @@ -0,0 +1 @@ +module github.com/mitchellh/hashstructure diff --git a/vendor/github.com/mitchellh/hashstructure/hashstructure.go b/vendor/github.com/mitchellh/hashstructure/hashstructure.go new file mode 100644 index 000000000..ea13a1583 --- /dev/null +++ b/vendor/github.com/mitchellh/hashstructure/hashstructure.go @@ -0,0 +1,358 @@ +package hashstructure + +import ( + "encoding/binary" + "fmt" + "hash" + "hash/fnv" + "reflect" +) + +// ErrNotStringer is returned when there's an error with hash:"string" +type ErrNotStringer struct { + Field string +} + +// Error implements error for ErrNotStringer +func (ens *ErrNotStringer) Error() string { + return fmt.Sprintf("hashstructure: %s has hash:\"string\" set, but does not implement fmt.Stringer", ens.Field) +} + +// HashOptions are options that are available for hashing. +type HashOptions struct { + // Hasher is the hash function to use. If this isn't set, it will + // default to FNV. + Hasher hash.Hash64 + + // TagName is the struct tag to look at when hashing the structure. + // By default this is "hash". + TagName string + + // ZeroNil is flag determining if nil pointer should be treated equal + // to a zero value of pointed type. By default this is false. + ZeroNil bool +} + +// Hash returns the hash value of an arbitrary value. 
+// +// If opts is nil, then default options will be used. See HashOptions +// for the default values. The same *HashOptions value cannot be used +// concurrently. None of the values within a *HashOptions struct are +// safe to read/write while hashing is being done. +// +// Notes on the value: +// +// * Unexported fields on structs are ignored and do not affect the +// hash value. +// +// * Adding an exported field to a struct with the zero value will change +// the hash value. +// +// For structs, the hashing can be controlled using tags. For example: +// +// struct { +// Name string +// UUID string `hash:"ignore"` +// } +// +// The available tag values are: +// +// * "ignore" or "-" - The field will be ignored and not affect the hash code. +// +// * "set" - The field will be treated as a set, where ordering doesn't +// affect the hash code. This only works for slices. +// +// * "string" - The field will be hashed as a string, only works when the +// field implements fmt.Stringer +// +func Hash(v interface{}, opts *HashOptions) (uint64, error) { + // Create default options + if opts == nil { + opts = &HashOptions{} + } + if opts.Hasher == nil { + opts.Hasher = fnv.New64() + } + if opts.TagName == "" { + opts.TagName = "hash" + } + + // Reset the hash + opts.Hasher.Reset() + + // Create our walker and walk the structure + w := &walker{ + h: opts.Hasher, + tag: opts.TagName, + zeronil: opts.ZeroNil, + } + return w.visit(reflect.ValueOf(v), nil) +} + +type walker struct { + h hash.Hash64 + tag string + zeronil bool +} + +type visitOpts struct { + // Flags are a bitmask of flags to affect behavior of this visit + Flags visitFlag + + // Information about the struct containing this field + Struct interface{} + StructField string +} + +func (w *walker) visit(v reflect.Value, opts *visitOpts) (uint64, error) { + t := reflect.TypeOf(0) + + // Loop since these can be wrapped in multiple layers of pointers + // and interfaces. 
+ for { + // If we have an interface, dereference it. We have to do this up + // here because it might be a nil in there and the check below must + // catch that. + if v.Kind() == reflect.Interface { + v = v.Elem() + continue + } + + if v.Kind() == reflect.Ptr { + if w.zeronil { + t = v.Type().Elem() + } + v = reflect.Indirect(v) + continue + } + + break + } + + // If it is nil, treat it like a zero. + if !v.IsValid() { + v = reflect.Zero(t) + } + + // Binary writing can use raw ints, we have to convert to + // a sized-int, we'll choose the largest... + switch v.Kind() { + case reflect.Int: + v = reflect.ValueOf(int64(v.Int())) + case reflect.Uint: + v = reflect.ValueOf(uint64(v.Uint())) + case reflect.Bool: + var tmp int8 + if v.Bool() { + tmp = 1 + } + v = reflect.ValueOf(tmp) + } + + k := v.Kind() + + // We can shortcut numeric values by directly binary writing them + if k >= reflect.Int && k <= reflect.Complex64 { + // A direct hash calculation + w.h.Reset() + err := binary.Write(w.h, binary.LittleEndian, v.Interface()) + return w.h.Sum64(), err + } + + switch k { + case reflect.Array: + var h uint64 + l := v.Len() + for i := 0; i < l; i++ { + current, err := w.visit(v.Index(i), nil) + if err != nil { + return 0, err + } + + h = hashUpdateOrdered(w.h, h, current) + } + + return h, nil + + case reflect.Map: + var includeMap IncludableMap + if opts != nil && opts.Struct != nil { + if v, ok := opts.Struct.(IncludableMap); ok { + includeMap = v + } + } + + // Build the hash for the map. We do this by XOR-ing all the key + // and value hashes. This makes it deterministic despite ordering. 
+ var h uint64 + for _, k := range v.MapKeys() { + v := v.MapIndex(k) + if includeMap != nil { + incl, err := includeMap.HashIncludeMap( + opts.StructField, k.Interface(), v.Interface()) + if err != nil { + return 0, err + } + if !incl { + continue + } + } + + kh, err := w.visit(k, nil) + if err != nil { + return 0, err + } + vh, err := w.visit(v, nil) + if err != nil { + return 0, err + } + + fieldHash := hashUpdateOrdered(w.h, kh, vh) + h = hashUpdateUnordered(h, fieldHash) + } + + return h, nil + + case reflect.Struct: + parent := v.Interface() + var include Includable + if impl, ok := parent.(Includable); ok { + include = impl + } + + t := v.Type() + h, err := w.visit(reflect.ValueOf(t.Name()), nil) + if err != nil { + return 0, err + } + + l := v.NumField() + for i := 0; i < l; i++ { + if innerV := v.Field(i); v.CanSet() || t.Field(i).Name != "_" { + var f visitFlag + fieldType := t.Field(i) + if fieldType.PkgPath != "" { + // Unexported + continue + } + + tag := fieldType.Tag.Get(w.tag) + if tag == "ignore" || tag == "-" { + // Ignore this field + continue + } + + // if string is set, use the string value + if tag == "string" { + if impl, ok := innerV.Interface().(fmt.Stringer); ok { + innerV = reflect.ValueOf(impl.String()) + } else { + return 0, &ErrNotStringer{ + Field: v.Type().Field(i).Name, + } + } + } + + // Check if we implement includable and check it + if include != nil { + incl, err := include.HashInclude(fieldType.Name, innerV) + if err != nil { + return 0, err + } + if !incl { + continue + } + } + + switch tag { + case "set": + f |= visitFlagSet + } + + kh, err := w.visit(reflect.ValueOf(fieldType.Name), nil) + if err != nil { + return 0, err + } + + vh, err := w.visit(innerV, &visitOpts{ + Flags: f, + Struct: parent, + StructField: fieldType.Name, + }) + if err != nil { + return 0, err + } + + fieldHash := hashUpdateOrdered(w.h, kh, vh) + h = hashUpdateUnordered(h, fieldHash) + } + } + + return h, nil + + case reflect.Slice: + // We have two 
behaviors here. If it isn't a set, then we just + // visit all the elements. If it is a set, then we do a deterministic + // hash code. + var h uint64 + var set bool + if opts != nil { + set = (opts.Flags & visitFlagSet) != 0 + } + l := v.Len() + for i := 0; i < l; i++ { + current, err := w.visit(v.Index(i), nil) + if err != nil { + return 0, err + } + + if set { + h = hashUpdateUnordered(h, current) + } else { + h = hashUpdateOrdered(w.h, h, current) + } + } + + return h, nil + + case reflect.String: + // Directly hash + w.h.Reset() + _, err := w.h.Write([]byte(v.String())) + return w.h.Sum64(), err + + default: + return 0, fmt.Errorf("unknown kind to hash: %s", k) + } + +} + +func hashUpdateOrdered(h hash.Hash64, a, b uint64) uint64 { + // For ordered updates, use a real hash function + h.Reset() + + // We just panic if the binary writes fail because we are writing + // an int64 which should never be fail-able. + e1 := binary.Write(h, binary.LittleEndian, a) + e2 := binary.Write(h, binary.LittleEndian, b) + if e1 != nil { + panic(e1) + } + if e2 != nil { + panic(e2) + } + + return h.Sum64() +} + +func hashUpdateUnordered(a, b uint64) uint64 { + return a ^ b +} + +// visitFlag is used as a bitmask for affecting visit behavior +type visitFlag uint + +const ( + visitFlagInvalid visitFlag = iota + visitFlagSet = iota << 1 +) diff --git a/vendor/github.com/mitchellh/hashstructure/include.go b/vendor/github.com/mitchellh/hashstructure/include.go new file mode 100644 index 000000000..b6289c0be --- /dev/null +++ b/vendor/github.com/mitchellh/hashstructure/include.go @@ -0,0 +1,15 @@ +package hashstructure + +// Includable is an interface that can optionally be implemented by +// a struct. It will be called for each field in the struct to check whether +// it should be included in the hash. +type Includable interface { + HashInclude(field string, v interface{}) (bool, error) +} + +// IncludableMap is an interface that can optionally be implemented by +// a struct. 
It will be called when a map-type field is found to ask the +// struct if the map item should be included in the hash. +type IncludableMap interface { + HashIncludeMap(field string, k, v interface{}) (bool, error) +} diff --git a/vendor/github.com/ory/dockertest/docker/pkg/archive/example_changes.go b/vendor/github.com/ory/dockertest/docker/pkg/archive/example_changes.go deleted file mode 100644 index d840f5a7d..000000000 --- a/vendor/github.com/ory/dockertest/docker/pkg/archive/example_changes.go +++ /dev/null @@ -1,97 +0,0 @@ -// +build ignore - -// Simple tool to create an archive stream from an old and new directory -// -// By default it will stream the comparison of two temporary directories with junk files -package main - -import ( - "flag" - "fmt" - "io" - "io/ioutil" - "os" - "path" - - "github.com/ory/dockertest/docker/pkg/archive" - "github.com/sirupsen/logrus" -) - -var ( - flDebug = flag.Bool("D", false, "debugging output") - flNewDir = flag.String("newdir", "", "") - flOldDir = flag.String("olddir", "", "") - log = logrus.New() -) - -func main() { - flag.Usage = func() { - fmt.Println("Produce a tar from comparing two directory paths. 
By default a demo tar is created of around 200 files (including hardlinks)") - fmt.Printf("%s [OPTIONS]\n", os.Args[0]) - flag.PrintDefaults() - } - flag.Parse() - log.Out = os.Stderr - if (len(os.Getenv("DEBUG")) > 0) || *flDebug { - logrus.SetLevel(logrus.DebugLevel) - } - var newDir, oldDir string - - if len(*flNewDir) == 0 { - var err error - newDir, err = ioutil.TempDir("", "docker-test-newDir") - if err != nil { - log.Fatal(err) - } - defer os.RemoveAll(newDir) - if _, err := prepareUntarSourceDirectory(100, newDir, true); err != nil { - log.Fatal(err) - } - } else { - newDir = *flNewDir - } - - if len(*flOldDir) == 0 { - oldDir, err := ioutil.TempDir("", "docker-test-oldDir") - if err != nil { - log.Fatal(err) - } - defer os.RemoveAll(oldDir) - } else { - oldDir = *flOldDir - } - - changes, err := archive.ChangesDirs(newDir, oldDir) - if err != nil { - log.Fatal(err) - } - - a, err := archive.ExportChanges(newDir, changes) - if err != nil { - log.Fatal(err) - } - defer a.Close() - - i, err := io.Copy(os.Stdout, a) - if err != nil && err != io.EOF { - log.Fatal(err) - } - fmt.Fprintf(os.Stderr, "wrote archive of %d bytes", i) -} - -func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) { - fileData := []byte("fooo") - for n := 0; n < numberOfFiles; n++ { - fileName := fmt.Sprintf("file-%d", n) - if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil { - return 0, err - } - if makeLinks { - if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil { - return 0, err - } - } - } - totalSize := numberOfFiles * len(fileData) - return totalSize, nil -} diff --git a/vendor/github.com/shirou/gopsutil/disk/types_freebsd.go b/vendor/github.com/shirou/gopsutil/disk/types_freebsd.go deleted file mode 100644 index dd6ddc4f7..000000000 --- a/vendor/github.com/shirou/gopsutil/disk/types_freebsd.go +++ /dev/null @@ -1,88 +0,0 @@ -// +build ignore -// 
Hand writing: _Ctype_struct___0 - -/* -Input to cgo -godefs. - -*/ - -package disk - -/* -#include -#include -#include - -enum { - sizeofPtr = sizeof(void*), -}; - -// because statinfo has long double snap_time, redefine with changing long long -struct statinfo2 { - long cp_time[CPUSTATES]; - long tk_nin; - long tk_nout; - struct devinfo *dinfo; - long long snap_time; -}; -*/ -import "C" - -// Machine characteristics; for internal use. - -const ( - sizeofPtr = C.sizeofPtr - sizeofShort = C.sizeof_short - sizeofInt = C.sizeof_int - sizeofLong = C.sizeof_long - sizeofLongLong = C.sizeof_longlong - sizeofLongDouble = C.sizeof_longlong - - DEVSTAT_NO_DATA = 0x00 - DEVSTAT_READ = 0x01 - DEVSTAT_WRITE = 0x02 - DEVSTAT_FREE = 0x03 - - // from sys/mount.h - MNT_RDONLY = 0x00000001 /* read only filesystem */ - MNT_SYNCHRONOUS = 0x00000002 /* filesystem written synchronously */ - MNT_NOEXEC = 0x00000004 /* can't exec from filesystem */ - MNT_NOSUID = 0x00000008 /* don't honor setuid bits on fs */ - MNT_UNION = 0x00000020 /* union with underlying filesystem */ - MNT_ASYNC = 0x00000040 /* filesystem written asynchronously */ - MNT_SUIDDIR = 0x00100000 /* special handling of SUID on dirs */ - MNT_SOFTDEP = 0x00200000 /* soft updates being done */ - MNT_NOSYMFOLLOW = 0x00400000 /* do not follow symlinks */ - MNT_GJOURNAL = 0x02000000 /* GEOM journal support enabled */ - MNT_MULTILABEL = 0x04000000 /* MAC support for individual objects */ - MNT_ACLS = 0x08000000 /* ACL support enabled */ - MNT_NOATIME = 0x10000000 /* disable update of file access time */ - MNT_NOCLUSTERR = 0x40000000 /* disable cluster read */ - MNT_NOCLUSTERW = 0x80000000 /* disable cluster write */ - MNT_NFS4ACLS = 0x00000010 - - MNT_WAIT = 1 /* synchronously wait for I/O to complete */ - MNT_NOWAIT = 2 /* start all I/O, but do not wait for it */ - MNT_LAZY = 3 /* push data not written by filesystem syncer */ - MNT_SUSPEND = 4 /* Suspend file system after sync */ -) - -const ( - sizeOfDevstat = 
C.sizeof_struct_devstat -) - -// Basic types - -type ( - _C_short C.short - _C_int C.int - _C_long C.long - _C_long_long C.longlong - _C_long_double C.longlong -) - -type Statfs C.struct_statfs -type Fsid C.struct_fsid - -type Devstat C.struct_devstat -type Bintime C.struct_bintime diff --git a/vendor/github.com/shirou/gopsutil/disk/types_openbsd.go b/vendor/github.com/shirou/gopsutil/disk/types_openbsd.go deleted file mode 100644 index 1e3ddef5c..000000000 --- a/vendor/github.com/shirou/gopsutil/disk/types_openbsd.go +++ /dev/null @@ -1,70 +0,0 @@ -// +build ignore -// Hand writing: _Ctype_struct___0 - -/* -Input to cgo -godefs. -*/ - -package disk - -/* -#include -#include -#include - -enum { - sizeofPtr = sizeof(void*), -}; - -*/ -import "C" - -// Machine characteristics; for internal use. - -const ( - sizeofPtr = C.sizeofPtr - sizeofShort = C.sizeof_short - sizeofInt = C.sizeof_int - sizeofLong = C.sizeof_long - sizeofLongLong = C.sizeof_longlong - sizeofLongDouble = C.sizeof_longlong - - DEVSTAT_NO_DATA = 0x00 - DEVSTAT_READ = 0x01 - DEVSTAT_WRITE = 0x02 - DEVSTAT_FREE = 0x03 - - // from sys/mount.h - MNT_RDONLY = 0x00000001 /* read only filesystem */ - MNT_SYNCHRONOUS = 0x00000002 /* filesystem written synchronously */ - MNT_NOEXEC = 0x00000004 /* can't exec from filesystem */ - MNT_NOSUID = 0x00000008 /* don't honor setuid bits on fs */ - MNT_NODEV = 0x00000010 /* don't interpret special files */ - MNT_ASYNC = 0x00000040 /* filesystem written asynchronously */ - - MNT_WAIT = 1 /* synchronously wait for I/O to complete */ - MNT_NOWAIT = 2 /* start all I/O, but do not wait for it */ - MNT_LAZY = 3 /* push data not written by filesystem syncer */ -) - -const ( - sizeOfDiskstats = C.sizeof_struct_diskstats -) - -// Basic types - -type ( - _C_short C.short - _C_int C.int - _C_long C.long - _C_long_long C.longlong - _C_long_double C.longlong -) - -type Statfs C.struct_statfs -type Diskstats C.struct_diskstats -type Fsid C.fsid_t -type Timeval C.struct_timeval - 
-type Diskstat C.struct_diskstat -type Bintime C.struct_bintime diff --git a/vendor/github.com/shirou/gopsutil/host/types_darwin.go b/vendor/github.com/shirou/gopsutil/host/types_darwin.go deleted file mode 100644 index b85822788..000000000 --- a/vendor/github.com/shirou/gopsutil/host/types_darwin.go +++ /dev/null @@ -1,17 +0,0 @@ -// +build ignore -// plus hand editing about timeval - -/* -Input to cgo -godefs. -*/ - -package host - -/* -#include -#include -*/ -import "C" - -type Utmpx C.struct_utmpx -type Timeval C.struct_timeval diff --git a/vendor/github.com/shirou/gopsutil/host/types_freebsd.go b/vendor/github.com/shirou/gopsutil/host/types_freebsd.go deleted file mode 100644 index bbdce0c6a..000000000 --- a/vendor/github.com/shirou/gopsutil/host/types_freebsd.go +++ /dev/null @@ -1,44 +0,0 @@ -// +build ignore - -/* -Input to cgo -godefs. -*/ - -package host - -/* -#define KERNEL -#include -#include -#include -#include "freebsd_headers/utxdb.h" - -enum { - sizeofPtr = sizeof(void*), -}; - -*/ -import "C" - -// Machine characteristics; for internal use. - -const ( - sizeofPtr = C.sizeofPtr - sizeofShort = C.sizeof_short - sizeofInt = C.sizeof_int - sizeofLong = C.sizeof_long - sizeofLongLong = C.sizeof_longlong - sizeOfUtmpx = C.sizeof_struct_futx -) - -// Basic types - -type ( - _C_short C.short - _C_int C.int - _C_long C.long - _C_long_long C.longlong -) - -type Utmp C.struct_utmp // for FreeBSD 9.0 compatibility -type Utmpx C.struct_futx diff --git a/vendor/github.com/shirou/gopsutil/host/types_linux.go b/vendor/github.com/shirou/gopsutil/host/types_linux.go deleted file mode 100644 index 8adecb6cf..000000000 --- a/vendor/github.com/shirou/gopsutil/host/types_linux.go +++ /dev/null @@ -1,42 +0,0 @@ -// +build ignore - -/* -Input to cgo -godefs. -*/ - -package host - -/* -#include -#include - -enum { - sizeofPtr = sizeof(void*), -}; - -*/ -import "C" - -// Machine characteristics; for internal use. 
- -const ( - sizeofPtr = C.sizeofPtr - sizeofShort = C.sizeof_short - sizeofInt = C.sizeof_int - sizeofLong = C.sizeof_long - sizeofLongLong = C.sizeof_longlong - sizeOfUtmp = C.sizeof_struct_utmp -) - -// Basic types - -type ( - _C_short C.short - _C_int C.int - _C_long C.long - _C_long_long C.longlong -) - -type utmp C.struct_utmp -type exit_status C.struct_exit_status -type timeval C.struct_timeval diff --git a/vendor/github.com/shirou/gopsutil/host/types_openbsd.go b/vendor/github.com/shirou/gopsutil/host/types_openbsd.go deleted file mode 100644 index 9ebb97ce5..000000000 --- a/vendor/github.com/shirou/gopsutil/host/types_openbsd.go +++ /dev/null @@ -1,43 +0,0 @@ -// +build ignore - -/* -Input to cgo -godefs. -*/ - -package host - -/* -#define KERNEL -#include -#include -#include - -enum { - sizeofPtr = sizeof(void*), -}; - -*/ -import "C" - -// Machine characteristics; for internal use. - -const ( - sizeofPtr = C.sizeofPtr - sizeofShort = C.sizeof_short - sizeofInt = C.sizeof_int - sizeofLong = C.sizeof_long - sizeofLongLong = C.sizeof_longlong - sizeOfUtmp = C.sizeof_struct_utmp -) - -// Basic types - -type ( - _C_short C.short - _C_int C.int - _C_long C.long - _C_long_long C.longlong -) - -type Utmp C.struct_utmp -type Timeval C.struct_timeval diff --git a/vendor/github.com/shirou/gopsutil/mem/types_openbsd.go b/vendor/github.com/shirou/gopsutil/mem/types_openbsd.go deleted file mode 100644 index 83cb91a19..000000000 --- a/vendor/github.com/shirou/gopsutil/mem/types_openbsd.go +++ /dev/null @@ -1,34 +0,0 @@ -// +build ignore - -/* -Input to cgo -godefs. -*/ - -package mem - -/* -#include -#include -#include -#include - -*/ -import "C" - -// Machine characteristics; for internal use. 
- -const ( - CTLVm = 2 - CTLVfs = 10 - VmUvmexp = 4 // get uvmexp - VfsGeneric = 0 - VfsBcacheStat = 3 -) - -const ( - sizeOfUvmexp = C.sizeof_struct_uvmexp - sizeOfBcachestats = C.sizeof_struct_bcachestats -) - -type Uvmexp C.struct_uvmexp -type Bcachestats C.struct_bcachestats diff --git a/vendor/github.com/shirou/gopsutil/process/types_darwin.go b/vendor/github.com/shirou/gopsutil/process/types_darwin.go deleted file mode 100644 index 21216cd09..000000000 --- a/vendor/github.com/shirou/gopsutil/process/types_darwin.go +++ /dev/null @@ -1,160 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Hand Writing -// - all pointer in ExternProc to uint64 - -// +build ignore - -/* -Input to cgo -godefs. -*/ - -// +godefs map struct_in_addr [4]byte /* in_addr */ -// +godefs map struct_in6_addr [16]byte /* in6_addr */ -// +godefs map struct_ [16]byte /* in6_addr */ - -package process - -/* -#define __DARWIN_UNIX03 0 -#define KERNEL -#define _DARWIN_USE_64_BIT_INODE -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -enum { - sizeofPtr = sizeof(void*), -}; - -union sockaddr_all { - struct sockaddr s1; // this one gets used for fields - struct sockaddr_in s2; // these pad it out - struct sockaddr_in6 s3; - struct sockaddr_un s4; - struct sockaddr_dl s5; -}; - -struct sockaddr_any { - struct sockaddr addr; - char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; -}; - -struct ucred_queue { - struct ucred *tqe_next; - struct ucred **tqe_prev; - TRACEBUF -}; - -*/ -import "C" - -// Machine characteristics; for internal use. 
- -const ( - sizeofPtr = C.sizeofPtr - sizeofShort = C.sizeof_short - sizeofInt = C.sizeof_int - sizeofLong = C.sizeof_long - sizeofLongLong = C.sizeof_longlong -) - -// Basic types - -type ( - _C_short C.short - _C_int C.int - _C_long C.long - _C_long_long C.longlong -) - -// Time - -type Timespec C.struct_timespec - -type Timeval C.struct_timeval - -// Processes - -type Rusage C.struct_rusage - -type Rlimit C.struct_rlimit - -type UGid_t C.gid_t - -type KinfoProc C.struct_kinfo_proc - -type Eproc C.struct_eproc - -type Proc C.struct_proc - -type Session C.struct_session - -type ucred C.struct_ucred - -type Uucred C.struct__ucred - -type Upcred C.struct__pcred - -type Vmspace C.struct_vmspace - -type Sigacts C.struct_sigacts - -type ExternProc C.struct_extern_proc - -type Itimerval C.struct_itimerval - -type Vnode C.struct_vnode - -type Pgrp C.struct_pgrp - -type UserStruct C.struct_user - -type Au_session C.struct_au_session - -type Posix_cred C.struct_posix_cred - -type Label C.struct_label - -type AuditinfoAddr C.struct_auditinfo_addr -type AuMask C.struct_au_mask -type AuTidAddr C.struct_au_tid_addr - -// TAILQ(ucred) -type UcredQueue C.struct_ucred_queue diff --git a/vendor/github.com/shirou/gopsutil/process/types_freebsd.go b/vendor/github.com/shirou/gopsutil/process/types_freebsd.go deleted file mode 100644 index aa7b3462d..000000000 --- a/vendor/github.com/shirou/gopsutil/process/types_freebsd.go +++ /dev/null @@ -1,95 +0,0 @@ -// +build ignore - -// We still need editing by hands. -// go tool cgo -godefs types_freebsd.go | sed 's/\*int64/int64/' | sed 's/\*byte/int64/' > process_freebsd_amd64.go - -/* -Input to cgo -godefs. 
-*/ - -// +godefs map struct_pargs int64 /* pargs */ -// +godefs map struct_proc int64 /* proc */ -// +godefs map struct_user int64 /* user */ -// +godefs map struct_vnode int64 /* vnode */ -// +godefs map struct_vnode int64 /* vnode */ -// +godefs map struct_filedesc int64 /* filedesc */ -// +godefs map struct_vmspace int64 /* vmspace */ -// +godefs map struct_pcb int64 /* pcb */ -// +godefs map struct_thread int64 /* thread */ -// +godefs map struct___sigset [16]byte /* sigset */ - -package process - -/* -#include -#include - -enum { - sizeofPtr = sizeof(void*), -}; - - -*/ -import "C" - -// Machine characteristics; for internal use. - -const ( - CTLKern = 1 // "high kernel": proc, limits - KernProc = 14 // struct: process entries - KernProcPID = 1 // by process id - KernProcProc = 8 // only return procs - KernProcPathname = 12 // path to executable - KernProcArgs = 7 // get/set arguments/proctitle -) - -const ( - sizeofPtr = C.sizeofPtr - sizeofShort = C.sizeof_short - sizeofInt = C.sizeof_int - sizeofLong = C.sizeof_long - sizeofLongLong = C.sizeof_longlong -) - -const ( - sizeOfKinfoVmentry = C.sizeof_struct_kinfo_vmentry - sizeOfKinfoProc = C.sizeof_struct_kinfo_proc -) - -// from sys/proc.h -const ( - SIDL = 1 /* Process being created by fork. */ - SRUN = 2 /* Currently runnable. */ - SSLEEP = 3 /* Sleeping on an address. */ - SSTOP = 4 /* Process debugging or suspension. */ - SZOMB = 5 /* Awaiting collection by parent. */ - SWAIT = 6 /* Waiting for interrupt. */ - SLOCK = 7 /* Blocked on a lock. 
*/ -) - -// Basic types - -type ( - _C_short C.short - _C_int C.int - _C_long C.long - _C_long_long C.longlong -) - -// Time - -type Timespec C.struct_timespec - -type Timeval C.struct_timeval - -// Processes - -type Rusage C.struct_rusage - -type Rlimit C.struct_rlimit - -type KinfoProc C.struct_kinfo_proc - -type Priority C.struct_priority - -type KinfoVmentry C.struct_kinfo_vmentry diff --git a/vendor/github.com/shirou/gopsutil/process/types_openbsd.go b/vendor/github.com/shirou/gopsutil/process/types_openbsd.go deleted file mode 100644 index 09ac59028..000000000 --- a/vendor/github.com/shirou/gopsutil/process/types_openbsd.go +++ /dev/null @@ -1,103 +0,0 @@ -// +build ignore - -// We still need editing by hands. -// go tool cgo -godefs types_openbsd.go | sed 's/\*int64/int64/' | sed 's/\*byte/int64/' > process_openbsd_amd64.go - -/* -Input to cgo -godefs. -*/ - -// +godefs map struct_pargs int64 /* pargs */ -// +godefs map struct_proc int64 /* proc */ -// +godefs map struct_user int64 /* user */ -// +godefs map struct_vnode int64 /* vnode */ -// +godefs map struct_vnode int64 /* vnode */ -// +godefs map struct_filedesc int64 /* filedesc */ -// +godefs map struct_vmspace int64 /* vmspace */ -// +godefs map struct_pcb int64 /* pcb */ -// +godefs map struct_thread int64 /* thread */ -// +godefs map struct___sigset [16]byte /* sigset */ - -package process - -/* -#include -#include -#include - -enum { - sizeofPtr = sizeof(void*), -}; - - -*/ -import "C" - -// Machine characteristics; for internal use. 
- -const ( - CTLKern = 1 // "high kernel": proc, limits - KernProc = 66 // struct: process entries - KernProcAll = 0 - KernProcPID = 1 // by process id - KernProcProc = 8 // only return procs - KernProcPathname = 12 // path to executable - KernProcArgs = 55 // get/set arguments/proctitle - KernProcArgv = 1 - KernProcEnv = 3 -) - -const ( - ArgMax = 256 * 1024 // sys/syslimits.h:#define ARG_MAX -) - -const ( - sizeofPtr = C.sizeofPtr - sizeofShort = C.sizeof_short - sizeofInt = C.sizeof_int - sizeofLong = C.sizeof_long - sizeofLongLong = C.sizeof_longlong -) - -const ( - sizeOfKinfoVmentry = C.sizeof_struct_kinfo_vmentry - sizeOfKinfoProc = C.sizeof_struct_kinfo_proc -) - -// from sys/proc.h -const ( - SIDL = 1 /* Process being created by fork. */ - SRUN = 2 /* Currently runnable. */ - SSLEEP = 3 /* Sleeping on an address. */ - SSTOP = 4 /* Process debugging or suspension. */ - SZOMB = 5 /* Awaiting collection by parent. */ - SDEAD = 6 /* Thread is almost gone */ - SONPROC = 7 /* Thread is currently on a CPU. */ -) - -// Basic types - -type ( - _C_short C.short - _C_int C.int - _C_long C.long - _C_long_long C.longlong -) - -// Time - -type Timespec C.struct_timespec - -type Timeval C.struct_timeval - -// Processes - -type Rusage C.struct_rusage - -type Rlimit C.struct_rlimit - -type KinfoProc C.struct_kinfo_proc - -type Priority C.struct_priority - -type KinfoVmentry C.struct_kinfo_vmentry diff --git a/vendor/github.com/ugorji/go/codec/xml.go b/vendor/github.com/ugorji/go/codec/xml.go deleted file mode 100644 index 19fc36caf..000000000 --- a/vendor/github.com/ugorji/go/codec/xml.go +++ /dev/null @@ -1,508 +0,0 @@ -// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -// +build ignore - -package codec - -import "reflect" - -/* - -A strict Non-validating namespace-aware XML 1.0 parser and (en|de)coder. 
- -We are attempting this due to perceived issues with encoding/xml: - - Complicated. It tried to do too much, and is not as simple to use as json. - - Due to over-engineering, reflection is over-used AND performance suffers: - java is 6X faster:http://fabsk.eu/blog/category/informatique/dev/golang/ - even PYTHON performs better: http://outgoing.typepad.com/outgoing/2014/07/exploring-golang.html - -codec framework will offer the following benefits - - VASTLY improved performance (when using reflection-mode or codecgen) - - simplicity and consistency: with the rest of the supported formats - - all other benefits of codec framework (streaming, codegeneration, etc) - -codec is not a drop-in replacement for encoding/xml. -It is a replacement, based on the simplicity and performance of codec. -Look at it like JAXB for Go. - -Challenges: - - Need to output XML preamble, with all namespaces at the right location in the output. - - Each "end" block is dynamic, so we need to maintain a context-aware stack - - How to decide when to use an attribute VS an element - - How to handle chardata, attr, comment EXPLICITLY. - - Should it output fragments? - e.g. encoding a bool should just output true OR false, which is not well-formed XML. - -Extend the struct tag. See representative example: - type X struct { - ID uint8 `codec:"http://ugorji.net/x-namespace xid id,omitempty,toarray,attr,cdata"` - // format: [namespace-uri ][namespace-prefix ]local-name, ... - } - -Based on this, we encode - - fields as elements, BUT - encode as attributes if struct tag contains ",attr" and is a scalar (bool, number or string) - - text as entity-escaped text, BUT encode as CDATA if struct tag contains ",cdata". - -To handle namespaces: - - XMLHandle is denoted as being namespace-aware. - Consequently, we WILL use the ns:name pair to encode and decode if defined, else use the plain name. - - *Encoder and *Decoder know whether the Handle "prefers" namespaces. 
- - add *Encoder.getEncName(*structFieldInfo). - No one calls *structFieldInfo.indexForEncName directly anymore - - OR better yet: indexForEncName is namespace-aware, and helper.go is all namespace-aware - indexForEncName takes a parameter of the form namespace:local-name OR local-name - - add *Decoder.getStructFieldInfo(encName string) // encName here is either like abc, or h1:nsabc - by being a method on *Decoder, or maybe a method on the Handle itself. - No one accesses .encName anymore - - let encode.go and decode.go use these (for consistency) - - only problem exists for gen.go, where we create a big switch on encName. - Now, we also have to add a switch on strings.endsWith(kName, encNsName) - - gen.go will need to have many more methods, and then double-on the 2 switch loops like: - switch k { - case "abc" : x.abc() - case "def" : x.def() - default { - switch { - case !nsAware: panic(...) - case strings.endsWith(":abc"): x.abc() - case strings.endsWith(":def"): x.def() - default: panic(...) - } - } - } - -The structure below accommodates this: - - type typeInfo struct { - sfi []*structFieldInfo // sorted by encName - sfins // sorted by namespace - sfia // sorted, to have those with attributes at the top. Needed to write XML appropriately. - sfip // unsorted - } - type structFieldInfo struct { - encName - nsEncName - ns string - attr bool - cdata bool - } - -indexForEncName is now an internal helper function that takes a sorted array -(one of ti.sfins or ti.sfi). It is only used by *Encoder.getStructFieldInfo(...) - -There will be a separate parser from the builder. -The parser will have a method: next() xmlToken method. It has lookahead support, -so you can pop multiple tokens, make a determination, and push them back in the order popped. -This will be needed to determine whether we are "nakedly" decoding a container or not. -The stack will be implemented using a slice and push/pop happens at the [0] element. 
- -xmlToken has fields: - - type uint8: 0 | ElementStart | ElementEnd | AttrKey | AttrVal | Text - - value string - - ns string - -SEE: http://www.xml.com/pub/a/98/10/guide0.html?page=3#ENTDECL - -The following are skipped when parsing: - - External Entities (from external file) - - Notation Declaration e.g. - - Entity Declarations & References - - XML Declaration (assume UTF-8) - - XML Directive i.e. - - Other Declarations: Notation, etc. - - Comment - - Processing Instruction - - schema / DTD for validation: - We are not a VALIDATING parser. Validation is done elsewhere. - However, some parts of the DTD internal subset are used (SEE BELOW). - For Attribute List Declarations e.g. - - We considered using the ATTLIST to get "default" value, but not to validate the contents. (VETOED) - -The following XML features are supported - - Namespace - - Element - - Attribute - - cdata - - Unicode escape - -The following DTD (when as an internal sub-set) features are supported: - - Internal Entities e.g. - AND entities for the set: [<>&"'] - - Parameter entities e.g. - - -At decode time, a structure containing the following is kept - - namespace mapping - - default attribute values - - all internal entities (<>&"' and others written in the document) - -When decode starts, it parses XML namespace declarations and creates a map in the -xmlDecDriver. While parsing, that map continuously gets updated. -The only problem happens when a namespace declaration happens on the node that it defines. -e.g. -To handle this, each Element must be fully parsed at a time, -even if it amounts to multiple tokens which are returned one at a time on request. - -xmlns is a special attribute name. - - It is used to define namespaces, including the default - - It is never returned as an AttrKey or AttrVal. - *We may decide later to allow user to use it e.g. you want to parse the xmlns mappings into a field.* - -Number, bool, null, mapKey, etc can all be decoded from any xmlToken. 
-This accommodates map[int]string for example. - -It should be possible to create a schema from the types, -or vice versa (generate types from schema with appropriate tags). -This is however out-of-scope from this parsing project. - -We should write all namespace information at the first point that it is referenced in the tree, -and use the mapping for all child nodes and attributes. This means that state is maintained -at a point in the tree. This also means that calls to Decode or MustDecode will reset some state. - -When decoding, it is important to keep track of entity references and default attribute values. -It seems these can only be stored in the DTD components. We should honor them when decoding. - -Configuration for XMLHandle will look like this: - - XMLHandle - DefaultNS string - // Encoding: - NS map[string]string // ns URI to key, used for encoding - // Decoding: in case ENTITY declared in external schema or dtd, store info needed here - Entities map[string]string // map of entity rep to character - - -During encode, if a namespace mapping is not defined for a namespace found on a struct, -then we create a mapping for it using nsN (where N is 1..1000000, and doesn't conflict -with any other namespace mapping). - -Note that different fields in a struct can have different namespaces. -However, all fields will default to the namespace on the _struct field (if defined). - -An XML document is a name, a map of attributes and a list of children. -Consequently, we cannot "DecodeNaked" into a map[string]interface{} (for example). -We have to "DecodeNaked" into something that resembles XML data. - -To support DecodeNaked (decode into nil interface{}), we have to define some "supporting" types: - type Name struct { // Preferred. Less allocations due to conversions. 
- Local string - Space string - } - type Element struct { - Name Name - Attrs map[Name]string - Children []interface{} // each child is either *Element or string - } -Only two "supporting" types are exposed for XML: Name and Element. - -// ------------------ - -We considered 'type Name string' where Name is like "Space Local" (space-separated). -We decided against it, because each creation of a name would lead to -double allocation (first convert []byte to string, then concatenate them into a string). -The benefit is that it is faster to read Attrs from a map. But given that Element is a value -object, we want to eschew methods and have public exposed variables. - -We also considered the following, where xml types were not value objects, and we used -intelligent accessor methods to extract information and for performance. -*** WE DECIDED AGAINST THIS. *** - type Attr struct { - Name Name - Value string - } - // Element is a ValueObject: There are no accessor methods. - // Make element self-contained. - type Element struct { - Name Name - attrsMap map[string]string // where key is "Space Local" - attrs []Attr - childrenT []string - childrenE []Element - childrenI []int // each child is a index into T or E. - } - func (x *Element) child(i) interface{} // returns string or *Element - -// ------------------ - -Per XML spec and our default handling, white space is always treated as -insignificant between elements, except in a text node. The xml:space='preserve' -attribute is ignored. - -**Note: there is no xml: namespace. The xml: attributes were defined before namespaces.** -**So treat them as just "directives" that should be interpreted to mean something**. - -On encoding, we support indenting aka prettifying markup in the same way we support it for json. - -A document or element can only be encoded/decoded from/to a struct. 
In this mode: - - struct name maps to element name (or tag-info from _struct field) - - fields are mapped to child elements or attributes - -A map is either encoded as attributes on current element, or as a set of child elements. -Maps are encoded as attributes iff their keys and values are primitives (number, bool, string). - -A list is encoded as a set of child elements. - -Primitives (number, bool, string) are encoded as an element, attribute or text -depending on the context. - -Extensions must encode themselves as a text string. - -Encoding is tough, specifically when encoding mappings, because we need to encode -as either attribute or element. To do this, we need to default to encoding as attributes, -and then let Encoder inform the Handle when to start encoding as nodes. -i.e. Encoder does something like: - - h.EncodeMapStart() - h.Encode(), h.Encode(), ... - h.EncodeMapNotAttrSignal() // this is not a bool, because it's a signal - h.Encode(), h.Encode(), ... - h.EncodeEnd() - -Only XMLHandle understands this, and will set itself to start encoding as elements. - -This support extends to maps. For example, if a struct field is a map, and it has -the struct tag signifying it should be attr, then all its fields are encoded as attributes. -e.g. - - type X struct { - M map[string]int `codec:"m,attr"` // encode keys as attributes named - } - -Question: - - if encoding a map, what if map keys have spaces in them??? - Then they cannot be attributes or child elements. Error. - -Options to consider adding later: - - For attribute values, normalize by trimming beginning and ending white space, - and converting every white space sequence to a single space. - - ATTLIST restrictions are enforced. - e.g. default value of xml:space, skipping xml:XYZ style attributes, etc. - - Consider supporting NON-STRICT mode (e.g. to handle HTML parsing). - Some elements e.g. br, hr, etc need not close and should be auto-closed - ... 
(see http://www.w3.org/TR/html4/loose.dtd) - An expansive set of entities are pre-defined. - - Have easy way to create a HTML parser: - add a HTML() method to XMLHandle, that will set Strict=false, specify AutoClose, - and add HTML Entities to the list. - - Support validating element/attribute XMLName before writing it. - Keep this behind a flag, which is set to false by default (for performance). - type XMLHandle struct { - CheckName bool - } - -Misc: - -ROADMAP (1 weeks): - - build encoder (1 day) - - build decoder (based off xmlParser) (1 day) - - implement xmlParser (2 days). - Look at encoding/xml for inspiration. - - integrate and TEST (1 days) - - write article and post it (1 day) - -// ---------- MORE NOTES FROM 2017-11-30 ------------ - -when parsing -- parse the attributes first -- then parse the nodes - -basically: -- if encoding a field: we use the field name for the wrapper -- if encoding a non-field, then just use the element type name - - map[string]string ==> abcval... or - val... OR - val1val2... <- PREFERED - []string ==> v1v2... - string v1 ==> v1 - bool true ==> true - float 1.0 ==> 1.0 - ... - - F1 map[string]string ==> abcval... OR - val... OR - val... <- PREFERED - F2 []string ==> v1v2... - F3 bool ==> true - ... - -- a scalar is encoded as: - (value) of type T ==> - (value) of field F ==> -- A kv-pair is encoded as: - (key,value) ==> OR - (key,value) of field F ==> OR -- A map or struct is just a list of kv-pairs -- A list is encoded as sequences of same node e.g. - - - value21 - value22 -- we may have to singularize the field name, when entering into xml, - and pluralize them when encoding. -- bi-directional encode->decode->encode is not a MUST. 
- even encoding/xml cannot decode correctly what was encoded: - - see https://play.golang.org/p/224V_nyhMS - func main() { - fmt.Println("Hello, playground") - v := []interface{}{"hello", 1, true, nil, time.Now()} - s, err := xml.Marshal(v) - fmt.Printf("err: %v, \ns: %s\n", err, s) - var v2 []interface{} - err = xml.Unmarshal(s, &v2) - fmt.Printf("err: %v, \nv2: %v\n", err, v2) - type T struct { - V []interface{} - } - v3 := T{V: v} - s, err = xml.Marshal(v3) - fmt.Printf("err: %v, \ns: %s\n", err, s) - var v4 T - err = xml.Unmarshal(s, &v4) - fmt.Printf("err: %v, \nv4: %v\n", err, v4) - } - Output: - err: , - s: hello1true - err: , - v2: [] - err: , - s: hello1true2009-11-10T23:00:00Z - err: , - v4: {[ ]} -- -*/ - -// ----------- PARSER ------------------- - -type xmlTokenType uint8 - -const ( - _ xmlTokenType = iota << 1 - xmlTokenElemStart - xmlTokenElemEnd - xmlTokenAttrKey - xmlTokenAttrVal - xmlTokenText -) - -type xmlToken struct { - Type xmlTokenType - Value string - Namespace string // blank for AttrVal and Text -} - -type xmlParser struct { - r decReader - toks []xmlToken // list of tokens. - ptr int // ptr into the toks slice - done bool // nothing else to parse. r now returns EOF. -} - -func (x *xmlParser) next() (t *xmlToken) { - // once x.done, or x.ptr == len(x.toks) == 0, then return nil (to signify finish) - if !x.done && len(x.toks) == 0 { - x.nextTag() - } - // parses one element at a time (into possible many tokens) - if x.ptr < len(x.toks) { - t = &(x.toks[x.ptr]) - x.ptr++ - if x.ptr == len(x.toks) { - x.ptr = 0 - x.toks = x.toks[:0] - } - } - return -} - -// nextTag will parses the next element and fill up toks. -// It set done flag if/once EOF is reached. -func (x *xmlParser) nextTag() { - // TODO: implement. 
-} - -// ----------- ENCODER ------------------- - -type xmlEncDriver struct { - e *Encoder - w encWriter - h *XMLHandle - b [64]byte // scratch - bs []byte // scratch - // s jsonStack - noBuiltInTypes -} - -// ----------- DECODER ------------------- - -type xmlDecDriver struct { - d *Decoder - h *XMLHandle - r decReader // *bytesDecReader decReader - ct valueType // container type. one of unset, array or map. - bstr [8]byte // scratch used for string \UXXX parsing - b [64]byte // scratch - - // wsSkipped bool // whitespace skipped - - // s jsonStack - - noBuiltInTypes -} - -// DecodeNaked will decode into an XMLNode - -// XMLName is a value object representing a namespace-aware NAME -type XMLName struct { - Local string - Space string -} - -// XMLNode represents a "union" of the different types of XML Nodes. -// Only one of fields (Text or *Element) is set. -type XMLNode struct { - Element *Element - Text string -} - -// XMLElement is a value object representing an fully-parsed XML element. -type XMLElement struct { - Name Name - Attrs map[XMLName]string - // Children is a list of child nodes, each being a *XMLElement or string - Children []XMLNode -} - -// ----------- HANDLE ------------------- - -type XMLHandle struct { - BasicHandle - textEncodingType - - DefaultNS string - NS map[string]string // ns URI to key, for encoding - Entities map[string]string // entity representation to string, for encoding. 
-} - -func (h *XMLHandle) newEncDriver(e *Encoder) encDriver { - return &xmlEncDriver{e: e, w: e.w, h: h} -} - -func (h *XMLHandle) newDecDriver(d *Decoder) decDriver { - // d := xmlDecDriver{r: r.(*bytesDecReader), h: h} - hd := xmlDecDriver{d: d, r: d.r, h: h} - hd.n.bytes = d.b[:] - return &hd -} - -func (h *XMLHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceExt) (err error) { - return h.SetExt(rt, tag, &extWrapper{bytesExtFailer{}, ext}) -} - -var _ decDriver = (*xmlDecDriver)(nil) -var _ encDriver = (*xmlEncDriver)(nil) diff --git a/vendor/github.com/ulikunitz/xz/example.go b/vendor/github.com/ulikunitz/xz/example.go deleted file mode 100644 index 855e60aee..000000000 --- a/vendor/github.com/ulikunitz/xz/example.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -package main - -import ( - "bytes" - "io" - "log" - "os" - - "github.com/ulikunitz/xz" -) - -func main() { - const text = "The quick brown fox jumps over the lazy dog.\n" - var buf bytes.Buffer - // compress text - w, err := xz.NewWriter(&buf) - if err != nil { - log.Fatalf("xz.NewWriter error %s", err) - } - if _, err := io.WriteString(w, text); err != nil { - log.Fatalf("WriteString error %s", err) - } - if err := w.Close(); err != nil { - log.Fatalf("w.Close error %s", err) - } - // decompress buffer and write output to stdout - r, err := xz.NewReader(&buf) - if err != nil { - log.Fatalf("NewReader error %s", err) - } - if _, err = io.Copy(os.Stdout, r); err != nil { - log.Fatalf("io.Copy error %s", err) - } -} diff --git a/vendor/golang.org/x/sys/unix/mkasm_darwin.go b/vendor/golang.org/x/sys/unix/mkasm_darwin.go deleted file mode 100644 index 4548b993d..000000000 --- a/vendor/golang.org/x/sys/unix/mkasm_darwin.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// mkasm_darwin.go generates assembly trampolines to call libSystem routines from Go. -//This program must be run after mksyscall.go. -package main - -import ( - "bytes" - "fmt" - "io/ioutil" - "log" - "os" - "strings" -) - -func main() { - in1, err := ioutil.ReadFile("syscall_darwin.go") - if err != nil { - log.Fatalf("can't open syscall_darwin.go: %s", err) - } - arch := os.Args[1] - in2, err := ioutil.ReadFile(fmt.Sprintf("syscall_darwin_%s.go", arch)) - if err != nil { - log.Fatalf("can't open syscall_darwin_%s.go: %s", arch, err) - } - in3, err := ioutil.ReadFile(fmt.Sprintf("zsyscall_darwin_%s.go", arch)) - if err != nil { - log.Fatalf("can't open zsyscall_darwin_%s.go: %s", arch, err) - } - in := string(in1) + string(in2) + string(in3) - - trampolines := map[string]bool{} - - var out bytes.Buffer - - fmt.Fprintf(&out, "// go run mkasm_darwin.go %s\n", strings.Join(os.Args[1:], " ")) - fmt.Fprintf(&out, "// Code generated by the command above; DO NOT EDIT.\n") - fmt.Fprintf(&out, "\n") - fmt.Fprintf(&out, "// +build go1.12\n") - fmt.Fprintf(&out, "\n") - fmt.Fprintf(&out, "#include \"textflag.h\"\n") - for _, line := range strings.Split(in, "\n") { - if !strings.HasPrefix(line, "func ") || !strings.HasSuffix(line, "_trampoline()") { - continue - } - fn := line[5 : len(line)-13] - if !trampolines[fn] { - trampolines[fn] = true - fmt.Fprintf(&out, "TEXT ·%s_trampoline(SB),NOSPLIT,$0-0\n", fn) - fmt.Fprintf(&out, "\tJMP\t%s(SB)\n", fn) - } - } - err = ioutil.WriteFile(fmt.Sprintf("zsyscall_darwin_%s.s", arch), out.Bytes(), 0644) - if err != nil { - log.Fatalf("can't write zsyscall_darwin_%s.s: %s", arch, err) - } -} diff --git a/vendor/golang.org/x/sys/unix/mkpost.go b/vendor/golang.org/x/sys/unix/mkpost.go deleted file mode 100644 index eb4332059..000000000 --- a/vendor/golang.org/x/sys/unix/mkpost.go +++ /dev/null @@ -1,122 +0,0 @@ 
-// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// mkpost processes the output of cgo -godefs to -// modify the generated types. It is used to clean up -// the sys API in an architecture specific manner. -// -// mkpost is run after cgo -godefs; see README.md. -package main - -import ( - "bytes" - "fmt" - "go/format" - "io/ioutil" - "log" - "os" - "regexp" -) - -func main() { - // Get the OS and architecture (using GOARCH_TARGET if it exists) - goos := os.Getenv("GOOS") - goarch := os.Getenv("GOARCH_TARGET") - if goarch == "" { - goarch = os.Getenv("GOARCH") - } - // Check that we are using the Docker-based build system if we should be. - if goos == "linux" { - if os.Getenv("GOLANG_SYS_BUILD") != "docker" { - os.Stderr.WriteString("In the Docker-based build system, mkpost should not be called directly.\n") - os.Stderr.WriteString("See README.md\n") - os.Exit(1) - } - } - - b, err := ioutil.ReadAll(os.Stdin) - if err != nil { - log.Fatal(err) - } - - if goos == "aix" { - // Replace type of Atim, Mtim and Ctim by Timespec in Stat_t - // to avoid having both StTimespec and Timespec. - sttimespec := regexp.MustCompile(`_Ctype_struct_st_timespec`) - b = sttimespec.ReplaceAll(b, []byte("Timespec")) - } - - // Intentionally export __val fields in Fsid and Sigset_t - valRegex := regexp.MustCompile(`type (Fsid|Sigset_t) struct {(\s+)X__(bits|val)(\s+\S+\s+)}`) - b = valRegex.ReplaceAll(b, []byte("type $1 struct {${2}Val$4}")) - - // Intentionally export __fds_bits field in FdSet - fdSetRegex := regexp.MustCompile(`type (FdSet) struct {(\s+)X__fds_bits(\s+\S+\s+)}`) - b = fdSetRegex.ReplaceAll(b, []byte("type $1 struct {${2}Bits$3}")) - - // If we have empty Ptrace structs, we should delete them. Only s390x emits - // nonempty Ptrace structs. 
- ptraceRexexp := regexp.MustCompile(`type Ptrace((Psw|Fpregs|Per) struct {\s*})`) - b = ptraceRexexp.ReplaceAll(b, nil) - - // Replace the control_regs union with a blank identifier for now. - controlRegsRegex := regexp.MustCompile(`(Control_regs)\s+\[0\]uint64`) - b = controlRegsRegex.ReplaceAll(b, []byte("_ [0]uint64")) - - // Remove fields that are added by glibc - // Note that this is unstable as the identifers are private. - removeFieldsRegex := regexp.MustCompile(`X__glibc\S*`) - b = removeFieldsRegex.ReplaceAll(b, []byte("_")) - - // Convert [65]int8 to [65]byte in Utsname members to simplify - // conversion to string; see golang.org/issue/20753 - convertUtsnameRegex := regexp.MustCompile(`((Sys|Node|Domain)name|Release|Version|Machine)(\s+)\[(\d+)\]u?int8`) - b = convertUtsnameRegex.ReplaceAll(b, []byte("$1$3[$4]byte")) - - // Convert [1024]int8 to [1024]byte in Ptmget members - convertPtmget := regexp.MustCompile(`([SC]n)(\s+)\[(\d+)\]u?int8`) - b = convertPtmget.ReplaceAll(b, []byte("$1[$3]byte")) - - // Remove spare fields (e.g. in Statx_t) - spareFieldsRegex := regexp.MustCompile(`X__spare\S*`) - b = spareFieldsRegex.ReplaceAll(b, []byte("_")) - - // Remove cgo padding fields - removePaddingFieldsRegex := regexp.MustCompile(`Pad_cgo_\d+`) - b = removePaddingFieldsRegex.ReplaceAll(b, []byte("_")) - - // Remove padding, hidden, or unused fields - removeFieldsRegex = regexp.MustCompile(`\b(X_\S+|Padding)`) - b = removeFieldsRegex.ReplaceAll(b, []byte("_")) - - // Remove the first line of warning from cgo - b = b[bytes.IndexByte(b, '\n')+1:] - // Modify the command in the header to include: - // mkpost, our own warning, and a build tag. - replacement := fmt.Sprintf(`$1 | go run mkpost.go -// Code generated by the command above; see README.md. DO NOT EDIT. 
- -// +build %s,%s`, goarch, goos) - cgoCommandRegex := regexp.MustCompile(`(cgo -godefs .*)`) - b = cgoCommandRegex.ReplaceAll(b, []byte(replacement)) - - // Rename Stat_t time fields - if goos == "freebsd" && goarch == "386" { - // Hide Stat_t.[AMCB]tim_ext fields - renameStatTimeExtFieldsRegex := regexp.MustCompile(`[AMCB]tim_ext`) - b = renameStatTimeExtFieldsRegex.ReplaceAll(b, []byte("_")) - } - renameStatTimeFieldsRegex := regexp.MustCompile(`([AMCB])(?:irth)?time?(?:spec)?\s+(Timespec|StTimespec)`) - b = renameStatTimeFieldsRegex.ReplaceAll(b, []byte("${1}tim ${2}")) - - // gofmt - b, err = format.Source(b) - if err != nil { - log.Fatal(err) - } - - os.Stdout.Write(b) -} diff --git a/vendor/golang.org/x/sys/unix/mksyscall.go b/vendor/golang.org/x/sys/unix/mksyscall.go deleted file mode 100644 index e4af9424e..000000000 --- a/vendor/golang.org/x/sys/unix/mksyscall.go +++ /dev/null @@ -1,407 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -/* -This program reads a file containing function prototypes -(like syscall_darwin.go) and generates system call bodies. -The prototypes are marked by lines beginning with "//sys" -and read like func declarations if //sys is replaced by func, but: - * The parameter lists must give a name for each argument. - This includes return parameters. - * The parameter lists must give a type for each argument: - the (x, y, z int) shorthand is not allowed. - * If the return parameter is an error number, it must be named errno. - -A line beginning with //sysnb is like //sys, except that the -goroutine will not be suspended during the execution of the system -call. This must only be used for system calls which can never -block, as otherwise the system call could cause all goroutines to -hang. 
-*/ -package main - -import ( - "bufio" - "flag" - "fmt" - "os" - "regexp" - "strings" -) - -var ( - b32 = flag.Bool("b32", false, "32bit big-endian") - l32 = flag.Bool("l32", false, "32bit little-endian") - plan9 = flag.Bool("plan9", false, "plan9") - openbsd = flag.Bool("openbsd", false, "openbsd") - netbsd = flag.Bool("netbsd", false, "netbsd") - dragonfly = flag.Bool("dragonfly", false, "dragonfly") - arm = flag.Bool("arm", false, "arm") // 64-bit value should use (even, odd)-pair - tags = flag.String("tags", "", "build tags") - filename = flag.String("output", "", "output file name (standard output if omitted)") -) - -// cmdLine returns this programs's commandline arguments -func cmdLine() string { - return "go run mksyscall.go " + strings.Join(os.Args[1:], " ") -} - -// buildTags returns build tags -func buildTags() string { - return *tags -} - -// Param is function parameter -type Param struct { - Name string - Type string -} - -// usage prints the program usage -func usage() { - fmt.Fprintf(os.Stderr, "usage: go run mksyscall.go [-b32 | -l32] [-tags x,y] [file ...]\n") - os.Exit(1) -} - -// parseParamList parses parameter list and returns a slice of parameters -func parseParamList(list string) []string { - list = strings.TrimSpace(list) - if list == "" { - return []string{} - } - return regexp.MustCompile(`\s*,\s*`).Split(list, -1) -} - -// parseParam splits a parameter into name and type -func parseParam(p string) Param { - ps := regexp.MustCompile(`^(\S*) (\S*)$`).FindStringSubmatch(p) - if ps == nil { - fmt.Fprintf(os.Stderr, "malformed parameter: %s\n", p) - os.Exit(1) - } - return Param{ps[1], ps[2]} -} - -func main() { - // Get the OS and architecture (using GOARCH_TARGET if it exists) - goos := os.Getenv("GOOS") - if goos == "" { - fmt.Fprintln(os.Stderr, "GOOS not defined in environment") - os.Exit(1) - } - goarch := os.Getenv("GOARCH_TARGET") - if goarch == "" { - goarch = os.Getenv("GOARCH") - } - - // Check that we are using the Docker-based 
build system if we should - if goos == "linux" { - if os.Getenv("GOLANG_SYS_BUILD") != "docker" { - fmt.Fprintf(os.Stderr, "In the Docker-based build system, mksyscall should not be called directly.\n") - fmt.Fprintf(os.Stderr, "See README.md\n") - os.Exit(1) - } - } - - flag.Usage = usage - flag.Parse() - if len(flag.Args()) <= 0 { - fmt.Fprintf(os.Stderr, "no files to parse provided\n") - usage() - } - - endianness := "" - if *b32 { - endianness = "big-endian" - } else if *l32 { - endianness = "little-endian" - } - - libc := false - if goos == "darwin" && strings.Contains(buildTags(), ",go1.12") { - libc = true - } - trampolines := map[string]bool{} - - text := "" - for _, path := range flag.Args() { - file, err := os.Open(path) - if err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } - s := bufio.NewScanner(file) - for s.Scan() { - t := s.Text() - t = strings.TrimSpace(t) - t = regexp.MustCompile(`\s+`).ReplaceAllString(t, ` `) - nonblock := regexp.MustCompile(`^\/\/sysnb `).FindStringSubmatch(t) - if regexp.MustCompile(`^\/\/sys `).FindStringSubmatch(t) == nil && nonblock == nil { - continue - } - - // Line must be of the form - // func Open(path string, mode int, perm int) (fd int, errno error) - // Split into name, in params, out params. - f := regexp.MustCompile(`^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*((?i)SYS_[A-Z0-9_]+))?$`).FindStringSubmatch(t) - if f == nil { - fmt.Fprintf(os.Stderr, "%s:%s\nmalformed //sys declaration\n", path, t) - os.Exit(1) - } - funct, inps, outps, sysname := f[2], f[3], f[4], f[5] - - // ClockGettime doesn't have a syscall number on Darwin, only generate libc wrappers. - if goos == "darwin" && !libc && funct == "ClockGettime" { - continue - } - - // Split argument lists on comma. - in := parseParamList(inps) - out := parseParamList(outps) - - // Try in vain to keep people from editing this file. - // The theory is that they jump into the middle of the file - // without reading the header. 
- text += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n" - - // Go function header. - outDecl := "" - if len(out) > 0 { - outDecl = fmt.Sprintf(" (%s)", strings.Join(out, ", ")) - } - text += fmt.Sprintf("func %s(%s)%s {\n", funct, strings.Join(in, ", "), outDecl) - - // Check if err return available - errvar := "" - for _, param := range out { - p := parseParam(param) - if p.Type == "error" { - errvar = p.Name - break - } - } - - // Prepare arguments to Syscall. - var args []string - n := 0 - for _, param := range in { - p := parseParam(param) - if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil { - args = append(args, "uintptr(unsafe.Pointer("+p.Name+"))") - } else if p.Type == "string" && errvar != "" { - text += fmt.Sprintf("\tvar _p%d *byte\n", n) - text += fmt.Sprintf("\t_p%d, %s = BytePtrFromString(%s)\n", n, errvar, p.Name) - text += fmt.Sprintf("\tif %s != nil {\n\t\treturn\n\t}\n", errvar) - args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n)) - n++ - } else if p.Type == "string" { - fmt.Fprintf(os.Stderr, path+":"+funct+" uses string arguments, but has no error return\n") - text += fmt.Sprintf("\tvar _p%d *byte\n", n) - text += fmt.Sprintf("\t_p%d, _ = BytePtrFromString(%s)\n", n, p.Name) - args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n)) - n++ - } else if regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type) != nil { - // Convert slice into pointer, length. - // Have to be careful not to take address of &a[0] if len == 0: - // pass dummy pointer in that case. - // Used to pass nil, but some OSes or simulators reject write(fd, nil, 0). 
- text += fmt.Sprintf("\tvar _p%d unsafe.Pointer\n", n) - text += fmt.Sprintf("\tif len(%s) > 0 {\n\t\t_p%d = unsafe.Pointer(&%s[0])\n\t}", p.Name, n, p.Name) - text += fmt.Sprintf(" else {\n\t\t_p%d = unsafe.Pointer(&_zero)\n\t}\n", n) - args = append(args, fmt.Sprintf("uintptr(_p%d)", n), fmt.Sprintf("uintptr(len(%s))", p.Name)) - n++ - } else if p.Type == "int64" && (*openbsd || *netbsd) { - args = append(args, "0") - if endianness == "big-endian" { - args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name)) - } else if endianness == "little-endian" { - args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name)) - } else { - args = append(args, fmt.Sprintf("uintptr(%s)", p.Name)) - } - } else if p.Type == "int64" && *dragonfly { - if regexp.MustCompile(`^(?i)extp(read|write)`).FindStringSubmatch(funct) == nil { - args = append(args, "0") - } - if endianness == "big-endian" { - args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name)) - } else if endianness == "little-endian" { - args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name)) - } else { - args = append(args, fmt.Sprintf("uintptr(%s)", p.Name)) - } - } else if (p.Type == "int64" || p.Type == "uint64") && endianness != "" { - if len(args)%2 == 1 && *arm { - // arm abi specifies 64-bit argument uses - // (even, odd) pair - args = append(args, "0") - } - if endianness == "big-endian" { - args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name)) - } else { - args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name)) - } - } else { - args = append(args, fmt.Sprintf("uintptr(%s)", p.Name)) - } - } - - // Determine which form to use; pad args with zeros. 
- asm := "Syscall" - if nonblock != nil { - if errvar == "" && goos == "linux" { - asm = "RawSyscallNoError" - } else { - asm = "RawSyscall" - } - } else { - if errvar == "" && goos == "linux" { - asm = "SyscallNoError" - } - } - if len(args) <= 3 { - for len(args) < 3 { - args = append(args, "0") - } - } else if len(args) <= 6 { - asm += "6" - for len(args) < 6 { - args = append(args, "0") - } - } else if len(args) <= 9 { - asm += "9" - for len(args) < 9 { - args = append(args, "0") - } - } else { - fmt.Fprintf(os.Stderr, "%s:%s too many arguments to system call\n", path, funct) - } - - // System call number. - if sysname == "" { - sysname = "SYS_" + funct - sysname = regexp.MustCompile(`([a-z])([A-Z])`).ReplaceAllString(sysname, `${1}_$2`) - sysname = strings.ToUpper(sysname) - } - - var libcFn string - if libc { - asm = "syscall_" + strings.ToLower(asm[:1]) + asm[1:] // internal syscall call - sysname = strings.TrimPrefix(sysname, "SYS_") // remove SYS_ - sysname = strings.ToLower(sysname) // lowercase - if sysname == "getdirentries64" { - // Special case - libSystem name and - // raw syscall name don't match. - sysname = "__getdirentries64" - } - libcFn = sysname - sysname = "funcPC(libc_" + sysname + "_trampoline)" - } - - // Actual call. - arglist := strings.Join(args, ", ") - call := fmt.Sprintf("%s(%s, %s)", asm, sysname, arglist) - - // Assign return values. - body := "" - ret := []string{"_", "_", "_"} - doErrno := false - for i := 0; i < len(out); i++ { - p := parseParam(out[i]) - reg := "" - if p.Name == "err" && !*plan9 { - reg = "e1" - ret[2] = reg - doErrno = true - } else if p.Name == "err" && *plan9 { - ret[0] = "r0" - ret[2] = "e1" - break - } else { - reg = fmt.Sprintf("r%d", i) - ret[i] = reg - } - if p.Type == "bool" { - reg = fmt.Sprintf("%s != 0", reg) - } - if p.Type == "int64" && endianness != "" { - // 64-bit number in r1:r0 or r0:r1. 
- if i+2 > len(out) { - fmt.Fprintf(os.Stderr, "%s:%s not enough registers for int64 return\n", path, funct) - } - if endianness == "big-endian" { - reg = fmt.Sprintf("int64(r%d)<<32 | int64(r%d)", i, i+1) - } else { - reg = fmt.Sprintf("int64(r%d)<<32 | int64(r%d)", i+1, i) - } - ret[i] = fmt.Sprintf("r%d", i) - ret[i+1] = fmt.Sprintf("r%d", i+1) - } - if reg != "e1" || *plan9 { - body += fmt.Sprintf("\t%s = %s(%s)\n", p.Name, p.Type, reg) - } - } - if ret[0] == "_" && ret[1] == "_" && ret[2] == "_" { - text += fmt.Sprintf("\t%s\n", call) - } else { - if errvar == "" && goos == "linux" { - // raw syscall without error on Linux, see golang.org/issue/22924 - text += fmt.Sprintf("\t%s, %s := %s\n", ret[0], ret[1], call) - } else { - text += fmt.Sprintf("\t%s, %s, %s := %s\n", ret[0], ret[1], ret[2], call) - } - } - text += body - - if *plan9 && ret[2] == "e1" { - text += "\tif int32(r0) == -1 {\n" - text += "\t\terr = e1\n" - text += "\t}\n" - } else if doErrno { - text += "\tif e1 != 0 {\n" - text += "\t\terr = errnoErr(e1)\n" - text += "\t}\n" - } - text += "\treturn\n" - text += "}\n\n" - - if libc && !trampolines[libcFn] { - // some system calls share a trampoline, like read and readlen. - trampolines[libcFn] = true - // Declare assembly trampoline. - text += fmt.Sprintf("func libc_%s_trampoline()\n", libcFn) - // Assembly trampoline calls the libc_* function, which this magic - // redirects to use the function from libSystem. - text += fmt.Sprintf("//go:linkname libc_%s libc_%s\n", libcFn, libcFn) - text += fmt.Sprintf("//go:cgo_import_dynamic libc_%s %s \"/usr/lib/libSystem.B.dylib\"\n", libcFn, libcFn) - text += "\n" - } - } - if err := s.Err(); err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } - file.Close() - } - fmt.Printf(srcTemplate, cmdLine(), buildTags(), text) -} - -const srcTemplate = `// %s -// Code generated by the command above; see README.md. DO NOT EDIT. 
- -// +build %s - -package unix - -import ( - "syscall" - "unsafe" -) - -var _ syscall.Errno - -%s -` diff --git a/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc.go b/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc.go deleted file mode 100644 index 3be3cdfc3..000000000 --- a/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc.go +++ /dev/null @@ -1,415 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -/* -This program reads a file containing function prototypes -(like syscall_aix.go) and generates system call bodies. -The prototypes are marked by lines beginning with "//sys" -and read like func declarations if //sys is replaced by func, but: - * The parameter lists must give a name for each argument. - This includes return parameters. - * The parameter lists must give a type for each argument: - the (x, y, z int) shorthand is not allowed. - * If the return parameter is an error number, it must be named err. 
- * If go func name needs to be different than its libc name, - * or the function is not in libc, name could be specified - * at the end, after "=" sign, like - //sys getsockopt(s int, level int, name int, val uintptr, vallen *_Socklen) (err error) = libsocket.getsockopt -*/ -package main - -import ( - "bufio" - "flag" - "fmt" - "os" - "regexp" - "strings" -) - -var ( - b32 = flag.Bool("b32", false, "32bit big-endian") - l32 = flag.Bool("l32", false, "32bit little-endian") - aix = flag.Bool("aix", false, "aix") - tags = flag.String("tags", "", "build tags") -) - -// cmdLine returns this programs's commandline arguments -func cmdLine() string { - return "go run mksyscall_aix_ppc.go " + strings.Join(os.Args[1:], " ") -} - -// buildTags returns build tags -func buildTags() string { - return *tags -} - -// Param is function parameter -type Param struct { - Name string - Type string -} - -// usage prints the program usage -func usage() { - fmt.Fprintf(os.Stderr, "usage: go run mksyscall_aix_ppc.go [-b32 | -l32] [-tags x,y] [file ...]\n") - os.Exit(1) -} - -// parseParamList parses parameter list and returns a slice of parameters -func parseParamList(list string) []string { - list = strings.TrimSpace(list) - if list == "" { - return []string{} - } - return regexp.MustCompile(`\s*,\s*`).Split(list, -1) -} - -// parseParam splits a parameter into name and type -func parseParam(p string) Param { - ps := regexp.MustCompile(`^(\S*) (\S*)$`).FindStringSubmatch(p) - if ps == nil { - fmt.Fprintf(os.Stderr, "malformed parameter: %s\n", p) - os.Exit(1) - } - return Param{ps[1], ps[2]} -} - -func main() { - flag.Usage = usage - flag.Parse() - if len(flag.Args()) <= 0 { - fmt.Fprintf(os.Stderr, "no files to parse provided\n") - usage() - } - - endianness := "" - if *b32 { - endianness = "big-endian" - } else if *l32 { - endianness = "little-endian" - } - - pack := "" - text := "" - cExtern := "/*\n#include \n#include \n" - for _, path := range flag.Args() { - file, err := 
os.Open(path) - if err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } - s := bufio.NewScanner(file) - for s.Scan() { - t := s.Text() - t = strings.TrimSpace(t) - t = regexp.MustCompile(`\s+`).ReplaceAllString(t, ` `) - if p := regexp.MustCompile(`^package (\S+)$`).FindStringSubmatch(t); p != nil && pack == "" { - pack = p[1] - } - nonblock := regexp.MustCompile(`^\/\/sysnb `).FindStringSubmatch(t) - if regexp.MustCompile(`^\/\/sys `).FindStringSubmatch(t) == nil && nonblock == nil { - continue - } - - // Line must be of the form - // func Open(path string, mode int, perm int) (fd int, err error) - // Split into name, in params, out params. - f := regexp.MustCompile(`^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*(?:(\w*)\.)?(\w*))?$`).FindStringSubmatch(t) - if f == nil { - fmt.Fprintf(os.Stderr, "%s:%s\nmalformed //sys declaration\n", path, t) - os.Exit(1) - } - funct, inps, outps, modname, sysname := f[2], f[3], f[4], f[5], f[6] - - // Split argument lists on comma. - in := parseParamList(inps) - out := parseParamList(outps) - - inps = strings.Join(in, ", ") - outps = strings.Join(out, ", ") - - // Try in vain to keep people from editing this file. - // The theory is that they jump into the middle of the file - // without reading the header. - text += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n" - - // Check if value return, err return available - errvar := "" - retvar := "" - rettype := "" - for _, param := range out { - p := parseParam(param) - if p.Type == "error" { - errvar = p.Name - } else { - retvar = p.Name - rettype = p.Type - } - } - - // System call name. - if sysname == "" { - sysname = funct - } - sysname = regexp.MustCompile(`([a-z])([A-Z])`).ReplaceAllString(sysname, `${1}_$2`) - sysname = strings.ToLower(sysname) // All libc functions are lowercase. 
- - cRettype := "" - if rettype == "unsafe.Pointer" { - cRettype = "uintptr_t" - } else if rettype == "uintptr" { - cRettype = "uintptr_t" - } else if regexp.MustCompile(`^_`).FindStringSubmatch(rettype) != nil { - cRettype = "uintptr_t" - } else if rettype == "int" { - cRettype = "int" - } else if rettype == "int32" { - cRettype = "int" - } else if rettype == "int64" { - cRettype = "long long" - } else if rettype == "uint32" { - cRettype = "unsigned int" - } else if rettype == "uint64" { - cRettype = "unsigned long long" - } else { - cRettype = "int" - } - if sysname == "exit" { - cRettype = "void" - } - - // Change p.Types to c - var cIn []string - for _, param := range in { - p := parseParam(param) - if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil { - cIn = append(cIn, "uintptr_t") - } else if p.Type == "string" { - cIn = append(cIn, "uintptr_t") - } else if regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type) != nil { - cIn = append(cIn, "uintptr_t", "size_t") - } else if p.Type == "unsafe.Pointer" { - cIn = append(cIn, "uintptr_t") - } else if p.Type == "uintptr" { - cIn = append(cIn, "uintptr_t") - } else if regexp.MustCompile(`^_`).FindStringSubmatch(p.Type) != nil { - cIn = append(cIn, "uintptr_t") - } else if p.Type == "int" { - cIn = append(cIn, "int") - } else if p.Type == "int32" { - cIn = append(cIn, "int") - } else if p.Type == "int64" { - cIn = append(cIn, "long long") - } else if p.Type == "uint32" { - cIn = append(cIn, "unsigned int") - } else if p.Type == "uint64" { - cIn = append(cIn, "unsigned long long") - } else { - cIn = append(cIn, "int") - } - } - - if funct != "fcntl" && funct != "FcntlInt" && funct != "readlen" && funct != "writelen" { - if sysname == "select" { - // select is a keyword of Go. Its name is - // changed to c_select. 
- cExtern += "#define c_select select\n" - } - // Imports of system calls from libc - cExtern += fmt.Sprintf("%s %s", cRettype, sysname) - cIn := strings.Join(cIn, ", ") - cExtern += fmt.Sprintf("(%s);\n", cIn) - } - - // So file name. - if *aix { - if modname == "" { - modname = "libc.a/shr_64.o" - } else { - fmt.Fprintf(os.Stderr, "%s: only syscall using libc are available\n", funct) - os.Exit(1) - } - } - - strconvfunc := "C.CString" - - // Go function header. - if outps != "" { - outps = fmt.Sprintf(" (%s)", outps) - } - if text != "" { - text += "\n" - } - - text += fmt.Sprintf("func %s(%s)%s {\n", funct, strings.Join(in, ", "), outps) - - // Prepare arguments to Syscall. - var args []string - n := 0 - argN := 0 - for _, param := range in { - p := parseParam(param) - if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil { - args = append(args, "C.uintptr_t(uintptr(unsafe.Pointer("+p.Name+")))") - } else if p.Type == "string" && errvar != "" { - text += fmt.Sprintf("\t_p%d := uintptr(unsafe.Pointer(%s(%s)))\n", n, strconvfunc, p.Name) - args = append(args, fmt.Sprintf("C.uintptr_t(_p%d)", n)) - n++ - } else if p.Type == "string" { - fmt.Fprintf(os.Stderr, path+":"+funct+" uses string arguments, but has no error return\n") - text += fmt.Sprintf("\t_p%d := uintptr(unsafe.Pointer(%s(%s)))\n", n, strconvfunc, p.Name) - args = append(args, fmt.Sprintf("C.uintptr_t(_p%d)", n)) - n++ - } else if m := regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type); m != nil { - // Convert slice into pointer, length. - // Have to be careful not to take address of &a[0] if len == 0: - // pass nil in that case. 
- text += fmt.Sprintf("\tvar _p%d *%s\n", n, m[1]) - text += fmt.Sprintf("\tif len(%s) > 0 {\n\t\t_p%d = &%s[0]\n\t}\n", p.Name, n, p.Name) - args = append(args, fmt.Sprintf("C.uintptr_t(uintptr(unsafe.Pointer(_p%d)))", n)) - n++ - text += fmt.Sprintf("\tvar _p%d int\n", n) - text += fmt.Sprintf("\t_p%d = len(%s)\n", n, p.Name) - args = append(args, fmt.Sprintf("C.size_t(_p%d)", n)) - n++ - } else if p.Type == "int64" && endianness != "" { - if endianness == "big-endian" { - args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name)) - } else { - args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name)) - } - n++ - } else if p.Type == "bool" { - text += fmt.Sprintf("\tvar _p%d uint32\n", n) - text += fmt.Sprintf("\tif %s {\n\t\t_p%d = 1\n\t} else {\n\t\t_p%d = 0\n\t}\n", p.Name, n, n) - args = append(args, fmt.Sprintf("_p%d", n)) - } else if regexp.MustCompile(`^_`).FindStringSubmatch(p.Type) != nil { - args = append(args, fmt.Sprintf("C.uintptr_t(uintptr(%s))", p.Name)) - } else if p.Type == "unsafe.Pointer" { - args = append(args, fmt.Sprintf("C.uintptr_t(uintptr(%s))", p.Name)) - } else if p.Type == "int" { - if (argN == 2) && ((funct == "readlen") || (funct == "writelen")) { - args = append(args, fmt.Sprintf("C.size_t(%s)", p.Name)) - } else if argN == 0 && funct == "fcntl" { - args = append(args, fmt.Sprintf("C.uintptr_t(%s)", p.Name)) - } else if (argN == 2) && ((funct == "fcntl") || (funct == "FcntlInt")) { - args = append(args, fmt.Sprintf("C.uintptr_t(%s)", p.Name)) - } else { - args = append(args, fmt.Sprintf("C.int(%s)", p.Name)) - } - } else if p.Type == "int32" { - args = append(args, fmt.Sprintf("C.int(%s)", p.Name)) - } else if p.Type == "int64" { - args = append(args, fmt.Sprintf("C.longlong(%s)", p.Name)) - } else if p.Type == "uint32" { - args = append(args, fmt.Sprintf("C.uint(%s)", p.Name)) - } else if p.Type == "uint64" { - args = append(args, 
fmt.Sprintf("C.ulonglong(%s)", p.Name)) - } else if p.Type == "uintptr" { - args = append(args, fmt.Sprintf("C.uintptr_t(%s)", p.Name)) - } else { - args = append(args, fmt.Sprintf("C.int(%s)", p.Name)) - } - argN++ - } - - // Actual call. - arglist := strings.Join(args, ", ") - call := "" - if sysname == "exit" { - if errvar != "" { - call += "er :=" - } else { - call += "" - } - } else if errvar != "" { - call += "r0,er :=" - } else if retvar != "" { - call += "r0,_ :=" - } else { - call += "" - } - if sysname == "select" { - // select is a keyword of Go. Its name is - // changed to c_select. - call += fmt.Sprintf("C.c_%s(%s)", sysname, arglist) - } else { - call += fmt.Sprintf("C.%s(%s)", sysname, arglist) - } - - // Assign return values. - body := "" - for i := 0; i < len(out); i++ { - p := parseParam(out[i]) - reg := "" - if p.Name == "err" { - reg = "e1" - } else { - reg = "r0" - } - if reg != "e1" { - body += fmt.Sprintf("\t%s = %s(%s)\n", p.Name, p.Type, reg) - } - } - - // verify return - if sysname != "exit" && errvar != "" { - if regexp.MustCompile(`^uintptr`).FindStringSubmatch(cRettype) != nil { - body += "\tif (uintptr(r0) ==^uintptr(0) && er != nil) {\n" - body += fmt.Sprintf("\t\t%s = er\n", errvar) - body += "\t}\n" - } else { - body += "\tif (r0 ==-1 && er != nil) {\n" - body += fmt.Sprintf("\t\t%s = er\n", errvar) - body += "\t}\n" - } - } else if errvar != "" { - body += "\tif (er != nil) {\n" - body += fmt.Sprintf("\t\t%s = er\n", errvar) - body += "\t}\n" - } - - text += fmt.Sprintf("\t%s\n", call) - text += body - - text += "\treturn\n" - text += "}\n" - } - if err := s.Err(); err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } - file.Close() - } - imp := "" - if pack != "unix" { - imp = "import \"golang.org/x/sys/unix\"\n" - - } - fmt.Printf(srcTemplate, cmdLine(), buildTags(), pack, cExtern, imp, text) -} - -const srcTemplate = `// %s -// Code generated by the command above; see README.md. DO NOT EDIT. 
- -// +build %s - -package %s - - -%s -*/ -import "C" -import ( - "unsafe" -) - - -%s - -%s -` diff --git a/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc64.go b/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc64.go deleted file mode 100644 index c96009951..000000000 --- a/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc64.go +++ /dev/null @@ -1,614 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -/* -This program reads a file containing function prototypes -(like syscall_aix.go) and generates system call bodies. -The prototypes are marked by lines beginning with "//sys" -and read like func declarations if //sys is replaced by func, but: - * The parameter lists must give a name for each argument. - This includes return parameters. - * The parameter lists must give a type for each argument: - the (x, y, z int) shorthand is not allowed. - * If the return parameter is an error number, it must be named err. - * If go func name needs to be different than its libc name, - * or the function is not in libc, name could be specified - * at the end, after "=" sign, like - //sys getsockopt(s int, level int, name int, val uintptr, vallen *_Socklen) (err error) = libsocket.getsockopt - - -This program will generate three files and handle both gc and gccgo implementation: - - zsyscall_aix_ppc64.go: the common part of each implementation (error handler, pointer creation) - - zsyscall_aix_ppc64_gc.go: gc part with //go_cgo_import_dynamic and a call to syscall6 - - zsyscall_aix_ppc64_gccgo.go: gccgo part with C function and conversion to C type. - - The generated code looks like this - -zsyscall_aix_ppc64.go -func asyscall(...) (n int, err error) { - // Pointer Creation - r1, e1 := callasyscall(...) 
- // Type Conversion - // Error Handler - return -} - -zsyscall_aix_ppc64_gc.go -//go:cgo_import_dynamic libc_asyscall asyscall "libc.a/shr_64.o" -//go:linkname libc_asyscall libc_asyscall -var asyscall syscallFunc - -func callasyscall(...) (r1 uintptr, e1 Errno) { - r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_asyscall)), "nb_args", ... ) - return -} - -zsyscall_aix_ppc64_ggcgo.go - -// int asyscall(...) - -import "C" - -func callasyscall(...) (r1 uintptr, e1 Errno) { - r1 = uintptr(C.asyscall(...)) - e1 = syscall.GetErrno() - return -} -*/ - -package main - -import ( - "bufio" - "flag" - "fmt" - "io/ioutil" - "os" - "regexp" - "strings" -) - -var ( - b32 = flag.Bool("b32", false, "32bit big-endian") - l32 = flag.Bool("l32", false, "32bit little-endian") - aix = flag.Bool("aix", false, "aix") - tags = flag.String("tags", "", "build tags") -) - -// cmdLine returns this programs's commandline arguments -func cmdLine() string { - return "go run mksyscall_aix_ppc64.go " + strings.Join(os.Args[1:], " ") -} - -// buildTags returns build tags -func buildTags() string { - return *tags -} - -// Param is function parameter -type Param struct { - Name string - Type string -} - -// usage prints the program usage -func usage() { - fmt.Fprintf(os.Stderr, "usage: go run mksyscall_aix_ppc64.go [-b32 | -l32] [-tags x,y] [file ...]\n") - os.Exit(1) -} - -// parseParamList parses parameter list and returns a slice of parameters -func parseParamList(list string) []string { - list = strings.TrimSpace(list) - if list == "" { - return []string{} - } - return regexp.MustCompile(`\s*,\s*`).Split(list, -1) -} - -// parseParam splits a parameter into name and type -func parseParam(p string) Param { - ps := regexp.MustCompile(`^(\S*) (\S*)$`).FindStringSubmatch(p) - if ps == nil { - fmt.Fprintf(os.Stderr, "malformed parameter: %s\n", p) - os.Exit(1) - } - return Param{ps[1], ps[2]} -} - -func main() { - flag.Usage = usage - flag.Parse() - if len(flag.Args()) <= 0 { - 
fmt.Fprintf(os.Stderr, "no files to parse provided\n") - usage() - } - - endianness := "" - if *b32 { - endianness = "big-endian" - } else if *l32 { - endianness = "little-endian" - } - - pack := "" - // GCCGO - textgccgo := "" - cExtern := "/*\n#include \n" - // GC - textgc := "" - dynimports := "" - linknames := "" - var vars []string - // COMMON - textcommon := "" - for _, path := range flag.Args() { - file, err := os.Open(path) - if err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } - s := bufio.NewScanner(file) - for s.Scan() { - t := s.Text() - t = strings.TrimSpace(t) - t = regexp.MustCompile(`\s+`).ReplaceAllString(t, ` `) - if p := regexp.MustCompile(`^package (\S+)$`).FindStringSubmatch(t); p != nil && pack == "" { - pack = p[1] - } - nonblock := regexp.MustCompile(`^\/\/sysnb `).FindStringSubmatch(t) - if regexp.MustCompile(`^\/\/sys `).FindStringSubmatch(t) == nil && nonblock == nil { - continue - } - - // Line must be of the form - // func Open(path string, mode int, perm int) (fd int, err error) - // Split into name, in params, out params. - f := regexp.MustCompile(`^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*(?:(\w*)\.)?(\w*))?$`).FindStringSubmatch(t) - if f == nil { - fmt.Fprintf(os.Stderr, "%s:%s\nmalformed //sys declaration\n", path, t) - os.Exit(1) - } - funct, inps, outps, modname, sysname := f[2], f[3], f[4], f[5], f[6] - - // Split argument lists on comma. - in := parseParamList(inps) - out := parseParamList(outps) - - inps = strings.Join(in, ", ") - outps = strings.Join(out, ", ") - - if sysname == "" { - sysname = funct - } - - onlyCommon := false - if funct == "readlen" || funct == "writelen" || funct == "FcntlInt" || funct == "FcntlFlock" { - // This function call another syscall which is already implemented. - // Therefore, the gc and gccgo part must not be generated. - onlyCommon = true - } - - // Try in vain to keep people from editing this file. 
- // The theory is that they jump into the middle of the file - // without reading the header. - - textcommon += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n" - if !onlyCommon { - textgccgo += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n" - textgc += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n" - } - - // Check if value return, err return available - errvar := "" - rettype := "" - for _, param := range out { - p := parseParam(param) - if p.Type == "error" { - errvar = p.Name - } else { - rettype = p.Type - } - } - - sysname = regexp.MustCompile(`([a-z])([A-Z])`).ReplaceAllString(sysname, `${1}_$2`) - sysname = strings.ToLower(sysname) // All libc functions are lowercase. - - // GCCGO Prototype return type - cRettype := "" - if rettype == "unsafe.Pointer" { - cRettype = "uintptr_t" - } else if rettype == "uintptr" { - cRettype = "uintptr_t" - } else if regexp.MustCompile(`^_`).FindStringSubmatch(rettype) != nil { - cRettype = "uintptr_t" - } else if rettype == "int" { - cRettype = "int" - } else if rettype == "int32" { - cRettype = "int" - } else if rettype == "int64" { - cRettype = "long long" - } else if rettype == "uint32" { - cRettype = "unsigned int" - } else if rettype == "uint64" { - cRettype = "unsigned long long" - } else { - cRettype = "int" - } - if sysname == "exit" { - cRettype = "void" - } - - // GCCGO Prototype arguments type - var cIn []string - for i, param := range in { - p := parseParam(param) - if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil { - cIn = append(cIn, "uintptr_t") - } else if p.Type == "string" { - cIn = append(cIn, "uintptr_t") - } else if regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type) != nil { - cIn = append(cIn, "uintptr_t", "size_t") - } else if p.Type == "unsafe.Pointer" { - cIn = append(cIn, "uintptr_t") - } else if p.Type == "uintptr" { - cIn = append(cIn, "uintptr_t") - } else if 
regexp.MustCompile(`^_`).FindStringSubmatch(p.Type) != nil { - cIn = append(cIn, "uintptr_t") - } else if p.Type == "int" { - if (i == 0 || i == 2) && funct == "fcntl" { - // These fcntl arguments needs to be uintptr to be able to call FcntlInt and FcntlFlock - cIn = append(cIn, "uintptr_t") - } else { - cIn = append(cIn, "int") - } - - } else if p.Type == "int32" { - cIn = append(cIn, "int") - } else if p.Type == "int64" { - cIn = append(cIn, "long long") - } else if p.Type == "uint32" { - cIn = append(cIn, "unsigned int") - } else if p.Type == "uint64" { - cIn = append(cIn, "unsigned long long") - } else { - cIn = append(cIn, "int") - } - } - - if !onlyCommon { - // GCCGO Prototype Generation - // Imports of system calls from libc - if sysname == "select" { - // select is a keyword of Go. Its name is - // changed to c_select. - cExtern += "#define c_select select\n" - } - cExtern += fmt.Sprintf("%s %s", cRettype, sysname) - cIn := strings.Join(cIn, ", ") - cExtern += fmt.Sprintf("(%s);\n", cIn) - } - // GC Library name - if modname == "" { - modname = "libc.a/shr_64.o" - } else { - fmt.Fprintf(os.Stderr, "%s: only syscall using libc are available\n", funct) - os.Exit(1) - } - sysvarname := fmt.Sprintf("libc_%s", sysname) - - if !onlyCommon { - // GC Runtime import of function to allow cross-platform builds. - dynimports += fmt.Sprintf("//go:cgo_import_dynamic %s %s \"%s\"\n", sysvarname, sysname, modname) - // GC Link symbol to proc address variable. - linknames += fmt.Sprintf("//go:linkname %s %s\n", sysvarname, sysvarname) - // GC Library proc address variable. - vars = append(vars, sysvarname) - } - - strconvfunc := "BytePtrFromString" - strconvtype := "*byte" - - // Go function header. - if outps != "" { - outps = fmt.Sprintf(" (%s)", outps) - } - if textcommon != "" { - textcommon += "\n" - } - - textcommon += fmt.Sprintf("func %s(%s)%s {\n", funct, strings.Join(in, ", "), outps) - - // Prepare arguments tocall. 
- var argscommon []string // Arguments in the common part - var argscall []string // Arguments for call prototype - var argsgc []string // Arguments for gc call (with syscall6) - var argsgccgo []string // Arguments for gccgo call (with C.name_of_syscall) - n := 0 - argN := 0 - for _, param := range in { - p := parseParam(param) - if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil { - argscommon = append(argscommon, fmt.Sprintf("uintptr(unsafe.Pointer(%s))", p.Name)) - argscall = append(argscall, fmt.Sprintf("%s uintptr", p.Name)) - argsgc = append(argsgc, p.Name) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(%s)", p.Name)) - } else if p.Type == "string" && errvar != "" { - textcommon += fmt.Sprintf("\tvar _p%d %s\n", n, strconvtype) - textcommon += fmt.Sprintf("\t_p%d, %s = %s(%s)\n", n, errvar, strconvfunc, p.Name) - textcommon += fmt.Sprintf("\tif %s != nil {\n\t\treturn\n\t}\n", errvar) - - argscommon = append(argscommon, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n)) - argscall = append(argscall, fmt.Sprintf("_p%d uintptr ", n)) - argsgc = append(argsgc, fmt.Sprintf("_p%d", n)) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(_p%d)", n)) - n++ - } else if p.Type == "string" { - fmt.Fprintf(os.Stderr, path+":"+funct+" uses string arguments, but has no error return\n") - textcommon += fmt.Sprintf("\tvar _p%d %s\n", n, strconvtype) - textcommon += fmt.Sprintf("\t_p%d, %s = %s(%s)\n", n, errvar, strconvfunc, p.Name) - textcommon += fmt.Sprintf("\tif %s != nil {\n\t\treturn\n\t}\n", errvar) - - argscommon = append(argscommon, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n)) - argscall = append(argscall, fmt.Sprintf("_p%d uintptr", n)) - argsgc = append(argsgc, fmt.Sprintf("_p%d", n)) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(_p%d)", n)) - n++ - } else if m := regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type); m != nil { - // Convert slice into pointer, length. 
- // Have to be careful not to take address of &a[0] if len == 0: - // pass nil in that case. - textcommon += fmt.Sprintf("\tvar _p%d *%s\n", n, m[1]) - textcommon += fmt.Sprintf("\tif len(%s) > 0 {\n\t\t_p%d = &%s[0]\n\t}\n", p.Name, n, p.Name) - argscommon = append(argscommon, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n), fmt.Sprintf("len(%s)", p.Name)) - argscall = append(argscall, fmt.Sprintf("_p%d uintptr", n), fmt.Sprintf("_lenp%d int", n)) - argsgc = append(argsgc, fmt.Sprintf("_p%d", n), fmt.Sprintf("uintptr(_lenp%d)", n)) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(_p%d)", n), fmt.Sprintf("C.size_t(_lenp%d)", n)) - n++ - } else if p.Type == "int64" && endianness != "" { - fmt.Fprintf(os.Stderr, path+":"+funct+" uses int64 with 32 bits mode. Case not yet implemented\n") - } else if p.Type == "bool" { - fmt.Fprintf(os.Stderr, path+":"+funct+" uses bool. Case not yet implemented\n") - } else if regexp.MustCompile(`^_`).FindStringSubmatch(p.Type) != nil || p.Type == "unsafe.Pointer" { - argscommon = append(argscommon, fmt.Sprintf("uintptr(%s)", p.Name)) - argscall = append(argscall, fmt.Sprintf("%s uintptr", p.Name)) - argsgc = append(argsgc, p.Name) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(%s)", p.Name)) - } else if p.Type == "int" { - if (argN == 0 || argN == 2) && ((funct == "fcntl") || (funct == "FcntlInt") || (funct == "FcntlFlock")) { - // These fcntl arguments need to be uintptr to be able to call FcntlInt and FcntlFlock - argscommon = append(argscommon, fmt.Sprintf("uintptr(%s)", p.Name)) - argscall = append(argscall, fmt.Sprintf("%s uintptr", p.Name)) - argsgc = append(argsgc, p.Name) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(%s)", p.Name)) - - } else { - argscommon = append(argscommon, p.Name) - argscall = append(argscall, fmt.Sprintf("%s int", p.Name)) - argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name)) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.int(%s)", p.Name)) - } - } else if p.Type == 
"int32" { - argscommon = append(argscommon, p.Name) - argscall = append(argscall, fmt.Sprintf("%s int32", p.Name)) - argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name)) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.int(%s)", p.Name)) - } else if p.Type == "int64" { - argscommon = append(argscommon, p.Name) - argscall = append(argscall, fmt.Sprintf("%s int64", p.Name)) - argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name)) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.longlong(%s)", p.Name)) - } else if p.Type == "uint32" { - argscommon = append(argscommon, p.Name) - argscall = append(argscall, fmt.Sprintf("%s uint32", p.Name)) - argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name)) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.uint(%s)", p.Name)) - } else if p.Type == "uint64" { - argscommon = append(argscommon, p.Name) - argscall = append(argscall, fmt.Sprintf("%s uint64", p.Name)) - argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name)) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.ulonglong(%s)", p.Name)) - } else if p.Type == "uintptr" { - argscommon = append(argscommon, p.Name) - argscall = append(argscall, fmt.Sprintf("%s uintptr", p.Name)) - argsgc = append(argsgc, p.Name) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(%s)", p.Name)) - } else { - argscommon = append(argscommon, fmt.Sprintf("int(%s)", p.Name)) - argscall = append(argscall, fmt.Sprintf("%s int", p.Name)) - argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name)) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.int(%s)", p.Name)) - } - argN++ - } - nargs := len(argsgc) - - // COMMON function generation - argscommonlist := strings.Join(argscommon, ", ") - callcommon := fmt.Sprintf("call%s(%s)", sysname, argscommonlist) - ret := []string{"_", "_"} - body := "" - doErrno := false - for i := 0; i < len(out); i++ { - p := parseParam(out[i]) - reg := "" - if p.Name == "err" { - reg = "e1" - ret[1] = reg - doErrno = true - } else { - reg = "r0" - ret[0] = reg - 
} - if p.Type == "bool" { - reg = fmt.Sprintf("%s != 0", reg) - } - if reg != "e1" { - body += fmt.Sprintf("\t%s = %s(%s)\n", p.Name, p.Type, reg) - } - } - if ret[0] == "_" && ret[1] == "_" { - textcommon += fmt.Sprintf("\t%s\n", callcommon) - } else { - textcommon += fmt.Sprintf("\t%s, %s := %s\n", ret[0], ret[1], callcommon) - } - textcommon += body - - if doErrno { - textcommon += "\tif e1 != 0 {\n" - textcommon += "\t\terr = errnoErr(e1)\n" - textcommon += "\t}\n" - } - textcommon += "\treturn\n" - textcommon += "}\n" - - if onlyCommon { - continue - } - - // CALL Prototype - callProto := fmt.Sprintf("func call%s(%s) (r1 uintptr, e1 Errno) {\n", sysname, strings.Join(argscall, ", ")) - - // GC function generation - asm := "syscall6" - if nonblock != nil { - asm = "rawSyscall6" - } - - if len(argsgc) <= 6 { - for len(argsgc) < 6 { - argsgc = append(argsgc, "0") - } - } else { - fmt.Fprintf(os.Stderr, "%s: too many arguments to system call", funct) - os.Exit(1) - } - argsgclist := strings.Join(argsgc, ", ") - callgc := fmt.Sprintf("%s(uintptr(unsafe.Pointer(&%s)), %d, %s)", asm, sysvarname, nargs, argsgclist) - - textgc += callProto - textgc += fmt.Sprintf("\tr1, _, e1 = %s\n", callgc) - textgc += "\treturn\n}\n" - - // GCCGO function generation - argsgccgolist := strings.Join(argsgccgo, ", ") - var callgccgo string - if sysname == "select" { - // select is a keyword of Go. Its name is - // changed to c_select. 
- callgccgo = fmt.Sprintf("C.c_%s(%s)", sysname, argsgccgolist) - } else { - callgccgo = fmt.Sprintf("C.%s(%s)", sysname, argsgccgolist) - } - textgccgo += callProto - textgccgo += fmt.Sprintf("\tr1 = uintptr(%s)\n", callgccgo) - textgccgo += "\te1 = syscall.GetErrno()\n" - textgccgo += "\treturn\n}\n" - } - if err := s.Err(); err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } - file.Close() - } - imp := "" - if pack != "unix" { - imp = "import \"golang.org/x/sys/unix\"\n" - - } - - // Print zsyscall_aix_ppc64.go - err := ioutil.WriteFile("zsyscall_aix_ppc64.go", - []byte(fmt.Sprintf(srcTemplate1, cmdLine(), buildTags(), pack, imp, textcommon)), - 0644) - if err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } - - // Print zsyscall_aix_ppc64_gc.go - vardecls := "\t" + strings.Join(vars, ",\n\t") - vardecls += " syscallFunc" - err = ioutil.WriteFile("zsyscall_aix_ppc64_gc.go", - []byte(fmt.Sprintf(srcTemplate2, cmdLine(), buildTags(), pack, imp, dynimports, linknames, vardecls, textgc)), - 0644) - if err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } - - // Print zsyscall_aix_ppc64_gccgo.go - err = ioutil.WriteFile("zsyscall_aix_ppc64_gccgo.go", - []byte(fmt.Sprintf(srcTemplate3, cmdLine(), buildTags(), pack, cExtern, imp, textgccgo)), - 0644) - if err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } -} - -const srcTemplate1 = `// %s -// Code generated by the command above; see README.md. DO NOT EDIT. - -// +build %s - -package %s - -import ( - "unsafe" -) - - -%s - -%s -` -const srcTemplate2 = `// %s -// Code generated by the command above; see README.md. DO NOT EDIT. - -// +build %s -// +build !gccgo - -package %s - -import ( - "unsafe" -) -%s -%s -%s -type syscallFunc uintptr - -var ( -%s -) - -// Implemented in runtime/syscall_aix.go. 
-func rawSyscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno) -func syscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno) - -%s -` -const srcTemplate3 = `// %s -// Code generated by the command above; see README.md. DO NOT EDIT. - -// +build %s -// +build gccgo - -package %s - -%s -*/ -import "C" -import ( - "syscall" -) - - -%s - -%s -` diff --git a/vendor/golang.org/x/sys/unix/mksyscall_solaris.go b/vendor/golang.org/x/sys/unix/mksyscall_solaris.go deleted file mode 100644 index 3d864738b..000000000 --- a/vendor/golang.org/x/sys/unix/mksyscall_solaris.go +++ /dev/null @@ -1,335 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -/* - This program reads a file containing function prototypes - (like syscall_solaris.go) and generates system call bodies. - The prototypes are marked by lines beginning with "//sys" - and read like func declarations if //sys is replaced by func, but: - * The parameter lists must give a name for each argument. - This includes return parameters. - * The parameter lists must give a type for each argument: - the (x, y, z int) shorthand is not allowed. - * If the return parameter is an error number, it must be named err. 
- * If go func name needs to be different than its libc name, - * or the function is not in libc, name could be specified - * at the end, after "=" sign, like - //sys getsockopt(s int, level int, name int, val uintptr, vallen *_Socklen) (err error) = libsocket.getsockopt -*/ - -package main - -import ( - "bufio" - "flag" - "fmt" - "os" - "regexp" - "strings" -) - -var ( - b32 = flag.Bool("b32", false, "32bit big-endian") - l32 = flag.Bool("l32", false, "32bit little-endian") - tags = flag.String("tags", "", "build tags") -) - -// cmdLine returns this programs's commandline arguments -func cmdLine() string { - return "go run mksyscall_solaris.go " + strings.Join(os.Args[1:], " ") -} - -// buildTags returns build tags -func buildTags() string { - return *tags -} - -// Param is function parameter -type Param struct { - Name string - Type string -} - -// usage prints the program usage -func usage() { - fmt.Fprintf(os.Stderr, "usage: go run mksyscall_solaris.go [-b32 | -l32] [-tags x,y] [file ...]\n") - os.Exit(1) -} - -// parseParamList parses parameter list and returns a slice of parameters -func parseParamList(list string) []string { - list = strings.TrimSpace(list) - if list == "" { - return []string{} - } - return regexp.MustCompile(`\s*,\s*`).Split(list, -1) -} - -// parseParam splits a parameter into name and type -func parseParam(p string) Param { - ps := regexp.MustCompile(`^(\S*) (\S*)$`).FindStringSubmatch(p) - if ps == nil { - fmt.Fprintf(os.Stderr, "malformed parameter: %s\n", p) - os.Exit(1) - } - return Param{ps[1], ps[2]} -} - -func main() { - flag.Usage = usage - flag.Parse() - if len(flag.Args()) <= 0 { - fmt.Fprintf(os.Stderr, "no files to parse provided\n") - usage() - } - - endianness := "" - if *b32 { - endianness = "big-endian" - } else if *l32 { - endianness = "little-endian" - } - - pack := "" - text := "" - dynimports := "" - linknames := "" - var vars []string - for _, path := range flag.Args() { - file, err := os.Open(path) - if err != nil { 
- fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } - s := bufio.NewScanner(file) - for s.Scan() { - t := s.Text() - t = strings.TrimSpace(t) - t = regexp.MustCompile(`\s+`).ReplaceAllString(t, ` `) - if p := regexp.MustCompile(`^package (\S+)$`).FindStringSubmatch(t); p != nil && pack == "" { - pack = p[1] - } - nonblock := regexp.MustCompile(`^\/\/sysnb `).FindStringSubmatch(t) - if regexp.MustCompile(`^\/\/sys `).FindStringSubmatch(t) == nil && nonblock == nil { - continue - } - - // Line must be of the form - // func Open(path string, mode int, perm int) (fd int, err error) - // Split into name, in params, out params. - f := regexp.MustCompile(`^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*(?:(\w*)\.)?(\w*))?$`).FindStringSubmatch(t) - if f == nil { - fmt.Fprintf(os.Stderr, "%s:%s\nmalformed //sys declaration\n", path, t) - os.Exit(1) - } - funct, inps, outps, modname, sysname := f[2], f[3], f[4], f[5], f[6] - - // Split argument lists on comma. - in := parseParamList(inps) - out := parseParamList(outps) - - inps = strings.Join(in, ", ") - outps = strings.Join(out, ", ") - - // Try in vain to keep people from editing this file. - // The theory is that they jump into the middle of the file - // without reading the header. - text += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n" - - // So file name. - if modname == "" { - modname = "libc" - } - - // System call name. - if sysname == "" { - sysname = funct - } - - // System call pointer variable name. - sysvarname := fmt.Sprintf("proc%s", sysname) - - strconvfunc := "BytePtrFromString" - strconvtype := "*byte" - - sysname = strings.ToLower(sysname) // All libc functions are lowercase. - - // Runtime import of function to allow cross-platform builds. - dynimports += fmt.Sprintf("//go:cgo_import_dynamic libc_%s %s \"%s.so\"\n", sysname, sysname, modname) - // Link symbol to proc address variable. 
- linknames += fmt.Sprintf("//go:linkname %s libc_%s\n", sysvarname, sysname) - // Library proc address variable. - vars = append(vars, sysvarname) - - // Go function header. - outlist := strings.Join(out, ", ") - if outlist != "" { - outlist = fmt.Sprintf(" (%s)", outlist) - } - if text != "" { - text += "\n" - } - text += fmt.Sprintf("func %s(%s)%s {\n", funct, strings.Join(in, ", "), outlist) - - // Check if err return available - errvar := "" - for _, param := range out { - p := parseParam(param) - if p.Type == "error" { - errvar = p.Name - continue - } - } - - // Prepare arguments to Syscall. - var args []string - n := 0 - for _, param := range in { - p := parseParam(param) - if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil { - args = append(args, "uintptr(unsafe.Pointer("+p.Name+"))") - } else if p.Type == "string" && errvar != "" { - text += fmt.Sprintf("\tvar _p%d %s\n", n, strconvtype) - text += fmt.Sprintf("\t_p%d, %s = %s(%s)\n", n, errvar, strconvfunc, p.Name) - text += fmt.Sprintf("\tif %s != nil {\n\t\treturn\n\t}\n", errvar) - args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n)) - n++ - } else if p.Type == "string" { - fmt.Fprintf(os.Stderr, path+":"+funct+" uses string arguments, but has no error return\n") - text += fmt.Sprintf("\tvar _p%d %s\n", n, strconvtype) - text += fmt.Sprintf("\t_p%d, _ = %s(%s)\n", n, strconvfunc, p.Name) - args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n)) - n++ - } else if s := regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type); s != nil { - // Convert slice into pointer, length. - // Have to be careful not to take address of &a[0] if len == 0: - // pass nil in that case. 
- text += fmt.Sprintf("\tvar _p%d *%s\n", n, s[1]) - text += fmt.Sprintf("\tif len(%s) > 0 {\n\t\t_p%d = &%s[0]\n\t}\n", p.Name, n, p.Name) - args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n), fmt.Sprintf("uintptr(len(%s))", p.Name)) - n++ - } else if p.Type == "int64" && endianness != "" { - if endianness == "big-endian" { - args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name)) - } else { - args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name)) - } - } else if p.Type == "bool" { - text += fmt.Sprintf("\tvar _p%d uint32\n", n) - text += fmt.Sprintf("\tif %s {\n\t\t_p%d = 1\n\t} else {\n\t\t_p%d = 0\n\t}\n", p.Name, n, n) - args = append(args, fmt.Sprintf("uintptr(_p%d)", n)) - n++ - } else { - args = append(args, fmt.Sprintf("uintptr(%s)", p.Name)) - } - } - nargs := len(args) - - // Determine which form to use; pad args with zeros. - asm := "sysvicall6" - if nonblock != nil { - asm = "rawSysvicall6" - } - if len(args) <= 6 { - for len(args) < 6 { - args = append(args, "0") - } - } else { - fmt.Fprintf(os.Stderr, "%s: too many arguments to system call\n", path) - os.Exit(1) - } - - // Actual call. - arglist := strings.Join(args, ", ") - call := fmt.Sprintf("%s(uintptr(unsafe.Pointer(&%s)), %d, %s)", asm, sysvarname, nargs, arglist) - - // Assign return values. - body := "" - ret := []string{"_", "_", "_"} - doErrno := false - for i := 0; i < len(out); i++ { - p := parseParam(out[i]) - reg := "" - if p.Name == "err" { - reg = "e1" - ret[2] = reg - doErrno = true - } else { - reg = fmt.Sprintf("r%d", i) - ret[i] = reg - } - if p.Type == "bool" { - reg = fmt.Sprintf("%d != 0", reg) - } - if p.Type == "int64" && endianness != "" { - // 64-bit number in r1:r0 or r0:r1. 
- if i+2 > len(out) { - fmt.Fprintf(os.Stderr, "%s: not enough registers for int64 return\n", path) - os.Exit(1) - } - if endianness == "big-endian" { - reg = fmt.Sprintf("int64(r%d)<<32 | int64(r%d)", i, i+1) - } else { - reg = fmt.Sprintf("int64(r%d)<<32 | int64(r%d)", i+1, i) - } - ret[i] = fmt.Sprintf("r%d", i) - ret[i+1] = fmt.Sprintf("r%d", i+1) - } - if reg != "e1" { - body += fmt.Sprintf("\t%s = %s(%s)\n", p.Name, p.Type, reg) - } - } - if ret[0] == "_" && ret[1] == "_" && ret[2] == "_" { - text += fmt.Sprintf("\t%s\n", call) - } else { - text += fmt.Sprintf("\t%s, %s, %s := %s\n", ret[0], ret[1], ret[2], call) - } - text += body - - if doErrno { - text += "\tif e1 != 0 {\n" - text += "\t\terr = e1\n" - text += "\t}\n" - } - text += "\treturn\n" - text += "}\n" - } - if err := s.Err(); err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } - file.Close() - } - imp := "" - if pack != "unix" { - imp = "import \"golang.org/x/sys/unix\"\n" - - } - vardecls := "\t" + strings.Join(vars, ",\n\t") - vardecls += " syscallFunc" - fmt.Printf(srcTemplate, cmdLine(), buildTags(), pack, imp, dynimports, linknames, vardecls, text) -} - -const srcTemplate = `// %s -// Code generated by the command above; see README.md. DO NOT EDIT. - -// +build %s - -package %s - -import ( - "syscall" - "unsafe" -) -%s -%s -%s -var ( -%s -) - -%s -` diff --git a/vendor/golang.org/x/sys/unix/mksysctl_openbsd.go b/vendor/golang.org/x/sys/unix/mksysctl_openbsd.go deleted file mode 100644 index b6b409909..000000000 --- a/vendor/golang.org/x/sys/unix/mksysctl_openbsd.go +++ /dev/null @@ -1,355 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// Parse the header files for OpenBSD and generate a Go usable sysctl MIB. 
-// -// Build a MIB with each entry being an array containing the level, type and -// a hash that will contain additional entries if the current entry is a node. -// We then walk this MIB and create a flattened sysctl name to OID hash. - -package main - -import ( - "bufio" - "fmt" - "os" - "path/filepath" - "regexp" - "sort" - "strings" -) - -var ( - goos, goarch string -) - -// cmdLine returns this programs's commandline arguments. -func cmdLine() string { - return "go run mksysctl_openbsd.go " + strings.Join(os.Args[1:], " ") -} - -// buildTags returns build tags. -func buildTags() string { - return fmt.Sprintf("%s,%s", goarch, goos) -} - -// reMatch performs regular expression match and stores the substring slice to value pointed by m. -func reMatch(re *regexp.Regexp, str string, m *[]string) bool { - *m = re.FindStringSubmatch(str) - if *m != nil { - return true - } - return false -} - -type nodeElement struct { - n int - t string - pE *map[string]nodeElement -} - -var ( - debugEnabled bool - mib map[string]nodeElement - node *map[string]nodeElement - nodeMap map[string]string - sysCtl []string -) - -var ( - ctlNames1RE = regexp.MustCompile(`^#define\s+(CTL_NAMES)\s+{`) - ctlNames2RE = regexp.MustCompile(`^#define\s+(CTL_(.*)_NAMES)\s+{`) - ctlNames3RE = regexp.MustCompile(`^#define\s+((.*)CTL_NAMES)\s+{`) - netInetRE = regexp.MustCompile(`^netinet/`) - netInet6RE = regexp.MustCompile(`^netinet6/`) - netRE = regexp.MustCompile(`^net/`) - bracesRE = regexp.MustCompile(`{.*}`) - ctlTypeRE = regexp.MustCompile(`{\s+"(\w+)",\s+(CTLTYPE_[A-Z]+)\s+}`) - fsNetKernRE = regexp.MustCompile(`^(fs|net|kern)_`) -) - -func debug(s string) { - if debugEnabled { - fmt.Fprintln(os.Stderr, s) - } -} - -// Walk the MIB and build a sysctl name to OID mapping. 
-func buildSysctl(pNode *map[string]nodeElement, name string, oid []int) { - lNode := pNode // local copy of pointer to node - var keys []string - for k := range *lNode { - keys = append(keys, k) - } - sort.Strings(keys) - - for _, key := range keys { - nodename := name - if name != "" { - nodename += "." - } - nodename += key - - nodeoid := append(oid, (*pNode)[key].n) - - if (*pNode)[key].t == `CTLTYPE_NODE` { - if _, ok := nodeMap[nodename]; ok { - lNode = &mib - ctlName := nodeMap[nodename] - for _, part := range strings.Split(ctlName, ".") { - lNode = ((*lNode)[part]).pE - } - } else { - lNode = (*pNode)[key].pE - } - buildSysctl(lNode, nodename, nodeoid) - } else if (*pNode)[key].t != "" { - oidStr := []string{} - for j := range nodeoid { - oidStr = append(oidStr, fmt.Sprintf("%d", nodeoid[j])) - } - text := "\t{ \"" + nodename + "\", []_C_int{ " + strings.Join(oidStr, ", ") + " } }, \n" - sysCtl = append(sysCtl, text) - } - } -} - -func main() { - // Get the OS (using GOOS_TARGET if it exist) - goos = os.Getenv("GOOS_TARGET") - if goos == "" { - goos = os.Getenv("GOOS") - } - // Get the architecture (using GOARCH_TARGET if it exists) - goarch = os.Getenv("GOARCH_TARGET") - if goarch == "" { - goarch = os.Getenv("GOARCH") - } - // Check if GOOS and GOARCH environment variables are defined - if goarch == "" || goos == "" { - fmt.Fprintf(os.Stderr, "GOARCH or GOOS not defined in environment\n") - os.Exit(1) - } - - mib = make(map[string]nodeElement) - headers := [...]string{ - `sys/sysctl.h`, - `sys/socket.h`, - `sys/tty.h`, - `sys/malloc.h`, - `sys/mount.h`, - `sys/namei.h`, - `sys/sem.h`, - `sys/shm.h`, - `sys/vmmeter.h`, - `uvm/uvmexp.h`, - `uvm/uvm_param.h`, - `uvm/uvm_swap_encrypt.h`, - `ddb/db_var.h`, - `net/if.h`, - `net/if_pfsync.h`, - `net/pipex.h`, - `netinet/in.h`, - `netinet/icmp_var.h`, - `netinet/igmp_var.h`, - `netinet/ip_ah.h`, - `netinet/ip_carp.h`, - `netinet/ip_divert.h`, - `netinet/ip_esp.h`, - `netinet/ip_ether.h`, - `netinet/ip_gre.h`, - 
`netinet/ip_ipcomp.h`, - `netinet/ip_ipip.h`, - `netinet/pim_var.h`, - `netinet/tcp_var.h`, - `netinet/udp_var.h`, - `netinet6/in6.h`, - `netinet6/ip6_divert.h`, - `netinet6/pim6_var.h`, - `netinet/icmp6.h`, - `netmpls/mpls.h`, - } - - ctls := [...]string{ - `kern`, - `vm`, - `fs`, - `net`, - //debug /* Special handling required */ - `hw`, - //machdep /* Arch specific */ - `user`, - `ddb`, - //vfs /* Special handling required */ - `fs.posix`, - `kern.forkstat`, - `kern.intrcnt`, - `kern.malloc`, - `kern.nchstats`, - `kern.seminfo`, - `kern.shminfo`, - `kern.timecounter`, - `kern.tty`, - `kern.watchdog`, - `net.bpf`, - `net.ifq`, - `net.inet`, - `net.inet.ah`, - `net.inet.carp`, - `net.inet.divert`, - `net.inet.esp`, - `net.inet.etherip`, - `net.inet.gre`, - `net.inet.icmp`, - `net.inet.igmp`, - `net.inet.ip`, - `net.inet.ip.ifq`, - `net.inet.ipcomp`, - `net.inet.ipip`, - `net.inet.mobileip`, - `net.inet.pfsync`, - `net.inet.pim`, - `net.inet.tcp`, - `net.inet.udp`, - `net.inet6`, - `net.inet6.divert`, - `net.inet6.ip6`, - `net.inet6.icmp6`, - `net.inet6.pim6`, - `net.inet6.tcp6`, - `net.inet6.udp6`, - `net.mpls`, - `net.mpls.ifq`, - `net.key`, - `net.pflow`, - `net.pfsync`, - `net.pipex`, - `net.rt`, - `vm.swapencrypt`, - //vfsgenctl /* Special handling required */ - } - - // Node name "fixups" - ctlMap := map[string]string{ - "ipproto": "net.inet", - "net.inet.ipproto": "net.inet", - "net.inet6.ipv6proto": "net.inet6", - "net.inet6.ipv6": "net.inet6.ip6", - "net.inet.icmpv6": "net.inet6.icmp6", - "net.inet6.divert6": "net.inet6.divert", - "net.inet6.tcp6": "net.inet.tcp", - "net.inet6.udp6": "net.inet.udp", - "mpls": "net.mpls", - "swpenc": "vm.swapencrypt", - } - - // Node mappings - nodeMap = map[string]string{ - "net.inet.ip.ifq": "net.ifq", - "net.inet.pfsync": "net.pfsync", - "net.mpls.ifq": "net.ifq", - } - - mCtls := make(map[string]bool) - for _, ctl := range ctls { - mCtls[ctl] = true - } - - for _, header := range headers { - debug("Processing " + 
header) - file, err := os.Open(filepath.Join("/usr/include", header)) - if err != nil { - fmt.Fprintf(os.Stderr, "%v\n", err) - os.Exit(1) - } - s := bufio.NewScanner(file) - for s.Scan() { - var sub []string - if reMatch(ctlNames1RE, s.Text(), &sub) || - reMatch(ctlNames2RE, s.Text(), &sub) || - reMatch(ctlNames3RE, s.Text(), &sub) { - if sub[1] == `CTL_NAMES` { - // Top level. - node = &mib - } else { - // Node. - nodename := strings.ToLower(sub[2]) - ctlName := "" - if reMatch(netInetRE, header, &sub) { - ctlName = "net.inet." + nodename - } else if reMatch(netInet6RE, header, &sub) { - ctlName = "net.inet6." + nodename - } else if reMatch(netRE, header, &sub) { - ctlName = "net." + nodename - } else { - ctlName = nodename - ctlName = fsNetKernRE.ReplaceAllString(ctlName, `$1.`) - } - - if val, ok := ctlMap[ctlName]; ok { - ctlName = val - } - if _, ok := mCtls[ctlName]; !ok { - debug("Ignoring " + ctlName + "...") - continue - } - - // Walk down from the top of the MIB. - node = &mib - for _, part := range strings.Split(ctlName, ".") { - if _, ok := (*node)[part]; !ok { - debug("Missing node " + part) - (*node)[part] = nodeElement{n: 0, t: "", pE: &map[string]nodeElement{}} - } - node = (*node)[part].pE - } - } - - // Populate current node with entries. - i := -1 - for !strings.HasPrefix(s.Text(), "}") { - s.Scan() - if reMatch(bracesRE, s.Text(), &sub) { - i++ - } - if !reMatch(ctlTypeRE, s.Text(), &sub) { - continue - } - (*node)[sub[1]] = nodeElement{n: i, t: sub[2], pE: &map[string]nodeElement{}} - } - } - } - err = s.Err() - if err != nil { - fmt.Fprintf(os.Stderr, "%v\n", err) - os.Exit(1) - } - file.Close() - } - buildSysctl(&mib, "", []int{}) - - sort.Strings(sysCtl) - text := strings.Join(sysCtl, "") - - fmt.Printf(srcTemplate, cmdLine(), buildTags(), text) -} - -const srcTemplate = `// %s -// Code generated by the command above; DO NOT EDIT. 
- -// +build %s - -package unix - -type mibentry struct { - ctlname string - ctloid []_C_int -} - -var sysctlMib = []mibentry { -%s -} -` diff --git a/vendor/golang.org/x/sys/unix/mksysnum.go b/vendor/golang.org/x/sys/unix/mksysnum.go deleted file mode 100644 index baa6ecd85..000000000 --- a/vendor/golang.org/x/sys/unix/mksysnum.go +++ /dev/null @@ -1,190 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// Generate system call table for DragonFly, NetBSD, -// FreeBSD, OpenBSD or Darwin from master list -// (for example, /usr/src/sys/kern/syscalls.master or -// sys/syscall.h). -package main - -import ( - "bufio" - "fmt" - "io" - "io/ioutil" - "net/http" - "os" - "regexp" - "strings" -) - -var ( - goos, goarch string -) - -// cmdLine returns this programs's commandline arguments -func cmdLine() string { - return "go run mksysnum.go " + strings.Join(os.Args[1:], " ") -} - -// buildTags returns build tags -func buildTags() string { - return fmt.Sprintf("%s,%s", goarch, goos) -} - -func checkErr(err error) { - if err != nil { - fmt.Fprintf(os.Stderr, "%v\n", err) - os.Exit(1) - } -} - -// source string and substring slice for regexp -type re struct { - str string // source string - sub []string // matched sub-string -} - -// Match performs regular expression match -func (r *re) Match(exp string) bool { - r.sub = regexp.MustCompile(exp).FindStringSubmatch(r.str) - if r.sub != nil { - return true - } - return false -} - -// fetchFile fetches a text file from URL -func fetchFile(URL string) io.Reader { - resp, err := http.Get(URL) - checkErr(err) - defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) - checkErr(err) - return strings.NewReader(string(body)) -} - -// readFile reads a text file from path -func readFile(path string) io.Reader { - file, err := os.Open(os.Args[1]) - checkErr(err) - return file -} - -func 
format(name, num, proto string) string { - name = strings.ToUpper(name) - // There are multiple entries for enosys and nosys, so comment them out. - nm := re{str: name} - if nm.Match(`^SYS_E?NOSYS$`) { - name = fmt.Sprintf("// %s", name) - } - if name == `SYS_SYS_EXIT` { - name = `SYS_EXIT` - } - return fmt.Sprintf(" %s = %s; // %s\n", name, num, proto) -} - -func main() { - // Get the OS (using GOOS_TARGET if it exist) - goos = os.Getenv("GOOS_TARGET") - if goos == "" { - goos = os.Getenv("GOOS") - } - // Get the architecture (using GOARCH_TARGET if it exists) - goarch = os.Getenv("GOARCH_TARGET") - if goarch == "" { - goarch = os.Getenv("GOARCH") - } - // Check if GOOS and GOARCH environment variables are defined - if goarch == "" || goos == "" { - fmt.Fprintf(os.Stderr, "GOARCH or GOOS not defined in environment\n") - os.Exit(1) - } - - file := strings.TrimSpace(os.Args[1]) - var syscalls io.Reader - if strings.HasPrefix(file, "https://") || strings.HasPrefix(file, "http://") { - // Download syscalls.master file - syscalls = fetchFile(file) - } else { - syscalls = readFile(file) - } - - var text, line string - s := bufio.NewScanner(syscalls) - for s.Scan() { - t := re{str: line} - if t.Match(`^(.*)\\$`) { - // Handle continuation - line = t.sub[1] - line += strings.TrimLeft(s.Text(), " \t") - } else { - // New line - line = s.Text() - } - t = re{str: line} - if t.Match(`\\$`) { - continue - } - t = re{str: line} - - switch goos { - case "dragonfly": - if t.Match(`^([0-9]+)\s+STD\s+({ \S+\s+(\w+).*)$`) { - num, proto := t.sub[1], t.sub[2] - name := fmt.Sprintf("SYS_%s", t.sub[3]) - text += format(name, num, proto) - } - case "freebsd": - if t.Match(`^([0-9]+)\s+\S+\s+(?:(?:NO)?STD|COMPAT10)\s+({ \S+\s+(\w+).*)$`) { - num, proto := t.sub[1], t.sub[2] - name := fmt.Sprintf("SYS_%s", t.sub[3]) - text += format(name, num, proto) - } - case "openbsd": - if t.Match(`^([0-9]+)\s+STD\s+(NOLOCK\s+)?({ \S+\s+\*?(\w+).*)$`) { - num, proto, name := t.sub[1], t.sub[3], 
t.sub[4] - text += format(name, num, proto) - } - case "netbsd": - if t.Match(`^([0-9]+)\s+((STD)|(NOERR))\s+(RUMP\s+)?({\s+\S+\s*\*?\s*\|(\S+)\|(\S*)\|(\w+).*\s+})(\s+(\S+))?$`) { - num, proto, compat := t.sub[1], t.sub[6], t.sub[8] - name := t.sub[7] + "_" + t.sub[9] - if t.sub[11] != "" { - name = t.sub[7] + "_" + t.sub[11] - } - name = strings.ToUpper(name) - if compat == "" || compat == "13" || compat == "30" || compat == "50" { - text += fmt.Sprintf(" %s = %s; // %s\n", name, num, proto) - } - } - case "darwin": - if t.Match(`^#define\s+SYS_(\w+)\s+([0-9]+)`) { - name, num := t.sub[1], t.sub[2] - name = strings.ToUpper(name) - text += fmt.Sprintf(" SYS_%s = %s;\n", name, num) - } - default: - fmt.Fprintf(os.Stderr, "unrecognized GOOS=%s\n", goos) - os.Exit(1) - - } - } - err := s.Err() - checkErr(err) - - fmt.Printf(template, cmdLine(), buildTags(), text) -} - -const template = `// %s -// Code generated by the command above; see README.md. DO NOT EDIT. - -// +build %s - -package unix - -const( -%s)` diff --git a/vendor/golang.org/x/sys/unix/types_aix.go b/vendor/golang.org/x/sys/unix/types_aix.go deleted file mode 100644 index 40d2beede..000000000 --- a/vendor/golang.org/x/sys/unix/types_aix.go +++ /dev/null @@ -1,237 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore -// +build aix - -/* -Input to cgo -godefs. 
See also mkerrors.sh and mkall.sh -*/ - -// +godefs map struct_in_addr [4]byte /* in_addr */ -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package unix - -/* -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#include -#include -#include -#include - - -#include -#include - -enum { - sizeofPtr = sizeof(void*), -}; - -union sockaddr_all { - struct sockaddr s1; // this one gets used for fields - struct sockaddr_in s2; // these pad it out - struct sockaddr_in6 s3; - struct sockaddr_un s4; - struct sockaddr_dl s5; -}; - -struct sockaddr_any { - struct sockaddr addr; - char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; -}; - -*/ -import "C" - -// Machine characteristics - -const ( - SizeofPtr = C.sizeofPtr - SizeofShort = C.sizeof_short - SizeofInt = C.sizeof_int - SizeofLong = C.sizeof_long - SizeofLongLong = C.sizeof_longlong - PathMax = C.PATH_MAX -) - -// Basic types - -type ( - _C_short C.short - _C_int C.int - _C_long C.long - _C_long_long C.longlong -) - -type off64 C.off64_t -type off C.off_t -type Mode_t C.mode_t - -// Time - -type Timespec C.struct_timespec - -type Timeval C.struct_timeval - -type Timeval32 C.struct_timeval32 - -type Timex C.struct_timex - -type Time_t C.time_t - -type Tms C.struct_tms - -type Utimbuf C.struct_utimbuf - -type Timezone C.struct_timezone - -// Processes - -type Rusage C.struct_rusage - -type Rlimit C.struct_rlimit64 - -type Pid_t C.pid_t - -type _Gid_t C.gid_t - -type dev_t C.dev_t - -// Files - -type Stat_t C.struct_stat - -type StatxTimestamp C.struct_statx_timestamp - -type Statx_t C.struct_statx - -type Dirent C.struct_dirent - -// Sockets - -type RawSockaddrInet4 C.struct_sockaddr_in - -type RawSockaddrInet6 C.struct_sockaddr_in6 - -type RawSockaddrUnix C.struct_sockaddr_un - -type RawSockaddrDatalink C.struct_sockaddr_dl - -type RawSockaddr C.struct_sockaddr - -type RawSockaddrAny C.struct_sockaddr_any - -type 
_Socklen C.socklen_t - -type Cmsghdr C.struct_cmsghdr - -type ICMPv6Filter C.struct_icmp6_filter - -type Iovec C.struct_iovec - -type IPMreq C.struct_ip_mreq - -type IPv6Mreq C.struct_ipv6_mreq - -type IPv6MTUInfo C.struct_ip6_mtuinfo - -type Linger C.struct_linger - -type Msghdr C.struct_msghdr - -const ( - SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in - SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - SizeofSockaddrAny = C.sizeof_struct_sockaddr_any - SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un - SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl - SizeofLinger = C.sizeof_struct_linger - SizeofIPMreq = C.sizeof_struct_ip_mreq - SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo - SizeofMsghdr = C.sizeof_struct_msghdr - SizeofCmsghdr = C.sizeof_struct_cmsghdr - SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter -) - -// Routing and interface messages - -const ( - SizeofIfMsghdr = C.sizeof_struct_if_msghdr -) - -type IfMsgHdr C.struct_if_msghdr - -// Misc - -type FdSet C.fd_set - -type Utsname C.struct_utsname - -type Ustat_t C.struct_ustat - -type Sigset_t C.sigset_t - -const ( - AT_FDCWD = C.AT_FDCWD - AT_REMOVEDIR = C.AT_REMOVEDIR - AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW -) - -// Terminal handling - -type Termios C.struct_termios - -type Termio C.struct_termio - -type Winsize C.struct_winsize - -//poll - -type PollFd struct { - Fd int32 - Events uint16 - Revents uint16 -} - -const ( - POLLERR = C.POLLERR - POLLHUP = C.POLLHUP - POLLIN = C.POLLIN - POLLNVAL = C.POLLNVAL - POLLOUT = C.POLLOUT - POLLPRI = C.POLLPRI - POLLRDBAND = C.POLLRDBAND - POLLRDNORM = C.POLLRDNORM - POLLWRBAND = C.POLLWRBAND - POLLWRNORM = C.POLLWRNORM -) - -//flock_t - -type Flock_t C.struct_flock64 - -// Statfs - -type Fsid_t C.struct_fsid_t -type Fsid64_t C.struct_fsid64_t - -type Statfs_t C.struct_statfs - -const RNDGETENTCNT = 0x80045200 diff --git a/vendor/golang.org/x/sys/unix/types_darwin.go 
b/vendor/golang.org/x/sys/unix/types_darwin.go deleted file mode 100644 index 155c2e692..000000000 --- a/vendor/golang.org/x/sys/unix/types_darwin.go +++ /dev/null @@ -1,283 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -/* -Input to cgo -godefs. See README.md -*/ - -// +godefs map struct_in_addr [4]byte /* in_addr */ -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package unix - -/* -#define __DARWIN_UNIX03 0 -#define KERNEL -#define _DARWIN_USE_64_BIT_INODE -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -enum { - sizeofPtr = sizeof(void*), -}; - -union sockaddr_all { - struct sockaddr s1; // this one gets used for fields - struct sockaddr_in s2; // these pad it out - struct sockaddr_in6 s3; - struct sockaddr_un s4; - struct sockaddr_dl s5; -}; - -struct sockaddr_any { - struct sockaddr addr; - char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; -}; - -*/ -import "C" - -// Machine characteristics - -const ( - SizeofPtr = C.sizeofPtr - SizeofShort = C.sizeof_short - SizeofInt = C.sizeof_int - SizeofLong = C.sizeof_long - SizeofLongLong = C.sizeof_longlong -) - -// Basic types - -type ( - _C_short C.short - _C_int C.int - _C_long C.long - _C_long_long C.longlong -) - -// Time - -type Timespec C.struct_timespec - -type Timeval C.struct_timeval - -type Timeval32 C.struct_timeval32 - -// Processes - -type Rusage C.struct_rusage - -type Rlimit C.struct_rlimit - -type _Gid_t C.gid_t - -// Files - -type Stat_t C.struct_stat64 - -type Statfs_t C.struct_statfs64 - -type Flock_t C.struct_flock - -type Fstore_t C.struct_fstore - -type 
Radvisory_t C.struct_radvisory - -type Fbootstraptransfer_t C.struct_fbootstraptransfer - -type Log2phys_t C.struct_log2phys - -type Fsid C.struct_fsid - -type Dirent C.struct_dirent - -// Sockets - -type RawSockaddrInet4 C.struct_sockaddr_in - -type RawSockaddrInet6 C.struct_sockaddr_in6 - -type RawSockaddrUnix C.struct_sockaddr_un - -type RawSockaddrDatalink C.struct_sockaddr_dl - -type RawSockaddr C.struct_sockaddr - -type RawSockaddrAny C.struct_sockaddr_any - -type _Socklen C.socklen_t - -type Linger C.struct_linger - -type Iovec C.struct_iovec - -type IPMreq C.struct_ip_mreq - -type IPv6Mreq C.struct_ipv6_mreq - -type Msghdr C.struct_msghdr - -type Cmsghdr C.struct_cmsghdr - -type Inet4Pktinfo C.struct_in_pktinfo - -type Inet6Pktinfo C.struct_in6_pktinfo - -type IPv6MTUInfo C.struct_ip6_mtuinfo - -type ICMPv6Filter C.struct_icmp6_filter - -const ( - SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in - SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - SizeofSockaddrAny = C.sizeof_struct_sockaddr_any - SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un - SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl - SizeofLinger = C.sizeof_struct_linger - SizeofIPMreq = C.sizeof_struct_ip_mreq - SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - SizeofMsghdr = C.sizeof_struct_msghdr - SizeofCmsghdr = C.sizeof_struct_cmsghdr - SizeofInet4Pktinfo = C.sizeof_struct_in_pktinfo - SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo - SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo - SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter -) - -// Ptrace requests - -const ( - PTRACE_TRACEME = C.PT_TRACE_ME - PTRACE_CONT = C.PT_CONTINUE - PTRACE_KILL = C.PT_KILL -) - -// Events (kqueue, kevent) - -type Kevent_t C.struct_kevent - -// Select - -type FdSet C.fd_set - -// Routing and interface messages - -const ( - SizeofIfMsghdr = C.sizeof_struct_if_msghdr - SizeofIfData = C.sizeof_struct_if_data - SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr - SizeofIfmaMsghdr = C.sizeof_struct_ifma_msghdr - 
SizeofIfmaMsghdr2 = C.sizeof_struct_ifma_msghdr2 - SizeofRtMsghdr = C.sizeof_struct_rt_msghdr - SizeofRtMetrics = C.sizeof_struct_rt_metrics -) - -type IfMsghdr C.struct_if_msghdr - -type IfData C.struct_if_data - -type IfaMsghdr C.struct_ifa_msghdr - -type IfmaMsghdr C.struct_ifma_msghdr - -type IfmaMsghdr2 C.struct_ifma_msghdr2 - -type RtMsghdr C.struct_rt_msghdr - -type RtMetrics C.struct_rt_metrics - -// Berkeley packet filter - -const ( - SizeofBpfVersion = C.sizeof_struct_bpf_version - SizeofBpfStat = C.sizeof_struct_bpf_stat - SizeofBpfProgram = C.sizeof_struct_bpf_program - SizeofBpfInsn = C.sizeof_struct_bpf_insn - SizeofBpfHdr = C.sizeof_struct_bpf_hdr -) - -type BpfVersion C.struct_bpf_version - -type BpfStat C.struct_bpf_stat - -type BpfProgram C.struct_bpf_program - -type BpfInsn C.struct_bpf_insn - -type BpfHdr C.struct_bpf_hdr - -// Terminal handling - -type Termios C.struct_termios - -type Winsize C.struct_winsize - -// fchmodat-like syscalls. - -const ( - AT_FDCWD = C.AT_FDCWD - AT_REMOVEDIR = C.AT_REMOVEDIR - AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW - AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW -) - -// poll - -type PollFd C.struct_pollfd - -const ( - POLLERR = C.POLLERR - POLLHUP = C.POLLHUP - POLLIN = C.POLLIN - POLLNVAL = C.POLLNVAL - POLLOUT = C.POLLOUT - POLLPRI = C.POLLPRI - POLLRDBAND = C.POLLRDBAND - POLLRDNORM = C.POLLRDNORM - POLLWRBAND = C.POLLWRBAND - POLLWRNORM = C.POLLWRNORM -) - -// uname - -type Utsname C.struct_utsname - -// Clockinfo - -const SizeofClockinfo = C.sizeof_struct_clockinfo - -type Clockinfo C.struct_clockinfo diff --git a/vendor/golang.org/x/sys/unix/types_dragonfly.go b/vendor/golang.org/x/sys/unix/types_dragonfly.go deleted file mode 100644 index 3365dd79d..000000000 --- a/vendor/golang.org/x/sys/unix/types_dragonfly.go +++ /dev/null @@ -1,263 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build ignore - -/* -Input to cgo -godefs. See README.md -*/ - -// +godefs map struct_in_addr [4]byte /* in_addr */ -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package unix - -/* -#define KERNEL -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -enum { - sizeofPtr = sizeof(void*), -}; - -union sockaddr_all { - struct sockaddr s1; // this one gets used for fields - struct sockaddr_in s2; // these pad it out - struct sockaddr_in6 s3; - struct sockaddr_un s4; - struct sockaddr_dl s5; -}; - -struct sockaddr_any { - struct sockaddr addr; - char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; -}; - -*/ -import "C" - -// Machine characteristics - -const ( - SizeofPtr = C.sizeofPtr - SizeofShort = C.sizeof_short - SizeofInt = C.sizeof_int - SizeofLong = C.sizeof_long - SizeofLongLong = C.sizeof_longlong -) - -// Basic types - -type ( - _C_short C.short - _C_int C.int - _C_long C.long - _C_long_long C.longlong -) - -// Time - -type Timespec C.struct_timespec - -type Timeval C.struct_timeval - -// Processes - -type Rusage C.struct_rusage - -type Rlimit C.struct_rlimit - -type _Gid_t C.gid_t - -// Files - -type Stat_t C.struct_stat - -type Statfs_t C.struct_statfs - -type Flock_t C.struct_flock - -type Dirent C.struct_dirent - -type Fsid C.struct_fsid - -// File system limits - -const ( - PathMax = C.PATH_MAX -) - -// Sockets - -type RawSockaddrInet4 C.struct_sockaddr_in - -type RawSockaddrInet6 C.struct_sockaddr_in6 - -type RawSockaddrUnix C.struct_sockaddr_un - -type RawSockaddrDatalink C.struct_sockaddr_dl - -type RawSockaddr C.struct_sockaddr - -type RawSockaddrAny C.struct_sockaddr_any - -type _Socklen C.socklen_t - -type Linger C.struct_linger - -type Iovec C.struct_iovec - -type IPMreq 
C.struct_ip_mreq - -type IPv6Mreq C.struct_ipv6_mreq - -type Msghdr C.struct_msghdr - -type Cmsghdr C.struct_cmsghdr - -type Inet6Pktinfo C.struct_in6_pktinfo - -type IPv6MTUInfo C.struct_ip6_mtuinfo - -type ICMPv6Filter C.struct_icmp6_filter - -const ( - SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in - SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - SizeofSockaddrAny = C.sizeof_struct_sockaddr_any - SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un - SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl - SizeofLinger = C.sizeof_struct_linger - SizeofIPMreq = C.sizeof_struct_ip_mreq - SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - SizeofMsghdr = C.sizeof_struct_msghdr - SizeofCmsghdr = C.sizeof_struct_cmsghdr - SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo - SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo - SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter -) - -// Ptrace requests - -const ( - PTRACE_TRACEME = C.PT_TRACE_ME - PTRACE_CONT = C.PT_CONTINUE - PTRACE_KILL = C.PT_KILL -) - -// Events (kqueue, kevent) - -type Kevent_t C.struct_kevent - -// Select - -type FdSet C.fd_set - -// Routing and interface messages - -const ( - SizeofIfMsghdr = C.sizeof_struct_if_msghdr - SizeofIfData = C.sizeof_struct_if_data - SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr - SizeofIfmaMsghdr = C.sizeof_struct_ifma_msghdr - SizeofIfAnnounceMsghdr = C.sizeof_struct_if_announcemsghdr - SizeofRtMsghdr = C.sizeof_struct_rt_msghdr - SizeofRtMetrics = C.sizeof_struct_rt_metrics -) - -type IfMsghdr C.struct_if_msghdr - -type IfData C.struct_if_data - -type IfaMsghdr C.struct_ifa_msghdr - -type IfmaMsghdr C.struct_ifma_msghdr - -type IfAnnounceMsghdr C.struct_if_announcemsghdr - -type RtMsghdr C.struct_rt_msghdr - -type RtMetrics C.struct_rt_metrics - -// Berkeley packet filter - -const ( - SizeofBpfVersion = C.sizeof_struct_bpf_version - SizeofBpfStat = C.sizeof_struct_bpf_stat - SizeofBpfProgram = C.sizeof_struct_bpf_program - SizeofBpfInsn = C.sizeof_struct_bpf_insn - 
SizeofBpfHdr = C.sizeof_struct_bpf_hdr -) - -type BpfVersion C.struct_bpf_version - -type BpfStat C.struct_bpf_stat - -type BpfProgram C.struct_bpf_program - -type BpfInsn C.struct_bpf_insn - -type BpfHdr C.struct_bpf_hdr - -// Terminal handling - -type Termios C.struct_termios - -type Winsize C.struct_winsize - -// fchmodat-like syscalls. - -const ( - AT_FDCWD = C.AT_FDCWD - AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW -) - -// poll - -type PollFd C.struct_pollfd - -const ( - POLLERR = C.POLLERR - POLLHUP = C.POLLHUP - POLLIN = C.POLLIN - POLLNVAL = C.POLLNVAL - POLLOUT = C.POLLOUT - POLLPRI = C.POLLPRI - POLLRDBAND = C.POLLRDBAND - POLLRDNORM = C.POLLRDNORM - POLLWRBAND = C.POLLWRBAND - POLLWRNORM = C.POLLWRNORM -) - -// Uname - -type Utsname C.struct_utsname diff --git a/vendor/golang.org/x/sys/unix/types_freebsd.go b/vendor/golang.org/x/sys/unix/types_freebsd.go deleted file mode 100644 index a121dc336..000000000 --- a/vendor/golang.org/x/sys/unix/types_freebsd.go +++ /dev/null @@ -1,400 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -/* -Input to cgo -godefs. 
See README.md -*/ - -// +godefs map struct_in_addr [4]byte /* in_addr */ -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package unix - -/* -#define _WANT_FREEBSD11_STAT 1 -#define _WANT_FREEBSD11_STATFS 1 -#define _WANT_FREEBSD11_DIRENT 1 -#define _WANT_FREEBSD11_KEVENT 1 - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -enum { - sizeofPtr = sizeof(void*), -}; - -union sockaddr_all { - struct sockaddr s1; // this one gets used for fields - struct sockaddr_in s2; // these pad it out - struct sockaddr_in6 s3; - struct sockaddr_un s4; - struct sockaddr_dl s5; -}; - -struct sockaddr_any { - struct sockaddr addr; - char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; -}; - -// This structure is a duplicate of if_data on FreeBSD 8-STABLE. -// See /usr/include/net/if.h. -struct if_data8 { - u_char ifi_type; - u_char ifi_physical; - u_char ifi_addrlen; - u_char ifi_hdrlen; - u_char ifi_link_state; - u_char ifi_spare_char1; - u_char ifi_spare_char2; - u_char ifi_datalen; - u_long ifi_mtu; - u_long ifi_metric; - u_long ifi_baudrate; - u_long ifi_ipackets; - u_long ifi_ierrors; - u_long ifi_opackets; - u_long ifi_oerrors; - u_long ifi_collisions; - u_long ifi_ibytes; - u_long ifi_obytes; - u_long ifi_imcasts; - u_long ifi_omcasts; - u_long ifi_iqdrops; - u_long ifi_noproto; - u_long ifi_hwassist; -// FIXME: these are now unions, so maybe need to change definitions? -#undef ifi_epoch - time_t ifi_epoch; -#undef ifi_lastchange - struct timeval ifi_lastchange; -}; - -// This structure is a duplicate of if_msghdr on FreeBSD 8-STABLE. -// See /usr/include/net/if.h. 
-struct if_msghdr8 { - u_short ifm_msglen; - u_char ifm_version; - u_char ifm_type; - int ifm_addrs; - int ifm_flags; - u_short ifm_index; - struct if_data8 ifm_data; -}; -*/ -import "C" - -// Machine characteristics - -const ( - SizeofPtr = C.sizeofPtr - SizeofShort = C.sizeof_short - SizeofInt = C.sizeof_int - SizeofLong = C.sizeof_long - SizeofLongLong = C.sizeof_longlong -) - -// Basic types - -type ( - _C_short C.short - _C_int C.int - _C_long C.long - _C_long_long C.longlong -) - -// Time - -type Timespec C.struct_timespec - -type Timeval C.struct_timeval - -// Processes - -type Rusage C.struct_rusage - -type Rlimit C.struct_rlimit - -type _Gid_t C.gid_t - -// Files - -const ( - _statfsVersion = C.STATFS_VERSION - _dirblksiz = C.DIRBLKSIZ -) - -type Stat_t C.struct_stat - -type stat_freebsd11_t C.struct_freebsd11_stat - -type Statfs_t C.struct_statfs - -type statfs_freebsd11_t C.struct_freebsd11_statfs - -type Flock_t C.struct_flock - -type Dirent C.struct_dirent - -type dirent_freebsd11 C.struct_freebsd11_dirent - -type Fsid C.struct_fsid - -// File system limits - -const ( - PathMax = C.PATH_MAX -) - -// Advice to Fadvise - -const ( - FADV_NORMAL = C.POSIX_FADV_NORMAL - FADV_RANDOM = C.POSIX_FADV_RANDOM - FADV_SEQUENTIAL = C.POSIX_FADV_SEQUENTIAL - FADV_WILLNEED = C.POSIX_FADV_WILLNEED - FADV_DONTNEED = C.POSIX_FADV_DONTNEED - FADV_NOREUSE = C.POSIX_FADV_NOREUSE -) - -// Sockets - -type RawSockaddrInet4 C.struct_sockaddr_in - -type RawSockaddrInet6 C.struct_sockaddr_in6 - -type RawSockaddrUnix C.struct_sockaddr_un - -type RawSockaddrDatalink C.struct_sockaddr_dl - -type RawSockaddr C.struct_sockaddr - -type RawSockaddrAny C.struct_sockaddr_any - -type _Socklen C.socklen_t - -type Linger C.struct_linger - -type Iovec C.struct_iovec - -type IPMreq C.struct_ip_mreq - -type IPMreqn C.struct_ip_mreqn - -type IPv6Mreq C.struct_ipv6_mreq - -type Msghdr C.struct_msghdr - -type Cmsghdr C.struct_cmsghdr - -type Inet6Pktinfo C.struct_in6_pktinfo - -type IPv6MTUInfo 
C.struct_ip6_mtuinfo - -type ICMPv6Filter C.struct_icmp6_filter - -const ( - SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in - SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - SizeofSockaddrAny = C.sizeof_struct_sockaddr_any - SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un - SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl - SizeofLinger = C.sizeof_struct_linger - SizeofIPMreq = C.sizeof_struct_ip_mreq - SizeofIPMreqn = C.sizeof_struct_ip_mreqn - SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - SizeofMsghdr = C.sizeof_struct_msghdr - SizeofCmsghdr = C.sizeof_struct_cmsghdr - SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo - SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo - SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter -) - -// Ptrace requests - -const ( - PTRACE_ATTACH = C.PT_ATTACH - PTRACE_CONT = C.PT_CONTINUE - PTRACE_DETACH = C.PT_DETACH - PTRACE_GETFPREGS = C.PT_GETFPREGS - PTRACE_GETFSBASE = C.PT_GETFSBASE - PTRACE_GETLWPLIST = C.PT_GETLWPLIST - PTRACE_GETNUMLWPS = C.PT_GETNUMLWPS - PTRACE_GETREGS = C.PT_GETREGS - PTRACE_GETXSTATE = C.PT_GETXSTATE - PTRACE_IO = C.PT_IO - PTRACE_KILL = C.PT_KILL - PTRACE_LWPEVENTS = C.PT_LWP_EVENTS - PTRACE_LWPINFO = C.PT_LWPINFO - PTRACE_SETFPREGS = C.PT_SETFPREGS - PTRACE_SETREGS = C.PT_SETREGS - PTRACE_SINGLESTEP = C.PT_STEP - PTRACE_TRACEME = C.PT_TRACE_ME -) - -const ( - PIOD_READ_D = C.PIOD_READ_D - PIOD_WRITE_D = C.PIOD_WRITE_D - PIOD_READ_I = C.PIOD_READ_I - PIOD_WRITE_I = C.PIOD_WRITE_I -) - -const ( - PL_FLAG_BORN = C.PL_FLAG_BORN - PL_FLAG_EXITED = C.PL_FLAG_EXITED - PL_FLAG_SI = C.PL_FLAG_SI -) - -const ( - TRAP_BRKPT = C.TRAP_BRKPT - TRAP_TRACE = C.TRAP_TRACE -) - -type PtraceLwpInfoStruct C.struct_ptrace_lwpinfo - -type __Siginfo C.struct___siginfo - -type Sigset_t C.sigset_t - -type Reg C.struct_reg - -type FpReg C.struct_fpreg - -type PtraceIoDesc C.struct_ptrace_io_desc - -// Events (kqueue, kevent) - -type Kevent_t C.struct_kevent_freebsd11 - -// Select - -type FdSet C.fd_set - -// Routing and 
interface messages - -const ( - sizeofIfMsghdr = C.sizeof_struct_if_msghdr - SizeofIfMsghdr = C.sizeof_struct_if_msghdr8 - sizeofIfData = C.sizeof_struct_if_data - SizeofIfData = C.sizeof_struct_if_data8 - SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr - SizeofIfmaMsghdr = C.sizeof_struct_ifma_msghdr - SizeofIfAnnounceMsghdr = C.sizeof_struct_if_announcemsghdr - SizeofRtMsghdr = C.sizeof_struct_rt_msghdr - SizeofRtMetrics = C.sizeof_struct_rt_metrics -) - -type ifMsghdr C.struct_if_msghdr - -type IfMsghdr C.struct_if_msghdr8 - -type ifData C.struct_if_data - -type IfData C.struct_if_data8 - -type IfaMsghdr C.struct_ifa_msghdr - -type IfmaMsghdr C.struct_ifma_msghdr - -type IfAnnounceMsghdr C.struct_if_announcemsghdr - -type RtMsghdr C.struct_rt_msghdr - -type RtMetrics C.struct_rt_metrics - -// Berkeley packet filter - -const ( - SizeofBpfVersion = C.sizeof_struct_bpf_version - SizeofBpfStat = C.sizeof_struct_bpf_stat - SizeofBpfZbuf = C.sizeof_struct_bpf_zbuf - SizeofBpfProgram = C.sizeof_struct_bpf_program - SizeofBpfInsn = C.sizeof_struct_bpf_insn - SizeofBpfHdr = C.sizeof_struct_bpf_hdr - SizeofBpfZbufHeader = C.sizeof_struct_bpf_zbuf_header -) - -type BpfVersion C.struct_bpf_version - -type BpfStat C.struct_bpf_stat - -type BpfZbuf C.struct_bpf_zbuf - -type BpfProgram C.struct_bpf_program - -type BpfInsn C.struct_bpf_insn - -type BpfHdr C.struct_bpf_hdr - -type BpfZbufHeader C.struct_bpf_zbuf_header - -// Terminal handling - -type Termios C.struct_termios - -type Winsize C.struct_winsize - -// fchmodat-like syscalls. 
- -const ( - AT_FDCWD = C.AT_FDCWD - AT_REMOVEDIR = C.AT_REMOVEDIR - AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW - AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW -) - -// poll - -type PollFd C.struct_pollfd - -const ( - POLLERR = C.POLLERR - POLLHUP = C.POLLHUP - POLLIN = C.POLLIN - POLLINIGNEOF = C.POLLINIGNEOF - POLLNVAL = C.POLLNVAL - POLLOUT = C.POLLOUT - POLLPRI = C.POLLPRI - POLLRDBAND = C.POLLRDBAND - POLLRDNORM = C.POLLRDNORM - POLLWRBAND = C.POLLWRBAND - POLLWRNORM = C.POLLWRNORM -) - -// Capabilities - -type CapRights C.struct_cap_rights - -// Uname - -type Utsname C.struct_utsname diff --git a/vendor/golang.org/x/sys/unix/types_netbsd.go b/vendor/golang.org/x/sys/unix/types_netbsd.go deleted file mode 100644 index 4a96d72c3..000000000 --- a/vendor/golang.org/x/sys/unix/types_netbsd.go +++ /dev/null @@ -1,290 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -/* -Input to cgo -godefs. 
See README.md -*/ - -// +godefs map struct_in_addr [4]byte /* in_addr */ -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package unix - -/* -#define KERNEL -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -enum { - sizeofPtr = sizeof(void*), -}; - -union sockaddr_all { - struct sockaddr s1; // this one gets used for fields - struct sockaddr_in s2; // these pad it out - struct sockaddr_in6 s3; - struct sockaddr_un s4; - struct sockaddr_dl s5; -}; - -struct sockaddr_any { - struct sockaddr addr; - char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; -}; - -*/ -import "C" - -// Machine characteristics - -const ( - SizeofPtr = C.sizeofPtr - SizeofShort = C.sizeof_short - SizeofInt = C.sizeof_int - SizeofLong = C.sizeof_long - SizeofLongLong = C.sizeof_longlong -) - -// Basic types - -type ( - _C_short C.short - _C_int C.int - _C_long C.long - _C_long_long C.longlong -) - -// Time - -type Timespec C.struct_timespec - -type Timeval C.struct_timeval - -// Processes - -type Rusage C.struct_rusage - -type Rlimit C.struct_rlimit - -type _Gid_t C.gid_t - -// Files - -type Stat_t C.struct_stat - -type Statfs_t C.struct_statfs - -type Flock_t C.struct_flock - -type Dirent C.struct_dirent - -type Fsid C.fsid_t - -// File system limits - -const ( - PathMax = C.PATH_MAX -) - -// Advice to Fadvise - -const ( - FADV_NORMAL = C.POSIX_FADV_NORMAL - FADV_RANDOM = C.POSIX_FADV_RANDOM - FADV_SEQUENTIAL = C.POSIX_FADV_SEQUENTIAL - FADV_WILLNEED = C.POSIX_FADV_WILLNEED - FADV_DONTNEED = C.POSIX_FADV_DONTNEED - FADV_NOREUSE = C.POSIX_FADV_NOREUSE -) - -// Sockets - -type RawSockaddrInet4 C.struct_sockaddr_in - -type RawSockaddrInet6 C.struct_sockaddr_in6 - -type RawSockaddrUnix C.struct_sockaddr_un - -type 
RawSockaddrDatalink C.struct_sockaddr_dl - -type RawSockaddr C.struct_sockaddr - -type RawSockaddrAny C.struct_sockaddr_any - -type _Socklen C.socklen_t - -type Linger C.struct_linger - -type Iovec C.struct_iovec - -type IPMreq C.struct_ip_mreq - -type IPv6Mreq C.struct_ipv6_mreq - -type Msghdr C.struct_msghdr - -type Cmsghdr C.struct_cmsghdr - -type Inet6Pktinfo C.struct_in6_pktinfo - -type IPv6MTUInfo C.struct_ip6_mtuinfo - -type ICMPv6Filter C.struct_icmp6_filter - -const ( - SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in - SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - SizeofSockaddrAny = C.sizeof_struct_sockaddr_any - SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un - SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl - SizeofLinger = C.sizeof_struct_linger - SizeofIPMreq = C.sizeof_struct_ip_mreq - SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - SizeofMsghdr = C.sizeof_struct_msghdr - SizeofCmsghdr = C.sizeof_struct_cmsghdr - SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo - SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo - SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter -) - -// Ptrace requests - -const ( - PTRACE_TRACEME = C.PT_TRACE_ME - PTRACE_CONT = C.PT_CONTINUE - PTRACE_KILL = C.PT_KILL -) - -// Events (kqueue, kevent) - -type Kevent_t C.struct_kevent - -// Select - -type FdSet C.fd_set - -// Routing and interface messages - -const ( - SizeofIfMsghdr = C.sizeof_struct_if_msghdr - SizeofIfData = C.sizeof_struct_if_data - SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr - SizeofIfAnnounceMsghdr = C.sizeof_struct_if_announcemsghdr - SizeofRtMsghdr = C.sizeof_struct_rt_msghdr - SizeofRtMetrics = C.sizeof_struct_rt_metrics -) - -type IfMsghdr C.struct_if_msghdr - -type IfData C.struct_if_data - -type IfaMsghdr C.struct_ifa_msghdr - -type IfAnnounceMsghdr C.struct_if_announcemsghdr - -type RtMsghdr C.struct_rt_msghdr - -type RtMetrics C.struct_rt_metrics - -type Mclpool C.struct_mclpool - -// Berkeley packet filter - -const ( - SizeofBpfVersion = 
C.sizeof_struct_bpf_version - SizeofBpfStat = C.sizeof_struct_bpf_stat - SizeofBpfProgram = C.sizeof_struct_bpf_program - SizeofBpfInsn = C.sizeof_struct_bpf_insn - SizeofBpfHdr = C.sizeof_struct_bpf_hdr -) - -type BpfVersion C.struct_bpf_version - -type BpfStat C.struct_bpf_stat - -type BpfProgram C.struct_bpf_program - -type BpfInsn C.struct_bpf_insn - -type BpfHdr C.struct_bpf_hdr - -type BpfTimeval C.struct_bpf_timeval - -// Terminal handling - -type Termios C.struct_termios - -type Winsize C.struct_winsize - -type Ptmget C.struct_ptmget - -// fchmodat-like syscalls. - -const ( - AT_FDCWD = C.AT_FDCWD - AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW - AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW -) - -// poll - -type PollFd C.struct_pollfd - -const ( - POLLERR = C.POLLERR - POLLHUP = C.POLLHUP - POLLIN = C.POLLIN - POLLNVAL = C.POLLNVAL - POLLOUT = C.POLLOUT - POLLPRI = C.POLLPRI - POLLRDBAND = C.POLLRDBAND - POLLRDNORM = C.POLLRDNORM - POLLWRBAND = C.POLLWRBAND - POLLWRNORM = C.POLLWRNORM -) - -// Sysctl - -type Sysctlnode C.struct_sysctlnode - -// Uname - -type Utsname C.struct_utsname - -// Clockinfo - -const SizeofClockinfo = C.sizeof_struct_clockinfo - -type Clockinfo C.struct_clockinfo diff --git a/vendor/golang.org/x/sys/unix/types_openbsd.go b/vendor/golang.org/x/sys/unix/types_openbsd.go deleted file mode 100644 index 775cb57dc..000000000 --- a/vendor/golang.org/x/sys/unix/types_openbsd.go +++ /dev/null @@ -1,283 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -/* -Input to cgo -godefs. 
See README.md -*/ - -// +godefs map struct_in_addr [4]byte /* in_addr */ -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package unix - -/* -#define KERNEL -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -enum { - sizeofPtr = sizeof(void*), -}; - -union sockaddr_all { - struct sockaddr s1; // this one gets used for fields - struct sockaddr_in s2; // these pad it out - struct sockaddr_in6 s3; - struct sockaddr_un s4; - struct sockaddr_dl s5; -}; - -struct sockaddr_any { - struct sockaddr addr; - char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; -}; - -*/ -import "C" - -// Machine characteristics - -const ( - SizeofPtr = C.sizeofPtr - SizeofShort = C.sizeof_short - SizeofInt = C.sizeof_int - SizeofLong = C.sizeof_long - SizeofLongLong = C.sizeof_longlong -) - -// Basic types - -type ( - _C_short C.short - _C_int C.int - _C_long C.long - _C_long_long C.longlong -) - -// Time - -type Timespec C.struct_timespec - -type Timeval C.struct_timeval - -// Processes - -type Rusage C.struct_rusage - -type Rlimit C.struct_rlimit - -type _Gid_t C.gid_t - -// Files - -type Stat_t C.struct_stat - -type Statfs_t C.struct_statfs - -type Flock_t C.struct_flock - -type Dirent C.struct_dirent - -type Fsid C.fsid_t - -// File system limits - -const ( - PathMax = C.PATH_MAX -) - -// Sockets - -type RawSockaddrInet4 C.struct_sockaddr_in - -type RawSockaddrInet6 C.struct_sockaddr_in6 - -type RawSockaddrUnix C.struct_sockaddr_un - -type RawSockaddrDatalink C.struct_sockaddr_dl - -type RawSockaddr C.struct_sockaddr - -type RawSockaddrAny C.struct_sockaddr_any - -type _Socklen C.socklen_t - -type Linger C.struct_linger - -type Iovec C.struct_iovec - -type IPMreq C.struct_ip_mreq - -type IPv6Mreq 
C.struct_ipv6_mreq - -type Msghdr C.struct_msghdr - -type Cmsghdr C.struct_cmsghdr - -type Inet6Pktinfo C.struct_in6_pktinfo - -type IPv6MTUInfo C.struct_ip6_mtuinfo - -type ICMPv6Filter C.struct_icmp6_filter - -const ( - SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in - SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - SizeofSockaddrAny = C.sizeof_struct_sockaddr_any - SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un - SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl - SizeofLinger = C.sizeof_struct_linger - SizeofIPMreq = C.sizeof_struct_ip_mreq - SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - SizeofMsghdr = C.sizeof_struct_msghdr - SizeofCmsghdr = C.sizeof_struct_cmsghdr - SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo - SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo - SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter -) - -// Ptrace requests - -const ( - PTRACE_TRACEME = C.PT_TRACE_ME - PTRACE_CONT = C.PT_CONTINUE - PTRACE_KILL = C.PT_KILL -) - -// Events (kqueue, kevent) - -type Kevent_t C.struct_kevent - -// Select - -type FdSet C.fd_set - -// Routing and interface messages - -const ( - SizeofIfMsghdr = C.sizeof_struct_if_msghdr - SizeofIfData = C.sizeof_struct_if_data - SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr - SizeofIfAnnounceMsghdr = C.sizeof_struct_if_announcemsghdr - SizeofRtMsghdr = C.sizeof_struct_rt_msghdr - SizeofRtMetrics = C.sizeof_struct_rt_metrics -) - -type IfMsghdr C.struct_if_msghdr - -type IfData C.struct_if_data - -type IfaMsghdr C.struct_ifa_msghdr - -type IfAnnounceMsghdr C.struct_if_announcemsghdr - -type RtMsghdr C.struct_rt_msghdr - -type RtMetrics C.struct_rt_metrics - -type Mclpool C.struct_mclpool - -// Berkeley packet filter - -const ( - SizeofBpfVersion = C.sizeof_struct_bpf_version - SizeofBpfStat = C.sizeof_struct_bpf_stat - SizeofBpfProgram = C.sizeof_struct_bpf_program - SizeofBpfInsn = C.sizeof_struct_bpf_insn - SizeofBpfHdr = C.sizeof_struct_bpf_hdr -) - -type BpfVersion C.struct_bpf_version - -type BpfStat 
C.struct_bpf_stat - -type BpfProgram C.struct_bpf_program - -type BpfInsn C.struct_bpf_insn - -type BpfHdr C.struct_bpf_hdr - -type BpfTimeval C.struct_bpf_timeval - -// Terminal handling - -type Termios C.struct_termios - -type Winsize C.struct_winsize - -// fchmodat-like syscalls. - -const ( - AT_FDCWD = C.AT_FDCWD - AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW - AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW -) - -// poll - -type PollFd C.struct_pollfd - -const ( - POLLERR = C.POLLERR - POLLHUP = C.POLLHUP - POLLIN = C.POLLIN - POLLNVAL = C.POLLNVAL - POLLOUT = C.POLLOUT - POLLPRI = C.POLLPRI - POLLRDBAND = C.POLLRDBAND - POLLRDNORM = C.POLLRDNORM - POLLWRBAND = C.POLLWRBAND - POLLWRNORM = C.POLLWRNORM -) - -// Signal Sets - -type Sigset_t C.sigset_t - -// Uname - -type Utsname C.struct_utsname - -// Uvmexp - -const SizeofUvmexp = C.sizeof_struct_uvmexp - -type Uvmexp C.struct_uvmexp - -// Clockinfo - -const SizeofClockinfo = C.sizeof_struct_clockinfo - -type Clockinfo C.struct_clockinfo diff --git a/vendor/golang.org/x/sys/unix/types_solaris.go b/vendor/golang.org/x/sys/unix/types_solaris.go deleted file mode 100644 index 2b716f934..000000000 --- a/vendor/golang.org/x/sys/unix/types_solaris.go +++ /dev/null @@ -1,266 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -/* -Input to cgo -godefs. See README.md -*/ - -// +godefs map struct_in_addr [4]byte /* in_addr */ -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package unix - -/* -#define KERNEL -// These defines ensure that builds done on newer versions of Solaris are -// backwards-compatible with older versions of Solaris and -// OpenSolaris-based derivatives. 
-#define __USE_SUNOS_SOCKETS__ // msghdr -#define __USE_LEGACY_PROTOTYPES__ // iovec -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -enum { - sizeofPtr = sizeof(void*), -}; - -union sockaddr_all { - struct sockaddr s1; // this one gets used for fields - struct sockaddr_in s2; // these pad it out - struct sockaddr_in6 s3; - struct sockaddr_un s4; - struct sockaddr_dl s5; -}; - -struct sockaddr_any { - struct sockaddr addr; - char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; -}; - -*/ -import "C" - -// Machine characteristics - -const ( - SizeofPtr = C.sizeofPtr - SizeofShort = C.sizeof_short - SizeofInt = C.sizeof_int - SizeofLong = C.sizeof_long - SizeofLongLong = C.sizeof_longlong - PathMax = C.PATH_MAX - MaxHostNameLen = C.MAXHOSTNAMELEN -) - -// Basic types - -type ( - _C_short C.short - _C_int C.int - _C_long C.long - _C_long_long C.longlong -) - -// Time - -type Timespec C.struct_timespec - -type Timeval C.struct_timeval - -type Timeval32 C.struct_timeval32 - -type Tms C.struct_tms - -type Utimbuf C.struct_utimbuf - -// Processes - -type Rusage C.struct_rusage - -type Rlimit C.struct_rlimit - -type _Gid_t C.gid_t - -// Files - -type Stat_t C.struct_stat - -type Flock_t C.struct_flock - -type Dirent C.struct_dirent - -// Filesystems - -type _Fsblkcnt_t C.fsblkcnt_t - -type Statvfs_t C.struct_statvfs - -// Sockets - -type RawSockaddrInet4 C.struct_sockaddr_in - -type RawSockaddrInet6 C.struct_sockaddr_in6 - -type RawSockaddrUnix C.struct_sockaddr_un - -type RawSockaddrDatalink C.struct_sockaddr_dl - -type RawSockaddr C.struct_sockaddr - -type RawSockaddrAny C.struct_sockaddr_any - -type _Socklen C.socklen_t - -type Linger C.struct_linger - -type Iovec 
C.struct_iovec - -type IPMreq C.struct_ip_mreq - -type IPv6Mreq C.struct_ipv6_mreq - -type Msghdr C.struct_msghdr - -type Cmsghdr C.struct_cmsghdr - -type Inet6Pktinfo C.struct_in6_pktinfo - -type IPv6MTUInfo C.struct_ip6_mtuinfo - -type ICMPv6Filter C.struct_icmp6_filter - -const ( - SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in - SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - SizeofSockaddrAny = C.sizeof_struct_sockaddr_any - SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un - SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl - SizeofLinger = C.sizeof_struct_linger - SizeofIPMreq = C.sizeof_struct_ip_mreq - SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - SizeofMsghdr = C.sizeof_struct_msghdr - SizeofCmsghdr = C.sizeof_struct_cmsghdr - SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo - SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo - SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter -) - -// Select - -type FdSet C.fd_set - -// Misc - -type Utsname C.struct_utsname - -type Ustat_t C.struct_ustat - -const ( - AT_FDCWD = C.AT_FDCWD - AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW - AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW - AT_REMOVEDIR = C.AT_REMOVEDIR - AT_EACCESS = C.AT_EACCESS -) - -// Routing and interface messages - -const ( - SizeofIfMsghdr = C.sizeof_struct_if_msghdr - SizeofIfData = C.sizeof_struct_if_data - SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr - SizeofRtMsghdr = C.sizeof_struct_rt_msghdr - SizeofRtMetrics = C.sizeof_struct_rt_metrics -) - -type IfMsghdr C.struct_if_msghdr - -type IfData C.struct_if_data - -type IfaMsghdr C.struct_ifa_msghdr - -type RtMsghdr C.struct_rt_msghdr - -type RtMetrics C.struct_rt_metrics - -// Berkeley packet filter - -const ( - SizeofBpfVersion = C.sizeof_struct_bpf_version - SizeofBpfStat = C.sizeof_struct_bpf_stat - SizeofBpfProgram = C.sizeof_struct_bpf_program - SizeofBpfInsn = C.sizeof_struct_bpf_insn - SizeofBpfHdr = C.sizeof_struct_bpf_hdr -) - -type BpfVersion C.struct_bpf_version - -type BpfStat 
C.struct_bpf_stat - -type BpfProgram C.struct_bpf_program - -type BpfInsn C.struct_bpf_insn - -type BpfTimeval C.struct_bpf_timeval - -type BpfHdr C.struct_bpf_hdr - -// Terminal handling - -type Termios C.struct_termios - -type Termio C.struct_termio - -type Winsize C.struct_winsize - -// poll - -type PollFd C.struct_pollfd - -const ( - POLLERR = C.POLLERR - POLLHUP = C.POLLHUP - POLLIN = C.POLLIN - POLLNVAL = C.POLLNVAL - POLLOUT = C.POLLOUT - POLLPRI = C.POLLPRI - POLLRDBAND = C.POLLRDBAND - POLLRDNORM = C.POLLRDNORM - POLLWRBAND = C.POLLWRBAND - POLLWRNORM = C.POLLWRNORM -) diff --git a/vendor/golang.org/x/text/encoding/internal/identifier/gen.go b/vendor/golang.org/x/text/encoding/internal/identifier/gen.go deleted file mode 100644 index 26cfef9c6..000000000 --- a/vendor/golang.org/x/text/encoding/internal/identifier/gen.go +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build ignore - -package main - -import ( - "bytes" - "encoding/xml" - "fmt" - "io" - "log" - "strings" - - "golang.org/x/text/internal/gen" -) - -type registry struct { - XMLName xml.Name `xml:"registry"` - Updated string `xml:"updated"` - Registry []struct { - ID string `xml:"id,attr"` - Record []struct { - Name string `xml:"name"` - Xref []struct { - Type string `xml:"type,attr"` - Data string `xml:"data,attr"` - } `xml:"xref"` - Desc struct { - Data string `xml:",innerxml"` - // Any []struct { - // Data string `xml:",chardata"` - // } `xml:",any"` - // Data string `xml:",chardata"` - } `xml:"description,"` - MIB string `xml:"value"` - Alias []string `xml:"alias"` - MIME string `xml:"preferred_alias"` - } `xml:"record"` - } `xml:"registry"` -} - -func main() { - r := gen.OpenIANAFile("assignments/character-sets/character-sets.xml") - reg := ®istry{} - if err := xml.NewDecoder(r).Decode(®); err != nil && err != io.EOF { - log.Fatalf("Error decoding charset registry: %v", err) - } - if len(reg.Registry) == 0 || reg.Registry[0].ID != "character-sets-1" { - log.Fatalf("Unexpected ID %s", reg.Registry[0].ID) - } - - w := &bytes.Buffer{} - fmt.Fprintf(w, "const (\n") - for _, rec := range reg.Registry[0].Record { - constName := "" - for _, a := range rec.Alias { - if strings.HasPrefix(a, "cs") && strings.IndexByte(a, '-') == -1 { - // Some of the constant definitions have comments in them. Strip those. - constName = strings.Title(strings.SplitN(a[2:], "\n", 2)[0]) - } - } - if constName == "" { - switch rec.MIB { - case "2085": - constName = "HZGB2312" // Not listed as alias for some reason. 
- default: - log.Fatalf("No cs alias defined for %s.", rec.MIB) - } - } - if rec.MIME != "" { - rec.MIME = fmt.Sprintf(" (MIME: %s)", rec.MIME) - } - fmt.Fprintf(w, "// %s is the MIB identifier with IANA name %s%s.\n//\n", constName, rec.Name, rec.MIME) - if len(rec.Desc.Data) > 0 { - fmt.Fprint(w, "// ") - d := xml.NewDecoder(strings.NewReader(rec.Desc.Data)) - inElem := true - attr := "" - for { - t, err := d.Token() - if err != nil { - if err != io.EOF { - log.Fatal(err) - } - break - } - switch x := t.(type) { - case xml.CharData: - attr = "" // Don't need attribute info. - a := bytes.Split([]byte(x), []byte("\n")) - for i, b := range a { - if b = bytes.TrimSpace(b); len(b) != 0 { - if !inElem && i > 0 { - fmt.Fprint(w, "\n// ") - } - inElem = false - fmt.Fprintf(w, "%s ", string(b)) - } - } - case xml.StartElement: - if x.Name.Local == "xref" { - inElem = true - use := false - for _, a := range x.Attr { - if a.Name.Local == "type" { - use = use || a.Value != "person" - } - if a.Name.Local == "data" && use { - // Patch up URLs to use https. From some links, the - // https version is different from the http one. - s := a.Value - s = strings.Replace(s, "http://", "https://", -1) - s = strings.Replace(s, "/unicode/", "/", -1) - attr = s + " " - } - } - } - case xml.EndElement: - inElem = false - fmt.Fprint(w, attr) - } - } - fmt.Fprint(w, "\n") - } - for _, x := range rec.Xref { - switch x.Type { - case "rfc": - fmt.Fprintf(w, "// Reference: %s\n", strings.ToUpper(x.Data)) - case "uri": - fmt.Fprintf(w, "// Reference: %s\n", x.Data) - } - } - fmt.Fprintf(w, "%s MIB = %s\n", constName, rec.MIB) - fmt.Fprintln(w) - } - fmt.Fprintln(w, ")") - - gen.WriteGoFile("mib.go", "identifier", w.Bytes()) -} diff --git a/vendor/golang.org/x/text/unicode/bidi/gen.go b/vendor/golang.org/x/text/unicode/bidi/gen.go deleted file mode 100644 index 987fc169c..000000000 --- a/vendor/golang.org/x/text/unicode/bidi/gen.go +++ /dev/null @@ -1,133 +0,0 @@ -// Copyright 2015 The Go Authors. 
All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -package main - -import ( - "flag" - "log" - - "golang.org/x/text/internal/gen" - "golang.org/x/text/internal/triegen" - "golang.org/x/text/internal/ucd" -) - -var outputFile = flag.String("out", "tables.go", "output file") - -func main() { - gen.Init() - gen.Repackage("gen_trieval.go", "trieval.go", "bidi") - gen.Repackage("gen_ranges.go", "ranges_test.go", "bidi") - - genTables() -} - -// bidiClass names and codes taken from class "bc" in -// https://www.unicode.org/Public/8.0.0/ucd/PropertyValueAliases.txt -var bidiClass = map[string]Class{ - "AL": AL, // ArabicLetter - "AN": AN, // ArabicNumber - "B": B, // ParagraphSeparator - "BN": BN, // BoundaryNeutral - "CS": CS, // CommonSeparator - "EN": EN, // EuropeanNumber - "ES": ES, // EuropeanSeparator - "ET": ET, // EuropeanTerminator - "L": L, // LeftToRight - "NSM": NSM, // NonspacingMark - "ON": ON, // OtherNeutral - "R": R, // RightToLeft - "S": S, // SegmentSeparator - "WS": WS, // WhiteSpace - - "FSI": Control, - "PDF": Control, - "PDI": Control, - "LRE": Control, - "LRI": Control, - "LRO": Control, - "RLE": Control, - "RLI": Control, - "RLO": Control, -} - -func genTables() { - if numClass > 0x0F { - log.Fatalf("Too many Class constants (%#x > 0x0F).", numClass) - } - w := gen.NewCodeWriter() - defer w.WriteVersionedGoFile(*outputFile, "bidi") - - gen.WriteUnicodeVersion(w) - - t := triegen.NewTrie("bidi") - - // Build data about bracket mapping. These bits need to be or-ed with - // any other bits. - orMask := map[rune]uint64{} - - xorMap := map[rune]int{} - xorMasks := []rune{0} // First value is no-op. 
- - ucd.Parse(gen.OpenUCDFile("BidiBrackets.txt"), func(p *ucd.Parser) { - r1 := p.Rune(0) - r2 := p.Rune(1) - xor := r1 ^ r2 - if _, ok := xorMap[xor]; !ok { - xorMap[xor] = len(xorMasks) - xorMasks = append(xorMasks, xor) - } - entry := uint64(xorMap[xor]) << xorMaskShift - switch p.String(2) { - case "o": - entry |= openMask - case "c", "n": - default: - log.Fatalf("Unknown bracket class %q.", p.String(2)) - } - orMask[r1] = entry - }) - - w.WriteComment(` - xorMasks contains masks to be xor-ed with brackets to get the reverse - version.`) - w.WriteVar("xorMasks", xorMasks) - - done := map[rune]bool{} - - insert := func(r rune, c Class) { - if !done[r] { - t.Insert(r, orMask[r]|uint64(c)) - done[r] = true - } - } - - // Insert the derived BiDi properties. - ucd.Parse(gen.OpenUCDFile("extracted/DerivedBidiClass.txt"), func(p *ucd.Parser) { - r := p.Rune(0) - class, ok := bidiClass[p.String(1)] - if !ok { - log.Fatalf("%U: Unknown BiDi class %q", r, p.String(1)) - } - insert(r, class) - }) - visitDefaults(insert) - - // TODO: use sparse blocks. This would reduce table size considerably - // from the looks of it. - - sz, err := t.Gen(w) - if err != nil { - log.Fatal(err) - } - w.Size += sz -} - -// dummy values to make methods in gen_common compile. The real versions -// will be generated by this file to tables.go. -var ( - xorMasks []rune -) diff --git a/vendor/golang.org/x/text/unicode/bidi/gen_ranges.go b/vendor/golang.org/x/text/unicode/bidi/gen_ranges.go deleted file mode 100644 index 02c3b505d..000000000 --- a/vendor/golang.org/x/text/unicode/bidi/gen_ranges.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build ignore - -package main - -import ( - "unicode" - - "golang.org/x/text/internal/gen" - "golang.org/x/text/internal/ucd" - "golang.org/x/text/unicode/rangetable" -) - -// These tables are hand-extracted from: -// https://www.unicode.org/Public/8.0.0/ucd/extracted/DerivedBidiClass.txt -func visitDefaults(fn func(r rune, c Class)) { - // first write default values for ranges listed above. - visitRunes(fn, AL, []rune{ - 0x0600, 0x07BF, // Arabic - 0x08A0, 0x08FF, // Arabic Extended-A - 0xFB50, 0xFDCF, // Arabic Presentation Forms - 0xFDF0, 0xFDFF, - 0xFE70, 0xFEFF, - 0x0001EE00, 0x0001EEFF, // Arabic Mathematical Alpha Symbols - }) - visitRunes(fn, R, []rune{ - 0x0590, 0x05FF, // Hebrew - 0x07C0, 0x089F, // Nko et al. - 0xFB1D, 0xFB4F, - 0x00010800, 0x00010FFF, // Cypriot Syllabary et. al. - 0x0001E800, 0x0001EDFF, - 0x0001EF00, 0x0001EFFF, - }) - visitRunes(fn, ET, []rune{ // European Terminator - 0x20A0, 0x20Cf, // Currency symbols - }) - rangetable.Visit(unicode.Noncharacter_Code_Point, func(r rune) { - fn(r, BN) // Boundary Neutral - }) - ucd.Parse(gen.OpenUCDFile("DerivedCoreProperties.txt"), func(p *ucd.Parser) { - if p.String(1) == "Default_Ignorable_Code_Point" { - fn(p.Rune(0), BN) // Boundary Neutral - } - }) -} - -func visitRunes(fn func(r rune, c Class), c Class, runes []rune) { - for i := 0; i < len(runes); i += 2 { - lo, hi := runes[i], runes[i+1] - for j := lo; j <= hi; j++ { - fn(j, c) - } - } -} diff --git a/vendor/golang.org/x/text/unicode/bidi/gen_trieval.go b/vendor/golang.org/x/text/unicode/bidi/gen_trieval.go deleted file mode 100644 index 9cb994289..000000000 --- a/vendor/golang.org/x/text/unicode/bidi/gen_trieval.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -package main - -// Class is the Unicode BiDi class. Each rune has a single class. 
-type Class uint - -const ( - L Class = iota // LeftToRight - R // RightToLeft - EN // EuropeanNumber - ES // EuropeanSeparator - ET // EuropeanTerminator - AN // ArabicNumber - CS // CommonSeparator - B // ParagraphSeparator - S // SegmentSeparator - WS // WhiteSpace - ON // OtherNeutral - BN // BoundaryNeutral - NSM // NonspacingMark - AL // ArabicLetter - Control // Control LRO - PDI - - numClass - - LRO // LeftToRightOverride - RLO // RightToLeftOverride - LRE // LeftToRightEmbedding - RLE // RightToLeftEmbedding - PDF // PopDirectionalFormat - LRI // LeftToRightIsolate - RLI // RightToLeftIsolate - FSI // FirstStrongIsolate - PDI // PopDirectionalIsolate - - unknownClass = ^Class(0) -) - -var controlToClass = map[rune]Class{ - 0x202D: LRO, // LeftToRightOverride, - 0x202E: RLO, // RightToLeftOverride, - 0x202A: LRE, // LeftToRightEmbedding, - 0x202B: RLE, // RightToLeftEmbedding, - 0x202C: PDF, // PopDirectionalFormat, - 0x2066: LRI, // LeftToRightIsolate, - 0x2067: RLI, // RightToLeftIsolate, - 0x2068: FSI, // FirstStrongIsolate, - 0x2069: PDI, // PopDirectionalIsolate, -} - -// A trie entry has the following bits: -// 7..5 XOR mask for brackets -// 4 1: Bracket open, 0: Bracket close -// 3..0 Class type - -const ( - openMask = 0x10 - xorMaskShift = 5 -) diff --git a/vendor/golang.org/x/text/unicode/norm/maketables.go b/vendor/golang.org/x/text/unicode/norm/maketables.go deleted file mode 100644 index 30a3aa933..000000000 --- a/vendor/golang.org/x/text/unicode/norm/maketables.go +++ /dev/null @@ -1,986 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// Normalization table generator. -// Data read from the web. -// See forminfo.go for a description of the trie values associated with each rune. 
- -package main - -import ( - "bytes" - "encoding/binary" - "flag" - "fmt" - "io" - "log" - "sort" - "strconv" - "strings" - - "golang.org/x/text/internal/gen" - "golang.org/x/text/internal/triegen" - "golang.org/x/text/internal/ucd" -) - -func main() { - gen.Init() - loadUnicodeData() - compactCCC() - loadCompositionExclusions() - completeCharFields(FCanonical) - completeCharFields(FCompatibility) - computeNonStarterCounts() - verifyComputed() - printChars() - testDerived() - printTestdata() - makeTables() -} - -var ( - tablelist = flag.String("tables", - "all", - "comma-separated list of which tables to generate; "+ - "can be 'decomp', 'recomp', 'info' and 'all'") - test = flag.Bool("test", - false, - "test existing tables against DerivedNormalizationProps and generate test data for regression testing") - verbose = flag.Bool("verbose", - false, - "write data to stdout as it is parsed") -) - -const MaxChar = 0x10FFFF // anything above this shouldn't exist - -// Quick Check properties of runes allow us to quickly -// determine whether a rune may occur in a normal form. -// For a given normal form, a rune may be guaranteed to occur -// verbatim (QC=Yes), may or may not combine with another -// rune (QC=Maybe), or may not occur (QC=No). -type QCResult int - -const ( - QCUnknown QCResult = iota - QCYes - QCNo - QCMaybe -) - -func (r QCResult) String() string { - switch r { - case QCYes: - return "Yes" - case QCNo: - return "No" - case QCMaybe: - return "Maybe" - } - return "***UNKNOWN***" -} - -const ( - FCanonical = iota // NFC or NFD - FCompatibility // NFKC or NFKD - FNumberOfFormTypes -) - -const ( - MComposed = iota // NFC or NFKC - MDecomposed // NFD or NFKD - MNumberOfModes -) - -// This contains only the properties we're interested in. -type Char struct { - name string - codePoint rune // if zero, this index is not a valid code point. 
- ccc uint8 // canonical combining class - origCCC uint8 - excludeInComp bool // from CompositionExclusions.txt - compatDecomp bool // it has a compatibility expansion - - nTrailingNonStarters uint8 - nLeadingNonStarters uint8 // must be equal to trailing if non-zero - - forms [FNumberOfFormTypes]FormInfo // For FCanonical and FCompatibility - - state State -} - -var chars = make([]Char, MaxChar+1) -var cccMap = make(map[uint8]uint8) - -func (c Char) String() string { - buf := new(bytes.Buffer) - - fmt.Fprintf(buf, "%U [%s]:\n", c.codePoint, c.name) - fmt.Fprintf(buf, " ccc: %v\n", c.ccc) - fmt.Fprintf(buf, " excludeInComp: %v\n", c.excludeInComp) - fmt.Fprintf(buf, " compatDecomp: %v\n", c.compatDecomp) - fmt.Fprintf(buf, " state: %v\n", c.state) - fmt.Fprintf(buf, " NFC:\n") - fmt.Fprint(buf, c.forms[FCanonical]) - fmt.Fprintf(buf, " NFKC:\n") - fmt.Fprint(buf, c.forms[FCompatibility]) - - return buf.String() -} - -// In UnicodeData.txt, some ranges are marked like this: -// 3400;;Lo;0;L;;;;;N;;;;; -// 4DB5;;Lo;0;L;;;;;N;;;;; -// parseCharacter keeps a state variable indicating the weirdness. -type State int - -const ( - SNormal State = iota // known to be zero for the type - SFirst - SLast - SMissing -) - -var lastChar = rune('\u0000') - -func (c Char) isValid() bool { - return c.codePoint != 0 && c.state != SMissing -} - -type FormInfo struct { - quickCheck [MNumberOfModes]QCResult // index: MComposed or MDecomposed - verified [MNumberOfModes]bool // index: MComposed or MDecomposed - - combinesForward bool // May combine with rune on the right - combinesBackward bool // May combine with rune on the left - isOneWay bool // Never appears in result - inDecomp bool // Some decompositions result in this char. 
- decomp Decomposition - expandedDecomp Decomposition -} - -func (f FormInfo) String() string { - buf := bytes.NewBuffer(make([]byte, 0)) - - fmt.Fprintf(buf, " quickCheck[C]: %v\n", f.quickCheck[MComposed]) - fmt.Fprintf(buf, " quickCheck[D]: %v\n", f.quickCheck[MDecomposed]) - fmt.Fprintf(buf, " cmbForward: %v\n", f.combinesForward) - fmt.Fprintf(buf, " cmbBackward: %v\n", f.combinesBackward) - fmt.Fprintf(buf, " isOneWay: %v\n", f.isOneWay) - fmt.Fprintf(buf, " inDecomp: %v\n", f.inDecomp) - fmt.Fprintf(buf, " decomposition: %X\n", f.decomp) - fmt.Fprintf(buf, " expandedDecomp: %X\n", f.expandedDecomp) - - return buf.String() -} - -type Decomposition []rune - -func parseDecomposition(s string, skipfirst bool) (a []rune, err error) { - decomp := strings.Split(s, " ") - if len(decomp) > 0 && skipfirst { - decomp = decomp[1:] - } - for _, d := range decomp { - point, err := strconv.ParseUint(d, 16, 64) - if err != nil { - return a, err - } - a = append(a, rune(point)) - } - return a, nil -} - -func loadUnicodeData() { - f := gen.OpenUCDFile("UnicodeData.txt") - defer f.Close() - p := ucd.New(f) - for p.Next() { - r := p.Rune(ucd.CodePoint) - char := &chars[r] - - char.ccc = uint8(p.Uint(ucd.CanonicalCombiningClass)) - decmap := p.String(ucd.DecompMapping) - - exp, err := parseDecomposition(decmap, false) - isCompat := false - if err != nil { - if len(decmap) > 0 { - exp, err = parseDecomposition(decmap, true) - if err != nil { - log.Fatalf(`%U: bad decomp |%v|: "%s"`, r, decmap, err) - } - isCompat = true - } - } - - char.name = p.String(ucd.Name) - char.codePoint = r - char.forms[FCompatibility].decomp = exp - if !isCompat { - char.forms[FCanonical].decomp = exp - } else { - char.compatDecomp = true - } - if len(decmap) > 0 { - char.forms[FCompatibility].decomp = exp - } - } - if err := p.Err(); err != nil { - log.Fatal(err) - } -} - -// compactCCC converts the sparse set of CCC values to a continguous one, -// reducing the number of bits needed from 8 to 6. 
-func compactCCC() { - m := make(map[uint8]uint8) - for i := range chars { - c := &chars[i] - m[c.ccc] = 0 - } - cccs := []int{} - for v, _ := range m { - cccs = append(cccs, int(v)) - } - sort.Ints(cccs) - for i, c := range cccs { - cccMap[uint8(i)] = uint8(c) - m[uint8(c)] = uint8(i) - } - for i := range chars { - c := &chars[i] - c.origCCC = c.ccc - c.ccc = m[c.ccc] - } - if len(m) >= 1<<6 { - log.Fatalf("too many difference CCC values: %d >= 64", len(m)) - } -} - -// CompositionExclusions.txt has form: -// 0958 # ... -// See https://unicode.org/reports/tr44/ for full explanation -func loadCompositionExclusions() { - f := gen.OpenUCDFile("CompositionExclusions.txt") - defer f.Close() - p := ucd.New(f) - for p.Next() { - c := &chars[p.Rune(0)] - if c.excludeInComp { - log.Fatalf("%U: Duplicate entry in exclusions.", c.codePoint) - } - c.excludeInComp = true - } - if e := p.Err(); e != nil { - log.Fatal(e) - } -} - -// hasCompatDecomp returns true if any of the recursive -// decompositions contains a compatibility expansion. -// In this case, the character may not occur in NFK*. -func hasCompatDecomp(r rune) bool { - c := &chars[r] - if c.compatDecomp { - return true - } - for _, d := range c.forms[FCompatibility].decomp { - if hasCompatDecomp(d) { - return true - } - } - return false -} - -// Hangul related constants. -const ( - HangulBase = 0xAC00 - HangulEnd = 0xD7A4 // hangulBase + Jamo combinations (19 * 21 * 28) - - JamoLBase = 0x1100 - JamoLEnd = 0x1113 - JamoVBase = 0x1161 - JamoVEnd = 0x1176 - JamoTBase = 0x11A8 - JamoTEnd = 0x11C3 - - JamoLVTCount = 19 * 21 * 28 - JamoTCount = 28 -) - -func isHangul(r rune) bool { - return HangulBase <= r && r < HangulEnd -} - -func isHangulWithoutJamoT(r rune) bool { - if !isHangul(r) { - return false - } - r -= HangulBase - return r < JamoLVTCount && r%JamoTCount == 0 -} - -func ccc(r rune) uint8 { - return chars[r].ccc -} - -// Insert a rune in a buffer, ordered by Canonical Combining Class. 
-func insertOrdered(b Decomposition, r rune) Decomposition { - n := len(b) - b = append(b, 0) - cc := ccc(r) - if cc > 0 { - // Use bubble sort. - for ; n > 0; n-- { - if ccc(b[n-1]) <= cc { - break - } - b[n] = b[n-1] - } - } - b[n] = r - return b -} - -// Recursively decompose. -func decomposeRecursive(form int, r rune, d Decomposition) Decomposition { - dcomp := chars[r].forms[form].decomp - if len(dcomp) == 0 { - return insertOrdered(d, r) - } - for _, c := range dcomp { - d = decomposeRecursive(form, c, d) - } - return d -} - -func completeCharFields(form int) { - // Phase 0: pre-expand decomposition. - for i := range chars { - f := &chars[i].forms[form] - if len(f.decomp) == 0 { - continue - } - exp := make(Decomposition, 0) - for _, c := range f.decomp { - exp = decomposeRecursive(form, c, exp) - } - f.expandedDecomp = exp - } - - // Phase 1: composition exclusion, mark decomposition. - for i := range chars { - c := &chars[i] - f := &c.forms[form] - - // Marks script-specific exclusions and version restricted. - f.isOneWay = c.excludeInComp - - // Singletons - f.isOneWay = f.isOneWay || len(f.decomp) == 1 - - // Non-starter decompositions - if len(f.decomp) > 1 { - chk := c.ccc != 0 || chars[f.decomp[0]].ccc != 0 - f.isOneWay = f.isOneWay || chk - } - - // Runes that decompose into more than two runes. - f.isOneWay = f.isOneWay || len(f.decomp) > 2 - - if form == FCompatibility { - f.isOneWay = f.isOneWay || hasCompatDecomp(c.codePoint) - } - - for _, r := range f.decomp { - chars[r].forms[form].inDecomp = true - } - } - - // Phase 2: forward and backward combining. 
- for i := range chars { - c := &chars[i] - f := &c.forms[form] - - if !f.isOneWay && len(f.decomp) == 2 { - f0 := &chars[f.decomp[0]].forms[form] - f1 := &chars[f.decomp[1]].forms[form] - if !f0.isOneWay { - f0.combinesForward = true - } - if !f1.isOneWay { - f1.combinesBackward = true - } - } - if isHangulWithoutJamoT(rune(i)) { - f.combinesForward = true - } - } - - // Phase 3: quick check values. - for i := range chars { - c := &chars[i] - f := &c.forms[form] - - switch { - case len(f.decomp) > 0: - f.quickCheck[MDecomposed] = QCNo - case isHangul(rune(i)): - f.quickCheck[MDecomposed] = QCNo - default: - f.quickCheck[MDecomposed] = QCYes - } - switch { - case f.isOneWay: - f.quickCheck[MComposed] = QCNo - case (i & 0xffff00) == JamoLBase: - f.quickCheck[MComposed] = QCYes - if JamoLBase <= i && i < JamoLEnd { - f.combinesForward = true - } - if JamoVBase <= i && i < JamoVEnd { - f.quickCheck[MComposed] = QCMaybe - f.combinesBackward = true - f.combinesForward = true - } - if JamoTBase <= i && i < JamoTEnd { - f.quickCheck[MComposed] = QCMaybe - f.combinesBackward = true - } - case !f.combinesBackward: - f.quickCheck[MComposed] = QCYes - default: - f.quickCheck[MComposed] = QCMaybe - } - } -} - -func computeNonStarterCounts() { - // Phase 4: leading and trailing non-starter count - for i := range chars { - c := &chars[i] - - runes := []rune{rune(i)} - // We always use FCompatibility so that the CGJ insertion points do not - // change for repeated normalizations with different forms. - if exp := c.forms[FCompatibility].expandedDecomp; len(exp) > 0 { - runes = exp - } - // We consider runes that combine backwards to be non-starters for the - // purpose of Stream-Safe Text Processing. 
- for _, r := range runes { - if cr := &chars[r]; cr.ccc == 0 && !cr.forms[FCompatibility].combinesBackward { - break - } - c.nLeadingNonStarters++ - } - for i := len(runes) - 1; i >= 0; i-- { - if cr := &chars[runes[i]]; cr.ccc == 0 && !cr.forms[FCompatibility].combinesBackward { - break - } - c.nTrailingNonStarters++ - } - if c.nTrailingNonStarters > 3 { - log.Fatalf("%U: Decomposition with more than 3 (%d) trailing modifiers (%U)", i, c.nTrailingNonStarters, runes) - } - - if isHangul(rune(i)) { - c.nTrailingNonStarters = 2 - if isHangulWithoutJamoT(rune(i)) { - c.nTrailingNonStarters = 1 - } - } - - if l, t := c.nLeadingNonStarters, c.nTrailingNonStarters; l > 0 && l != t { - log.Fatalf("%U: number of leading and trailing non-starters should be equal (%d vs %d)", i, l, t) - } - if t := c.nTrailingNonStarters; t > 3 { - log.Fatalf("%U: number of trailing non-starters is %d > 3", t) - } - } -} - -func printBytes(w io.Writer, b []byte, name string) { - fmt.Fprintf(w, "// %s: %d bytes\n", name, len(b)) - fmt.Fprintf(w, "var %s = [...]byte {", name) - for i, c := range b { - switch { - case i%64 == 0: - fmt.Fprintf(w, "\n// Bytes %x - %x\n", i, i+63) - case i%8 == 0: - fmt.Fprintf(w, "\n") - } - fmt.Fprintf(w, "0x%.2X, ", c) - } - fmt.Fprint(w, "\n}\n\n") -} - -// See forminfo.go for format. -func makeEntry(f *FormInfo, c *Char) uint16 { - e := uint16(0) - if r := c.codePoint; HangulBase <= r && r < HangulEnd { - e |= 0x40 - } - if f.combinesForward { - e |= 0x20 - } - if f.quickCheck[MDecomposed] == QCNo { - e |= 0x4 - } - switch f.quickCheck[MComposed] { - case QCYes: - case QCNo: - e |= 0x10 - case QCMaybe: - e |= 0x18 - default: - log.Fatalf("Illegal quickcheck value %v.", f.quickCheck[MComposed]) - } - e |= uint16(c.nTrailingNonStarters) - return e -} - -// decompSet keeps track of unique decompositions, grouped by whether -// the decomposition is followed by a trailing and/or leading CCC. 
-type decompSet [7]map[string]bool - -const ( - normalDecomp = iota - firstMulti - firstCCC - endMulti - firstLeadingCCC - firstCCCZeroExcept - firstStarterWithNLead - lastDecomp -) - -var cname = []string{"firstMulti", "firstCCC", "endMulti", "firstLeadingCCC", "firstCCCZeroExcept", "firstStarterWithNLead", "lastDecomp"} - -func makeDecompSet() decompSet { - m := decompSet{} - for i := range m { - m[i] = make(map[string]bool) - } - return m -} -func (m *decompSet) insert(key int, s string) { - m[key][s] = true -} - -func printCharInfoTables(w io.Writer) int { - mkstr := func(r rune, f *FormInfo) (int, string) { - d := f.expandedDecomp - s := string([]rune(d)) - if max := 1 << 6; len(s) >= max { - const msg = "%U: too many bytes in decomposition: %d >= %d" - log.Fatalf(msg, r, len(s), max) - } - head := uint8(len(s)) - if f.quickCheck[MComposed] != QCYes { - head |= 0x40 - } - if f.combinesForward { - head |= 0x80 - } - s = string([]byte{head}) + s - - lccc := ccc(d[0]) - tccc := ccc(d[len(d)-1]) - cc := ccc(r) - if cc != 0 && lccc == 0 && tccc == 0 { - log.Fatalf("%U: trailing and leading ccc are 0 for non-zero ccc %d", r, cc) - } - if tccc < lccc && lccc != 0 { - const msg = "%U: lccc (%d) must be <= tcc (%d)" - log.Fatalf(msg, r, lccc, tccc) - } - index := normalDecomp - nTrail := chars[r].nTrailingNonStarters - nLead := chars[r].nLeadingNonStarters - if tccc > 0 || lccc > 0 || nTrail > 0 { - tccc <<= 2 - tccc |= nTrail - s += string([]byte{tccc}) - index = endMulti - for _, r := range d[1:] { - if ccc(r) == 0 { - index = firstCCC - } - } - if lccc > 0 || nLead > 0 { - s += string([]byte{lccc}) - if index == firstCCC { - log.Fatalf("%U: multi-segment decomposition not supported for decompositions with leading CCC != 0", r) - } - index = firstLeadingCCC - } - if cc != lccc { - if cc != 0 { - log.Fatalf("%U: for lccc != ccc, expected ccc to be 0; was %d", r, cc) - } - index = firstCCCZeroExcept - } - } else if len(d) > 1 { - index = firstMulti - } - return index, 
s - } - - decompSet := makeDecompSet() - const nLeadStr = "\x00\x01" // 0-byte length and tccc with nTrail. - decompSet.insert(firstStarterWithNLead, nLeadStr) - - // Store the uniqued decompositions in a byte buffer, - // preceded by their byte length. - for _, c := range chars { - for _, f := range c.forms { - if len(f.expandedDecomp) == 0 { - continue - } - if f.combinesBackward { - log.Fatalf("%U: combinesBackward and decompose", c.codePoint) - } - index, s := mkstr(c.codePoint, &f) - decompSet.insert(index, s) - } - } - - decompositions := bytes.NewBuffer(make([]byte, 0, 10000)) - size := 0 - positionMap := make(map[string]uint16) - decompositions.WriteString("\000") - fmt.Fprintln(w, "const (") - for i, m := range decompSet { - sa := []string{} - for s := range m { - sa = append(sa, s) - } - sort.Strings(sa) - for _, s := range sa { - p := decompositions.Len() - decompositions.WriteString(s) - positionMap[s] = uint16(p) - } - if cname[i] != "" { - fmt.Fprintf(w, "%s = 0x%X\n", cname[i], decompositions.Len()) - } - } - fmt.Fprintln(w, "maxDecomp = 0x8000") - fmt.Fprintln(w, ")") - b := decompositions.Bytes() - printBytes(w, b, "decomps") - size += len(b) - - varnames := []string{"nfc", "nfkc"} - for i := 0; i < FNumberOfFormTypes; i++ { - trie := triegen.NewTrie(varnames[i]) - - for r, c := range chars { - f := c.forms[i] - d := f.expandedDecomp - if len(d) != 0 { - _, key := mkstr(c.codePoint, &f) - trie.Insert(rune(r), uint64(positionMap[key])) - if c.ccc != ccc(d[0]) { - // We assume the lead ccc of a decomposition !=0 in this case. - if ccc(d[0]) == 0 { - log.Fatalf("Expected leading CCC to be non-zero; ccc is %d", c.ccc) - } - } - } else if c.nLeadingNonStarters > 0 && len(f.expandedDecomp) == 0 && c.ccc == 0 && !f.combinesBackward { - // Handle cases where it can't be detected that the nLead should be equal - // to nTrail. 
- trie.Insert(c.codePoint, uint64(positionMap[nLeadStr])) - } else if v := makeEntry(&f, &c)<<8 | uint16(c.ccc); v != 0 { - trie.Insert(c.codePoint, uint64(0x8000|v)) - } - } - sz, err := trie.Gen(w, triegen.Compact(&normCompacter{name: varnames[i]})) - if err != nil { - log.Fatal(err) - } - size += sz - } - return size -} - -func contains(sa []string, s string) bool { - for _, a := range sa { - if a == s { - return true - } - } - return false -} - -func makeTables() { - w := &bytes.Buffer{} - - size := 0 - if *tablelist == "" { - return - } - list := strings.Split(*tablelist, ",") - if *tablelist == "all" { - list = []string{"recomp", "info"} - } - - // Compute maximum decomposition size. - max := 0 - for _, c := range chars { - if n := len(string(c.forms[FCompatibility].expandedDecomp)); n > max { - max = n - } - } - fmt.Fprintln(w, `import "sync"`) - fmt.Fprintln(w) - - fmt.Fprintln(w, "const (") - fmt.Fprintln(w, "\t// Version is the Unicode edition from which the tables are derived.") - fmt.Fprintf(w, "\tVersion = %q\n", gen.UnicodeVersion()) - fmt.Fprintln(w) - fmt.Fprintln(w, "\t// MaxTransformChunkSize indicates the maximum number of bytes that Transform") - fmt.Fprintln(w, "\t// may need to write atomically for any Form. Making a destination buffer at") - fmt.Fprintln(w, "\t// least this size ensures that Transform can always make progress and that") - fmt.Fprintln(w, "\t// the user does not need to grow the buffer on an ErrShortDst.") - fmt.Fprintf(w, "\tMaxTransformChunkSize = %d+maxNonStarters*4\n", len(string(0x034F))+max) - fmt.Fprintln(w, ")\n") - - // Print the CCC remap table. 
- size += len(cccMap) - fmt.Fprintf(w, "var ccc = [%d]uint8{", len(cccMap)) - for i := 0; i < len(cccMap); i++ { - if i%8 == 0 { - fmt.Fprintln(w) - } - fmt.Fprintf(w, "%3d, ", cccMap[uint8(i)]) - } - fmt.Fprintln(w, "\n}\n") - - if contains(list, "info") { - size += printCharInfoTables(w) - } - - if contains(list, "recomp") { - // Note that we use 32 bit keys, instead of 64 bit. - // This clips the bits of three entries, but we know - // this won't cause a collision. The compiler will catch - // any changes made to UnicodeData.txt that introduces - // a collision. - // Note that the recomposition map for NFC and NFKC - // are identical. - - // Recomposition map - nrentries := 0 - for _, c := range chars { - f := c.forms[FCanonical] - if !f.isOneWay && len(f.decomp) > 0 { - nrentries++ - } - } - sz := nrentries * 8 - size += sz - fmt.Fprintf(w, "// recompMap: %d bytes (entries only)\n", sz) - fmt.Fprintln(w, "var recompMap map[uint32]rune") - fmt.Fprintln(w, "var recompMapOnce sync.Once\n") - fmt.Fprintln(w, `const recompMapPacked = "" +`) - var buf [8]byte - for i, c := range chars { - f := c.forms[FCanonical] - d := f.decomp - if !f.isOneWay && len(d) > 0 { - key := uint32(uint16(d[0]))<<16 + uint32(uint16(d[1])) - binary.BigEndian.PutUint32(buf[:4], key) - binary.BigEndian.PutUint32(buf[4:], uint32(i)) - fmt.Fprintf(w, "\t\t%q + // 0x%.8X: 0x%.8X\n", string(buf[:]), key, uint32(i)) - } - } - // hack so we don't have to special case the trailing plus sign - fmt.Fprintf(w, ` ""`) - fmt.Fprintln(w) - } - - fmt.Fprintf(w, "// Total size of tables: %dKB (%d bytes)\n", (size+512)/1024, size) - gen.WriteVersionedGoFile("tables.go", "norm", w.Bytes()) -} - -func printChars() { - if *verbose { - for _, c := range chars { - if !c.isValid() || c.state == SMissing { - continue - } - fmt.Println(c) - } - } -} - -// verifyComputed does various consistency tests. 
-func verifyComputed() { - for i, c := range chars { - for _, f := range c.forms { - isNo := (f.quickCheck[MDecomposed] == QCNo) - if (len(f.decomp) > 0) != isNo && !isHangul(rune(i)) { - log.Fatalf("%U: NF*D QC must be No if rune decomposes", i) - } - - isMaybe := f.quickCheck[MComposed] == QCMaybe - if f.combinesBackward != isMaybe { - log.Fatalf("%U: NF*C QC must be Maybe if combinesBackward", i) - } - if len(f.decomp) > 0 && f.combinesForward && isMaybe { - log.Fatalf("%U: NF*C QC must be Yes or No if combinesForward and decomposes", i) - } - - if len(f.expandedDecomp) != 0 { - continue - } - if a, b := c.nLeadingNonStarters > 0, (c.ccc > 0 || f.combinesBackward); a != b { - // We accept these runes to be treated differently (it only affects - // segment breaking in iteration, most likely on improper use), but - // reconsider if more characters are added. - // U+FF9E HALFWIDTH KATAKANA VOICED SOUND MARK;Lm;0;L; 3099;;;;N;;;;; - // U+FF9F HALFWIDTH KATAKANA SEMI-VOICED SOUND MARK;Lm;0;L; 309A;;;;N;;;;; - // U+3133 HANGUL LETTER KIYEOK-SIOS;Lo;0;L; 11AA;;;;N;HANGUL LETTER GIYEOG SIOS;;;; - // U+318E HANGUL LETTER ARAEAE;Lo;0;L; 11A1;;;;N;HANGUL LETTER ALAE AE;;;; - // U+FFA3 HALFWIDTH HANGUL LETTER KIYEOK-SIOS;Lo;0;L; 3133;;;;N;HALFWIDTH HANGUL LETTER GIYEOG SIOS;;;; - // U+FFDC HALFWIDTH HANGUL LETTER I;Lo;0;L; 3163;;;;N;;;;; - if i != 0xFF9E && i != 0xFF9F && !(0x3133 <= i && i <= 0x318E) && !(0xFFA3 <= i && i <= 0xFFDC) { - log.Fatalf("%U: nLead was %v; want %v", i, a, b) - } - } - } - nfc := c.forms[FCanonical] - nfkc := c.forms[FCompatibility] - if nfc.combinesBackward != nfkc.combinesBackward { - log.Fatalf("%U: Cannot combine combinesBackward\n", c.codePoint) - } - } -} - -// Use values in DerivedNormalizationProps.txt to compare against the -// values we computed. -// DerivedNormalizationProps.txt has form: -// 00C0..00C5 ; NFD_QC; N # ... -// 0374 ; NFD_QC; N # ... 
-// See https://unicode.org/reports/tr44/ for full explanation -func testDerived() { - f := gen.OpenUCDFile("DerivedNormalizationProps.txt") - defer f.Close() - p := ucd.New(f) - for p.Next() { - r := p.Rune(0) - c := &chars[r] - - var ftype, mode int - qt := p.String(1) - switch qt { - case "NFC_QC": - ftype, mode = FCanonical, MComposed - case "NFD_QC": - ftype, mode = FCanonical, MDecomposed - case "NFKC_QC": - ftype, mode = FCompatibility, MComposed - case "NFKD_QC": - ftype, mode = FCompatibility, MDecomposed - default: - continue - } - var qr QCResult - switch p.String(2) { - case "Y": - qr = QCYes - case "N": - qr = QCNo - case "M": - qr = QCMaybe - default: - log.Fatalf(`Unexpected quick check value "%s"`, p.String(2)) - } - if got := c.forms[ftype].quickCheck[mode]; got != qr { - log.Printf("%U: FAILED %s (was %v need %v)\n", r, qt, got, qr) - } - c.forms[ftype].verified[mode] = true - } - if err := p.Err(); err != nil { - log.Fatal(err) - } - // Any unspecified value must be QCYes. Verify this. 
- for i, c := range chars { - for j, fd := range c.forms { - for k, qr := range fd.quickCheck { - if !fd.verified[k] && qr != QCYes { - m := "%U: FAIL F:%d M:%d (was %v need Yes) %s\n" - log.Printf(m, i, j, k, qr, c.name) - } - } - } - } -} - -var testHeader = `const ( - Yes = iota - No - Maybe -) - -type formData struct { - qc uint8 - combinesForward bool - decomposition string -} - -type runeData struct { - r rune - ccc uint8 - nLead uint8 - nTrail uint8 - f [2]formData // 0: canonical; 1: compatibility -} - -func f(qc uint8, cf bool, dec string) [2]formData { - return [2]formData{{qc, cf, dec}, {qc, cf, dec}} -} - -func g(qc, qck uint8, cf, cfk bool, d, dk string) [2]formData { - return [2]formData{{qc, cf, d}, {qck, cfk, dk}} -} - -var testData = []runeData{ -` - -func printTestdata() { - type lastInfo struct { - ccc uint8 - nLead uint8 - nTrail uint8 - f string - } - - last := lastInfo{} - w := &bytes.Buffer{} - fmt.Fprintf(w, testHeader) - for r, c := range chars { - f := c.forms[FCanonical] - qc, cf, d := f.quickCheck[MComposed], f.combinesForward, string(f.expandedDecomp) - f = c.forms[FCompatibility] - qck, cfk, dk := f.quickCheck[MComposed], f.combinesForward, string(f.expandedDecomp) - s := "" - if d == dk && qc == qck && cf == cfk { - s = fmt.Sprintf("f(%s, %v, %q)", qc, cf, d) - } else { - s = fmt.Sprintf("g(%s, %s, %v, %v, %q, %q)", qc, qck, cf, cfk, d, dk) - } - current := lastInfo{c.ccc, c.nLeadingNonStarters, c.nTrailingNonStarters, s} - if last != current { - fmt.Fprintf(w, "\t{0x%x, %d, %d, %d, %s},\n", r, c.origCCC, c.nLeadingNonStarters, c.nTrailingNonStarters, s) - last = current - } - } - fmt.Fprintln(w, "}") - gen.WriteVersionedGoFile("data_test.go", "norm", w.Bytes()) -} diff --git a/vendor/golang.org/x/text/unicode/norm/triegen.go b/vendor/golang.org/x/text/unicode/norm/triegen.go deleted file mode 100644 index 45d711900..000000000 --- a/vendor/golang.org/x/text/unicode/norm/triegen.go +++ /dev/null @@ -1,117 +0,0 @@ -// Copyright 2011 The 
Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// Trie table generator. -// Used by make*tables tools to generate a go file with trie data structures -// for mapping UTF-8 to a 16-bit value. All but the last byte in a UTF-8 byte -// sequence are used to lookup offsets in the index table to be used for the -// next byte. The last byte is used to index into a table with 16-bit values. - -package main - -import ( - "fmt" - "io" -) - -const maxSparseEntries = 16 - -type normCompacter struct { - sparseBlocks [][]uint64 - sparseOffset []uint16 - sparseCount int - name string -} - -func mostFrequentStride(a []uint64) int { - counts := make(map[int]int) - var v int - for _, x := range a { - if stride := int(x) - v; v != 0 && stride >= 0 { - counts[stride]++ - } - v = int(x) - } - var maxs, maxc int - for stride, cnt := range counts { - if cnt > maxc || (cnt == maxc && stride < maxs) { - maxs, maxc = stride, cnt - } - } - return maxs -} - -func countSparseEntries(a []uint64) int { - stride := mostFrequentStride(a) - var v, count int - for _, tv := range a { - if int(tv)-v != stride { - if tv != 0 { - count++ - } - } - v = int(tv) - } - return count -} - -func (c *normCompacter) Size(v []uint64) (sz int, ok bool) { - if n := countSparseEntries(v); n <= maxSparseEntries { - return (n+1)*4 + 2, true - } - return 0, false -} - -func (c *normCompacter) Store(v []uint64) uint32 { - h := uint32(len(c.sparseOffset)) - c.sparseBlocks = append(c.sparseBlocks, v) - c.sparseOffset = append(c.sparseOffset, uint16(c.sparseCount)) - c.sparseCount += countSparseEntries(v) + 1 - return h -} - -func (c *normCompacter) Handler() string { - return c.name + "Sparse.lookup" -} - -func (c *normCompacter) Print(w io.Writer) (retErr error) { - p := func(f string, x ...interface{}) { - if _, err := fmt.Fprintf(w, f, x...); retErr == nil && err != nil { - retErr = err - } - } - - ls := 
len(c.sparseBlocks) - p("// %sSparseOffset: %d entries, %d bytes\n", c.name, ls, ls*2) - p("var %sSparseOffset = %#v\n\n", c.name, c.sparseOffset) - - ns := c.sparseCount - p("// %sSparseValues: %d entries, %d bytes\n", c.name, ns, ns*4) - p("var %sSparseValues = [%d]valueRange {", c.name, ns) - for i, b := range c.sparseBlocks { - p("\n// Block %#x, offset %#x", i, c.sparseOffset[i]) - var v int - stride := mostFrequentStride(b) - n := countSparseEntries(b) - p("\n{value:%#04x,lo:%#02x},", stride, uint8(n)) - for i, nv := range b { - if int(nv)-v != stride { - if v != 0 { - p(",hi:%#02x},", 0x80+i-1) - } - if nv != 0 { - p("\n{value:%#04x,lo:%#02x", nv, 0x80+i) - } - } - v = int(nv) - } - if v != 0 { - p(",hi:%#02x},", 0x80+len(b)-1) - } - } - p("\n}\n\n") - return -} diff --git a/vendor/modules.txt b/vendor/modules.txt index 7ce1977b5..65a405a72 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,43 +1,45 @@ # cloud.google.com/go v0.39.0 -cloud.google.com/go/monitoring/apiv3 -cloud.google.com/go/storage -cloud.google.com/go/spanner -cloud.google.com/go/kms/apiv1 cloud.google.com/go/civil cloud.google.com/go/compute/metadata cloud.google.com/go/iam cloud.google.com/go/internal +cloud.google.com/go/internal/fields cloud.google.com/go/internal/optional +cloud.google.com/go/internal/protostruct cloud.google.com/go/internal/trace cloud.google.com/go/internal/version -cloud.google.com/go/internal/fields -cloud.google.com/go/internal/protostruct +cloud.google.com/go/kms/apiv1 +cloud.google.com/go/monitoring/apiv3 +cloud.google.com/go/spanner cloud.google.com/go/spanner/internal/backoff +cloud.google.com/go/storage # code.cloudfoundry.org/gofileutils v0.0.0-20170111115228-4d0c80011a0f code.cloudfoundry.org/gofileutils/fileutils # contrib.go.opencensus.io/exporter/ocagent v0.4.12 contrib.go.opencensus.io/exporter/ocagent # github.com/Azure/azure-sdk-for-go v29.0.0+incompatible -github.com/Azure/azure-sdk-for-go/storage 
-github.com/Azure/azure-sdk-for-go/services/keyvault/2016-10-01/keyvault github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac +github.com/Azure/azure-sdk-for-go/services/keyvault/2016-10-01/keyvault github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-01-01-preview/authorization +github.com/Azure/azure-sdk-for-go/storage github.com/Azure/azure-sdk-for-go/version # github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 -github.com/Azure/go-ansiterm/winterm github.com/Azure/go-ansiterm +github.com/Azure/go-ansiterm/winterm # github.com/Azure/go-autorest v11.7.1+incompatible -github.com/Azure/go-autorest/autorest/azure github.com/Azure/go-autorest/autorest -github.com/Azure/go-autorest/autorest/azure/auth -github.com/Azure/go-autorest/autorest/to -github.com/Azure/go-autorest/autorest/date -github.com/Azure/go-autorest/tracing -github.com/Azure/go-autorest/autorest/validation github.com/Azure/go-autorest/autorest/adal -github.com/Azure/go-autorest/logger +github.com/Azure/go-autorest/autorest/azure +github.com/Azure/go-autorest/autorest/azure/auth github.com/Azure/go-autorest/autorest/azure/cli +github.com/Azure/go-autorest/autorest/date +github.com/Azure/go-autorest/autorest/to +github.com/Azure/go-autorest/autorest/validation +github.com/Azure/go-autorest/logger +github.com/Azure/go-autorest/tracing +# github.com/BurntSushi/toml v0.3.1 +github.com/BurntSushi/toml # github.com/DataDog/datadog-go v2.2.0+incompatible github.com/DataDog/datadog-go/statsd # github.com/Jeffail/gabs v1.1.1 @@ -54,27 +56,27 @@ github.com/Nvveen/Gotty # github.com/SAP/go-hdb v0.14.1 github.com/SAP/go-hdb/driver github.com/SAP/go-hdb/driver/sqltrace -github.com/SAP/go-hdb/internal/protocol github.com/SAP/go-hdb/internal/bufio +github.com/SAP/go-hdb/internal/protocol github.com/SAP/go-hdb/internal/unicode github.com/SAP/go-hdb/internal/unicode/cesu8 # 
github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 github.com/StackExchange/wmi # github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190620160927-9418d7b0cd0f -github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth -github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/providers github.com/aliyun/alibaba-cloud-sdk-go/sdk -github.com/aliyun/alibaba-cloud-sdk-go/services/kms -github.com/aliyun/alibaba-cloud-sdk-go/sdk/endpoints -github.com/aliyun/alibaba-cloud-sdk-go/services/sts +github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials +github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/provider +github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/providers github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers +github.com/aliyun/alibaba-cloud-sdk-go/sdk/endpoints github.com/aliyun/alibaba-cloud-sdk-go/sdk/errors github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses github.com/aliyun/alibaba-cloud-sdk-go/sdk/utils -github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/provider +github.com/aliyun/alibaba-cloud-sdk-go/services/kms github.com/aliyun/alibaba-cloud-sdk-go/services/ram +github.com/aliyun/alibaba-cloud-sdk-go/services/sts # github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20190307165228-86c17b95fcd5 github.com/aliyun/aliyun-oss-go-sdk/oss # github.com/apple/foundationdb/bindings/go v0.0.0-20190411004307-cd5c9d91fad2 @@ -95,52 +97,52 @@ github.com/armon/go-radix github.com/asaskevich/govalidator # github.com/aws/aws-sdk-go v1.19.39 github.com/aws/aws-sdk-go/aws -github.com/aws/aws-sdk-go/aws/credentials -github.com/aws/aws-sdk-go/aws/credentials/stscreds -github.com/aws/aws-sdk-go/aws/endpoints -github.com/aws/aws-sdk-go/aws/session -github.com/aws/aws-sdk-go/service/ec2 -github.com/aws/aws-sdk-go/service/iam -github.com/aws/aws-sdk-go/service/sts github.com/aws/aws-sdk-go/aws/arn github.com/aws/aws-sdk-go/aws/awserr 
-github.com/aws/aws-sdk-go/service/iam/iamiface -github.com/aws/aws-sdk-go/service/sts/stsiface -github.com/aws/aws-sdk-go/aws/defaults -github.com/aws/aws-sdk-go/aws/ec2metadata -github.com/aws/aws-sdk-go/aws/request -github.com/aws/aws-sdk-go/service/dynamodb -github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute -github.com/aws/aws-sdk-go/service/s3 -github.com/aws/aws-sdk-go/service/kms -github.com/aws/aws-sdk-go/service/kms/kmsiface -github.com/aws/aws-sdk-go/internal/sdkio -github.com/aws/aws-sdk-go/internal/ini -github.com/aws/aws-sdk-go/internal/shareddefaults -github.com/aws/aws-sdk-go/aws/client -github.com/aws/aws-sdk-go/internal/sdkrand -github.com/aws/aws-sdk-go/aws/corehandlers -github.com/aws/aws-sdk-go/aws/credentials/processcreds -github.com/aws/aws-sdk-go/aws/csm github.com/aws/aws-sdk-go/aws/awsutil +github.com/aws/aws-sdk-go/aws/client github.com/aws/aws-sdk-go/aws/client/metadata -github.com/aws/aws-sdk-go/aws/signer/v4 -github.com/aws/aws-sdk-go/private/protocol -github.com/aws/aws-sdk-go/private/protocol/ec2query -github.com/aws/aws-sdk-go/private/protocol/query +github.com/aws/aws-sdk-go/aws/corehandlers +github.com/aws/aws-sdk-go/aws/credentials github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds github.com/aws/aws-sdk-go/aws/credentials/endpointcreds -github.com/aws/aws-sdk-go/internal/sdkuri +github.com/aws/aws-sdk-go/aws/credentials/processcreds +github.com/aws/aws-sdk-go/aws/credentials/stscreds github.com/aws/aws-sdk-go/aws/crr -github.com/aws/aws-sdk-go/private/protocol/jsonrpc +github.com/aws/aws-sdk-go/aws/csm +github.com/aws/aws-sdk-go/aws/defaults +github.com/aws/aws-sdk-go/aws/ec2metadata +github.com/aws/aws-sdk-go/aws/endpoints +github.com/aws/aws-sdk-go/aws/request +github.com/aws/aws-sdk-go/aws/session +github.com/aws/aws-sdk-go/aws/signer/v4 +github.com/aws/aws-sdk-go/internal/ini github.com/aws/aws-sdk-go/internal/s3err +github.com/aws/aws-sdk-go/internal/sdkio +github.com/aws/aws-sdk-go/internal/sdkrand 
+github.com/aws/aws-sdk-go/internal/sdkuri +github.com/aws/aws-sdk-go/internal/shareddefaults +github.com/aws/aws-sdk-go/private/protocol +github.com/aws/aws-sdk-go/private/protocol/ec2query github.com/aws/aws-sdk-go/private/protocol/eventstream github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi +github.com/aws/aws-sdk-go/private/protocol/json/jsonutil +github.com/aws/aws-sdk-go/private/protocol/jsonrpc +github.com/aws/aws-sdk-go/private/protocol/query +github.com/aws/aws-sdk-go/private/protocol/query/queryutil github.com/aws/aws-sdk-go/private/protocol/rest github.com/aws/aws-sdk-go/private/protocol/restxml github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil -github.com/aws/aws-sdk-go/private/protocol/query/queryutil -github.com/aws/aws-sdk-go/private/protocol/json/jsonutil +github.com/aws/aws-sdk-go/service/dynamodb +github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute +github.com/aws/aws-sdk-go/service/ec2 +github.com/aws/aws-sdk-go/service/iam +github.com/aws/aws-sdk-go/service/iam/iamiface +github.com/aws/aws-sdk-go/service/kms +github.com/aws/aws-sdk-go/service/kms/kmsiface +github.com/aws/aws-sdk-go/service/s3 +github.com/aws/aws-sdk-go/service/sts +github.com/aws/aws-sdk-go/service/sts/stsiface # github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 github.com/beorn7/perks/quantile # github.com/bgentry/speakeasy v0.1.0 @@ -150,10 +152,10 @@ github.com/boombuler/barcode github.com/boombuler/barcode/qr github.com/boombuler/barcode/utils # github.com/briankassouf/jose v0.9.2-0.20180619214549-d2569464773f +github.com/briankassouf/jose github.com/briankassouf/jose/crypto github.com/briankassouf/jose/jws github.com/briankassouf/jose/jwt -github.com/briankassouf/jose # github.com/cenkalti/backoff v2.2.1+incompatible github.com/cenkalti/backoff # github.com/census-instrumentation/opencensus-proto v0.2.0 @@ -171,8 +173,8 @@ github.com/chrismalek/oktasdk-go/okta # github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible 
github.com/circonus-labs/circonus-gometrics github.com/circonus-labs/circonus-gometrics/api -github.com/circonus-labs/circonus-gometrics/checkmgr github.com/circonus-labs/circonus-gometrics/api/config +github.com/circonus-labs/circonus-gometrics/checkmgr # github.com/circonus-labs/circonusllhist v0.1.3 github.com/circonus-labs/circonusllhist # github.com/cloudfoundry-community/go-cfclient v0.0.0-20190201205600-f136f9222381 @@ -203,12 +205,12 @@ github.com/docker/go-connections/nat # github.com/docker/go-units v0.4.0 github.com/docker/go-units # github.com/dsnet/compress v0.0.1 +github.com/dsnet/compress github.com/dsnet/compress/bzip2 github.com/dsnet/compress/bzip2/internal/sais github.com/dsnet/compress/internal github.com/dsnet/compress/internal/errors github.com/dsnet/compress/internal/prefix -github.com/dsnet/compress # github.com/duosecurity/duo_api_golang v0.0.0-20190308151101-6c680f768e74 github.com/duosecurity/duo_api_golang github.com/duosecurity/duo_api_golang/authapi @@ -243,25 +245,25 @@ github.com/gocql/gocql/internal/lru github.com/gocql/gocql/internal/murmur github.com/gocql/gocql/internal/streams # github.com/gogo/protobuf v1.2.1 +github.com/gogo/protobuf/gogoproto github.com/gogo/protobuf/io github.com/gogo/protobuf/proto -github.com/gogo/protobuf/sortkeys -github.com/gogo/protobuf/gogoproto github.com/gogo/protobuf/protoc-gen-gogo/descriptor +github.com/gogo/protobuf/sortkeys # github.com/golang/protobuf v1.3.2 -github.com/golang/protobuf/proto -github.com/golang/protobuf/ptypes -github.com/golang/protobuf/ptypes/timestamp -github.com/golang/protobuf/ptypes/any -github.com/golang/protobuf/ptypes/duration -github.com/golang/protobuf/ptypes/struct -github.com/golang/protobuf/ptypes/empty -github.com/golang/protobuf/ptypes/wrappers -github.com/golang/protobuf/protoc-gen-go/descriptor github.com/golang/protobuf/jsonpb +github.com/golang/protobuf/proto +github.com/golang/protobuf/protoc-gen-go/descriptor 
github.com/golang/protobuf/protoc-gen-go/generator github.com/golang/protobuf/protoc-gen-go/generator/internal/remap github.com/golang/protobuf/protoc-gen-go/plugin +github.com/golang/protobuf/ptypes +github.com/golang/protobuf/ptypes/any +github.com/golang/protobuf/ptypes/duration +github.com/golang/protobuf/ptypes/empty +github.com/golang/protobuf/ptypes/struct +github.com/golang/protobuf/ptypes/timestamp +github.com/golang/protobuf/ptypes/wrappers # github.com/golang/snappy v0.0.1 github.com/golang/snappy # github.com/google/go-github v17.0.0+incompatible @@ -279,12 +281,22 @@ github.com/googleapis/gax-go/v2 # github.com/gorhill/cronexpr v0.0.0-20180427100037-88b0669f7d75 github.com/gorhill/cronexpr # github.com/grpc-ecosystem/grpc-gateway v1.8.5 +github.com/grpc-ecosystem/grpc-gateway/internal github.com/grpc-ecosystem/grpc-gateway/runtime github.com/grpc-ecosystem/grpc-gateway/utilities -github.com/grpc-ecosystem/grpc-gateway/internal # github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed github.com/hailocab/go-hostpool -# github.com/hashicorp/consul/api v1.0.1 +# github.com/hashicorp/consul-template v0.22.0 +github.com/hashicorp/consul-template/child +github.com/hashicorp/consul-template/config +github.com/hashicorp/consul-template/dependency +github.com/hashicorp/consul-template/manager +github.com/hashicorp/consul-template/renderer +github.com/hashicorp/consul-template/signals +github.com/hashicorp/consul-template/template +github.com/hashicorp/consul-template/version +github.com/hashicorp/consul-template/watch +# github.com/hashicorp/consul/api v1.1.0 github.com/hashicorp/consul/api # github.com/hashicorp/errwrap v1.0.0 github.com/hashicorp/errwrap @@ -314,6 +326,7 @@ github.com/hashicorp/go-retryablehttp github.com/hashicorp/go-rootcerts # github.com/hashicorp/go-sockaddr v1.0.2 github.com/hashicorp/go-sockaddr +github.com/hashicorp/go-sockaddr/template # github.com/hashicorp/go-syslog v1.0.0 github.com/hashicorp/go-syslog # 
github.com/hashicorp/go-uuid v1.0.2-0.20191001231223-f32f5fe8d6a8 @@ -326,12 +339,12 @@ github.com/hashicorp/golang-lru/simplelru # github.com/hashicorp/hcl v1.0.0 github.com/hashicorp/hcl github.com/hashicorp/hcl/hcl/ast -github.com/hashicorp/hcl/hcl/printer github.com/hashicorp/hcl/hcl/parser -github.com/hashicorp/hcl/hcl/token -github.com/hashicorp/hcl/json/parser +github.com/hashicorp/hcl/hcl/printer github.com/hashicorp/hcl/hcl/scanner github.com/hashicorp/hcl/hcl/strconv +github.com/hashicorp/hcl/hcl/token +github.com/hashicorp/hcl/json/parser github.com/hashicorp/hcl/json/scanner github.com/hashicorp/hcl/json/token # github.com/hashicorp/nomad/api v0.0.0-20190412184103-1c38ced33adf @@ -341,7 +354,7 @@ github.com/hashicorp/nomad/api/contexts github.com/hashicorp/raft # github.com/hashicorp/raft-snapshot v1.0.2-0.20190827162939-8117efcc5aab github.com/hashicorp/raft-snapshot -# github.com/hashicorp/serf v0.8.2 +# github.com/hashicorp/serf v0.8.3 github.com/hashicorp/serf/coordinate # github.com/hashicorp/vault-plugin-auth-alicloud v0.5.2-0.20190814210027-93970f08f2ec github.com/hashicorp/vault-plugin-auth-alicloud @@ -352,11 +365,11 @@ github.com/hashicorp/vault-plugin-auth-azure github.com/hashicorp/vault-plugin-auth-centrify # github.com/hashicorp/vault-plugin-auth-cf v0.0.0-20190821162840-1c2205826fee github.com/hashicorp/vault-plugin-auth-cf -github.com/hashicorp/vault-plugin-auth-cf/signatures github.com/hashicorp/vault-plugin-auth-cf/models -github.com/hashicorp/vault-plugin-auth-cf/util +github.com/hashicorp/vault-plugin-auth-cf/signatures github.com/hashicorp/vault-plugin-auth-cf/testing/certificates github.com/hashicorp/vault-plugin-auth-cf/testing/cf +github.com/hashicorp/vault-plugin-auth-cf/util # github.com/hashicorp/vault-plugin-auth-gcp v0.5.2-0.20190814210049-1ccb3dc10102 github.com/hashicorp/vault-plugin-auth-gcp/plugin github.com/hashicorp/vault-plugin-auth-gcp/plugin/cache @@ -388,49 +401,50 @@ github.com/hashicorp/vault-plugin-secrets-kv # 
github.com/hashicorp/vault/api v1.0.5-0.20191017194845-99f7184d3326 => ./api github.com/hashicorp/vault/api # github.com/hashicorp/vault/sdk v0.1.14-0.20191017211055-9bd3a27a36c4 => ./sdk -github.com/hashicorp/vault/sdk/helper/salt -github.com/hashicorp/vault/sdk/helper/strutil -github.com/hashicorp/vault/sdk/helper/wrapping -github.com/hashicorp/vault/sdk/logical -github.com/hashicorp/vault/sdk/helper/parseutil -github.com/hashicorp/vault/sdk/framework -github.com/hashicorp/vault/sdk/helper/policyutil -github.com/hashicorp/vault/sdk/plugin -github.com/hashicorp/vault/sdk/helper/cidrutil -github.com/hashicorp/vault/sdk/helper/consts -github.com/hashicorp/vault/sdk/helper/locksutil -github.com/hashicorp/vault/sdk/helper/tokenutil -github.com/hashicorp/vault/sdk/helper/jsonutil -github.com/hashicorp/vault/sdk/helper/certutil -github.com/hashicorp/vault/sdk/helper/password -github.com/hashicorp/vault/sdk/helper/ldaputil -github.com/hashicorp/vault/sdk/helper/tlsutil github.com/hashicorp/vault/sdk/database/dbplugin +github.com/hashicorp/vault/sdk/database/helper/connutil +github.com/hashicorp/vault/sdk/database/helper/credsutil github.com/hashicorp/vault/sdk/database/helper/dbutil -github.com/hashicorp/vault/sdk/queue -github.com/hashicorp/vault/sdk/helper/dbtxn -github.com/hashicorp/vault/sdk/helper/errutil -github.com/hashicorp/vault/sdk/helper/keysutil +github.com/hashicorp/vault/sdk/framework github.com/hashicorp/vault/sdk/helper/base62 +github.com/hashicorp/vault/sdk/helper/certutil +github.com/hashicorp/vault/sdk/helper/cidrutil +github.com/hashicorp/vault/sdk/helper/compressutil +github.com/hashicorp/vault/sdk/helper/consts +github.com/hashicorp/vault/sdk/helper/cryptoutil +github.com/hashicorp/vault/sdk/helper/dbtxn +github.com/hashicorp/vault/sdk/helper/entropy +github.com/hashicorp/vault/sdk/helper/errutil +github.com/hashicorp/vault/sdk/helper/hclutil +github.com/hashicorp/vault/sdk/helper/jsonutil +github.com/hashicorp/vault/sdk/helper/kdf 
+github.com/hashicorp/vault/sdk/helper/keysutil +github.com/hashicorp/vault/sdk/helper/ldaputil +github.com/hashicorp/vault/sdk/helper/license +github.com/hashicorp/vault/sdk/helper/locksutil github.com/hashicorp/vault/sdk/helper/logging github.com/hashicorp/vault/sdk/helper/mlock +github.com/hashicorp/vault/sdk/helper/parseutil +github.com/hashicorp/vault/sdk/helper/password +github.com/hashicorp/vault/sdk/helper/pathmanager +github.com/hashicorp/vault/sdk/helper/pluginutil +github.com/hashicorp/vault/sdk/helper/pointerutil +github.com/hashicorp/vault/sdk/helper/policyutil +github.com/hashicorp/vault/sdk/helper/salt +github.com/hashicorp/vault/sdk/helper/strutil +github.com/hashicorp/vault/sdk/helper/tlsutil +github.com/hashicorp/vault/sdk/helper/tokenutil github.com/hashicorp/vault/sdk/helper/useragent +github.com/hashicorp/vault/sdk/helper/wrapping +github.com/hashicorp/vault/sdk/logical github.com/hashicorp/vault/sdk/physical github.com/hashicorp/vault/sdk/physical/file github.com/hashicorp/vault/sdk/physical/inmem -github.com/hashicorp/vault/sdk/version -github.com/hashicorp/vault/sdk/helper/cryptoutil -github.com/hashicorp/vault/sdk/helper/hclutil -github.com/hashicorp/vault/sdk/database/helper/credsutil -github.com/hashicorp/vault/sdk/helper/compressutil -github.com/hashicorp/vault/sdk/helper/pathmanager -github.com/hashicorp/vault/sdk/plugin/pb -github.com/hashicorp/vault/sdk/database/helper/connutil -github.com/hashicorp/vault/sdk/helper/license -github.com/hashicorp/vault/sdk/helper/pluginutil -github.com/hashicorp/vault/sdk/helper/entropy -github.com/hashicorp/vault/sdk/helper/kdf +github.com/hashicorp/vault/sdk/plugin github.com/hashicorp/vault/sdk/plugin/mock +github.com/hashicorp/vault/sdk/plugin/pb +github.com/hashicorp/vault/sdk/queue +github.com/hashicorp/vault/sdk/version # github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d github.com/hashicorp/yamux # github.com/influxdata/influxdb v0.0.0-20190411212539-d24b7ba8c4c4 @@ -439,11 +453,11 
@@ github.com/influxdata/influxdb/models github.com/influxdata/influxdb/pkg/escape # github.com/jackc/pgx v3.3.0+incompatible github.com/jackc/pgx +github.com/jackc/pgx/chunkreader github.com/jackc/pgx/internal/sanitize github.com/jackc/pgx/pgio github.com/jackc/pgx/pgproto3 github.com/jackc/pgx/pgtype -github.com/jackc/pgx/chunkreader # github.com/jeffchao/backoff v0.0.0-20140404060208-9d7fd7aa17f2 github.com/jeffchao/backoff # github.com/jefferai/isbadcipher v0.0.0-20190226160619-51d2077c035f @@ -455,25 +469,25 @@ github.com/jmespath/go-jmespath # github.com/joyent/triton-go v0.0.0-20190112182421-51ffac552869 github.com/joyent/triton-go github.com/joyent/triton-go/authentication +github.com/joyent/triton-go/client github.com/joyent/triton-go/errors github.com/joyent/triton-go/storage -github.com/joyent/triton-go/client # github.com/json-iterator/go v1.1.6 github.com/json-iterator/go # github.com/keybase/go-crypto v0.0.0-20190403132359-d65b6b94177f -github.com/keybase/go-crypto/openpgp -github.com/keybase/go-crypto/openpgp/packet -github.com/keybase/go-crypto/openpgp/armor -github.com/keybase/go-crypto/openpgp/errors -github.com/keybase/go-crypto/openpgp/s2k -github.com/keybase/go-crypto/rsa github.com/keybase/go-crypto/brainpool github.com/keybase/go-crypto/cast5 github.com/keybase/go-crypto/curve25519 github.com/keybase/go-crypto/ed25519 +github.com/keybase/go-crypto/ed25519/internal/edwards25519 +github.com/keybase/go-crypto/openpgp +github.com/keybase/go-crypto/openpgp/armor github.com/keybase/go-crypto/openpgp/ecdh github.com/keybase/go-crypto/openpgp/elgamal -github.com/keybase/go-crypto/ed25519/internal/edwards25519 +github.com/keybase/go-crypto/openpgp/errors +github.com/keybase/go-crypto/openpgp/packet +github.com/keybase/go-crypto/openpgp/s2k +github.com/keybase/go-crypto/rsa # github.com/konsorten/go-windows-terminal-sequences v1.0.1 github.com/konsorten/go-windows-terminal-sequences # github.com/kr/pretty v0.1.0 @@ -488,6 +502,8 @@ 
github.com/lib/pq/scram github.com/mattn/go-colorable # github.com/mattn/go-isatty v0.0.8 github.com/mattn/go-isatty +# github.com/mattn/go-shellwords v1.0.5 +github.com/mattn/go-shellwords # github.com/matttproud/golang_protobuf_extensions v1.0.1 github.com/matttproud/golang_protobuf_extensions/pbutil # github.com/mholt/archiver v3.1.1+incompatible @@ -502,6 +518,8 @@ github.com/mitchellh/copystructure github.com/mitchellh/go-homedir # github.com/mitchellh/go-testing-interface v1.0.0 github.com/mitchellh/go-testing-interface +# github.com/mitchellh/hashstructure v1.0.0 +github.com/mitchellh/hashstructure # github.com/mitchellh/mapstructure v1.1.2 github.com/mitchellh/mapstructure # github.com/mitchellh/pointerstructure v0.0.0-20190430161007-f252a8fd71c8 @@ -521,15 +539,15 @@ github.com/oklog/run # github.com/opencontainers/go-digest v1.0.0-rc1 github.com/opencontainers/go-digest # github.com/opencontainers/image-spec v1.0.1 -github.com/opencontainers/image-spec/specs-go/v1 github.com/opencontainers/image-spec/specs-go +github.com/opencontainers/image-spec/specs-go/v1 # github.com/opencontainers/runc v0.1.1 github.com/opencontainers/runc/libcontainer/user # github.com/oracle/oci-go-sdk v7.0.0+incompatible github.com/oracle/oci-go-sdk/common github.com/oracle/oci-go-sdk/common/auth -github.com/oracle/oci-go-sdk/objectstorage github.com/oracle/oci-go-sdk/keymanagement +github.com/oracle/oci-go-sdk/objectstorage # github.com/ory/dockertest v3.3.4+incompatible github.com/ory/dockertest github.com/ory/dockertest/docker @@ -537,23 +555,23 @@ github.com/ory/dockertest/docker/opts github.com/ory/dockertest/docker/pkg/archive github.com/ory/dockertest/docker/pkg/fileutils github.com/ory/dockertest/docker/pkg/homedir -github.com/ory/dockertest/docker/pkg/jsonmessage -github.com/ory/dockertest/docker/pkg/stdcopy -github.com/ory/dockertest/docker/types/registry -github.com/ory/dockertest/docker/types github.com/ory/dockertest/docker/pkg/idtools 
github.com/ory/dockertest/docker/pkg/ioutils +github.com/ory/dockertest/docker/pkg/jsonmessage github.com/ory/dockertest/docker/pkg/longpath +github.com/ory/dockertest/docker/pkg/mount github.com/ory/dockertest/docker/pkg/pools +github.com/ory/dockertest/docker/pkg/stdcopy github.com/ory/dockertest/docker/pkg/system github.com/ory/dockertest/docker/pkg/term +github.com/ory/dockertest/docker/pkg/term/windows +github.com/ory/dockertest/docker/types +github.com/ory/dockertest/docker/types/blkiodev github.com/ory/dockertest/docker/types/container github.com/ory/dockertest/docker/types/filters github.com/ory/dockertest/docker/types/mount github.com/ory/dockertest/docker/types/network -github.com/ory/dockertest/docker/pkg/mount -github.com/ory/dockertest/docker/pkg/term/windows -github.com/ory/dockertest/docker/types/blkiodev +github.com/ory/dockertest/docker/types/registry github.com/ory/dockertest/docker/types/strslice github.com/ory/dockertest/docker/types/versions # github.com/patrickmn/go-cache v2.1.0+incompatible @@ -567,16 +585,16 @@ github.com/pkg/errors github.com/pmezard/go-difflib/difflib # github.com/posener/complete v1.2.1 github.com/posener/complete -github.com/posener/complete/cmd/install github.com/posener/complete/cmd +github.com/posener/complete/cmd/install github.com/posener/complete/match # github.com/pquerna/cachecontrol v0.0.0-20180517163645-1555304b9b35 github.com/pquerna/cachecontrol github.com/pquerna/cachecontrol/cacheobject # github.com/pquerna/otp v1.2.1-0.20191009055518-468c2dd2b58d github.com/pquerna/otp -github.com/pquerna/otp/totp github.com/pquerna/otp/hotp +github.com/pquerna/otp/totp # github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829 github.com/prometheus/client_golang/prometheus github.com/prometheus/client_golang/prometheus/internal @@ -584,13 +602,13 @@ github.com/prometheus/client_golang/prometheus/internal github.com/prometheus/client_model/go # github.com/prometheus/common v0.2.0 
github.com/prometheus/common/expfmt -github.com/prometheus/common/model github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg +github.com/prometheus/common/model # github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1 github.com/prometheus/procfs +github.com/prometheus/procfs/internal/util github.com/prometheus/procfs/nfs github.com/prometheus/procfs/xfs -github.com/prometheus/procfs/internal/util # github.com/ryanuber/columnize v2.1.0+incompatible github.com/ryanuber/columnize # github.com/ryanuber/go-glob v1.0.0 @@ -603,10 +621,10 @@ github.com/satori/go.uuid github.com/shirou/gopsutil/cpu github.com/shirou/gopsutil/disk github.com/shirou/gopsutil/host -github.com/shirou/gopsutil/mem github.com/shirou/gopsutil/internal/common -github.com/shirou/gopsutil/process +github.com/shirou/gopsutil/mem github.com/shirou/gopsutil/net +github.com/shirou/gopsutil/process # github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4 github.com/shirou/w32 # github.com/sirupsen/logrus v1.4.2 @@ -620,183 +638,186 @@ github.com/tv42/httpunix github.com/ugorji/go/codec # github.com/ulikunitz/xz v0.5.6 github.com/ulikunitz/xz +github.com/ulikunitz/xz/internal/hash github.com/ulikunitz/xz/internal/xlog github.com/ulikunitz/xz/lzma -github.com/ulikunitz/xz/internal/hash # github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 github.com/xi2/xz # go.etcd.io/bbolt v1.3.2 go.etcd.io/bbolt # go.etcd.io/etcd v0.0.0-20190412021913-f29b1ada1971 +go.etcd.io/etcd/auth/authpb go.etcd.io/etcd/client go.etcd.io/etcd/clientv3 -go.etcd.io/etcd/clientv3/concurrency -go.etcd.io/etcd/pkg/transport -go.etcd.io/etcd/pkg/pathutil -go.etcd.io/etcd/pkg/srv -go.etcd.io/etcd/pkg/types -go.etcd.io/etcd/version -go.etcd.io/etcd/auth/authpb go.etcd.io/etcd/clientv3/balancer go.etcd.io/etcd/clientv3/balancer/picker go.etcd.io/etcd/clientv3/balancer/resolver/endpoint +go.etcd.io/etcd/clientv3/concurrency go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes go.etcd.io/etcd/etcdserver/etcdserverpb 
go.etcd.io/etcd/mvcc/mvccpb go.etcd.io/etcd/pkg/logutil -go.etcd.io/etcd/pkg/tlsutil +go.etcd.io/etcd/pkg/pathutil +go.etcd.io/etcd/pkg/srv go.etcd.io/etcd/pkg/systemd +go.etcd.io/etcd/pkg/tlsutil +go.etcd.io/etcd/pkg/transport +go.etcd.io/etcd/pkg/types go.etcd.io/etcd/raft go.etcd.io/etcd/raft/raftpb +go.etcd.io/etcd/version # go.opencensus.io v0.21.0 -go.opencensus.io/stats -go.opencensus.io/stats/view -go.opencensus.io/plugin/ochttp -go.opencensus.io/plugin/ochttp/propagation/tracecontext -go.opencensus.io/trace -go.opencensus.io/metric/metricdata -go.opencensus.io/stats/internal -go.opencensus.io/tag +go.opencensus.io +go.opencensus.io/internal go.opencensus.io/internal/tagencoding +go.opencensus.io/metric/metricdata go.opencensus.io/metric/metricproducer go.opencensus.io/plugin/ocgrpc +go.opencensus.io/plugin/ochttp go.opencensus.io/plugin/ochttp/propagation/b3 -go.opencensus.io/trace/propagation -go.opencensus.io +go.opencensus.io/plugin/ochttp/propagation/tracecontext go.opencensus.io/resource -go.opencensus.io/trace/tracestate -go.opencensus.io/internal +go.opencensus.io/stats +go.opencensus.io/stats/internal +go.opencensus.io/stats/view +go.opencensus.io/tag +go.opencensus.io/trace go.opencensus.io/trace/internal +go.opencensus.io/trace/propagation +go.opencensus.io/trace/tracestate # go.uber.org/atomic v1.4.0 go.uber.org/atomic # go.uber.org/multierr v1.1.0 go.uber.org/multierr # go.uber.org/zap v1.9.1 go.uber.org/zap -go.uber.org/zap/zapcore -go.uber.org/zap/internal/bufferpool go.uber.org/zap/buffer +go.uber.org/zap/internal/bufferpool go.uber.org/zap/internal/color go.uber.org/zap/internal/exit +go.uber.org/zap/zapcore # golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 golang.org/x/crypto/bcrypt -golang.org/x/crypto/ed25519 -golang.org/x/crypto/ssh -golang.org/x/crypto/ssh/agent -golang.org/x/crypto/curve25519 +golang.org/x/crypto/blake2b +golang.org/x/crypto/blowfish +golang.org/x/crypto/chacha20poly1305 golang.org/x/crypto/cryptobyte 
golang.org/x/crypto/cryptobyte/asn1 -golang.org/x/crypto/ssh/terminal -golang.org/x/crypto/blowfish -golang.org/x/crypto/md4 +golang.org/x/crypto/curve25519 +golang.org/x/crypto/ed25519 golang.org/x/crypto/ed25519/internal/edwards25519 -golang.org/x/crypto/internal/chacha20 -golang.org/x/crypto/poly1305 -golang.org/x/crypto/chacha20poly1305 golang.org/x/crypto/hkdf -golang.org/x/crypto/pbkdf2 -golang.org/x/crypto/blake2b -golang.org/x/crypto/pkcs12 +golang.org/x/crypto/internal/chacha20 golang.org/x/crypto/internal/subtle +golang.org/x/crypto/md4 +golang.org/x/crypto/pbkdf2 +golang.org/x/crypto/pkcs12 golang.org/x/crypto/pkcs12/internal/rc2 +golang.org/x/crypto/poly1305 +golang.org/x/crypto/ssh +golang.org/x/crypto/ssh/agent +golang.org/x/crypto/ssh/terminal # golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7 -golang.org/x/net/idna +golang.org/x/net/context +golang.org/x/net/context/ctxhttp +golang.org/x/net/http/httpguts golang.org/x/net/http/httpproxy golang.org/x/net/http2 -golang.org/x/net/context -golang.org/x/net/http/httpguts golang.org/x/net/http2/hpack -golang.org/x/net/trace -golang.org/x/net/context/ctxhttp +golang.org/x/net/idna golang.org/x/net/internal/timeseries +golang.org/x/net/trace # golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a golang.org/x/oauth2 -golang.org/x/oauth2/internal -golang.org/x/oauth2/google -golang.org/x/oauth2/jwt -golang.org/x/oauth2/jws golang.org/x/oauth2/clientcredentials +golang.org/x/oauth2/google +golang.org/x/oauth2/internal +golang.org/x/oauth2/jws +golang.org/x/oauth2/jwt # golang.org/x/sync v0.0.0-20190423024810-112230192c58 golang.org/x/sync/semaphore # golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a +golang.org/x/sys/cpu golang.org/x/sys/unix golang.org/x/sys/windows -golang.org/x/sys/cpu # golang.org/x/text v0.3.2 -golang.org/x/text/secure/bidirule -golang.org/x/text/unicode/bidi -golang.org/x/text/unicode/norm -golang.org/x/text/transform -golang.org/x/text/encoding/unicode golang.org/x/text/encoding 
golang.org/x/text/encoding/internal golang.org/x/text/encoding/internal/identifier +golang.org/x/text/encoding/unicode golang.org/x/text/internal/utf8internal golang.org/x/text/runes +golang.org/x/text/secure/bidirule +golang.org/x/text/transform +golang.org/x/text/unicode/bidi +golang.org/x/text/unicode/norm # golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 golang.org/x/time/rate # google.golang.org/api v0.5.0 -google.golang.org/api/option -google.golang.org/api/iam/v1 -google.golang.org/api/googleapi -google.golang.org/api/iterator -google.golang.org/api/transport google.golang.org/api/cloudresourcemanager/v1 google.golang.org/api/compute/v1 -google.golang.org/api/internal -google.golang.org/api/oauth2/v2 google.golang.org/api/gensupport -google.golang.org/api/transport/http -google.golang.org/api/storage/v1 +google.golang.org/api/googleapi google.golang.org/api/googleapi/internal/uritemplates -google.golang.org/api/transport/grpc google.golang.org/api/googleapi/transport -google.golang.org/api/transport/http/internal/propagation +google.golang.org/api/iam/v1 +google.golang.org/api/internal +google.golang.org/api/iterator +google.golang.org/api/oauth2/v2 +google.golang.org/api/option +google.golang.org/api/storage/v1 google.golang.org/api/support/bundler +google.golang.org/api/transport +google.golang.org/api/transport/grpc +google.golang.org/api/transport/http +google.golang.org/api/transport/http/internal/propagation # google.golang.org/appengine v1.6.0 -google.golang.org/appengine/cloudsql -google.golang.org/appengine/urlfetch google.golang.org/appengine -google.golang.org/appengine/socket +google.golang.org/appengine/cloudsql google.golang.org/appengine/internal -google.golang.org/appengine/internal/urlfetch google.golang.org/appengine/internal/app_identity -google.golang.org/appengine/internal/modules -google.golang.org/appengine/internal/socket google.golang.org/appengine/internal/base google.golang.org/appengine/internal/datastore 
google.golang.org/appengine/internal/log +google.golang.org/appengine/internal/modules google.golang.org/appengine/internal/remote_api +google.golang.org/appengine/internal/socket +google.golang.org/appengine/internal/urlfetch +google.golang.org/appengine/socket +google.golang.org/appengine/urlfetch # google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64 -google.golang.org/genproto/googleapis/cloud/kms/v1 +google.golang.org/genproto/googleapis/api +google.golang.org/genproto/googleapis/api/annotations +google.golang.org/genproto/googleapis/api/distribution +google.golang.org/genproto/googleapis/api/httpbody +google.golang.org/genproto/googleapis/api/label google.golang.org/genproto/googleapis/api/metric google.golang.org/genproto/googleapis/api/monitoredres -google.golang.org/genproto/googleapis/monitoring/v3 -google.golang.org/genproto/googleapis/api/distribution -google.golang.org/genproto/protobuf/field_mask +google.golang.org/genproto/googleapis/cloud/kms/v1 google.golang.org/genproto/googleapis/iam/v1 -google.golang.org/genproto/googleapis/rpc/errdetails -google.golang.org/genproto/googleapis/spanner/v1 -google.golang.org/genproto/googleapis/rpc/status -google.golang.org/genproto/googleapis/api/annotations -google.golang.org/genproto/googleapis/api -google.golang.org/genproto/googleapis/api/label +google.golang.org/genproto/googleapis/monitoring/v3 google.golang.org/genproto/googleapis/rpc/code +google.golang.org/genproto/googleapis/rpc/errdetails +google.golang.org/genproto/googleapis/rpc/status +google.golang.org/genproto/googleapis/spanner/v1 google.golang.org/genproto/googleapis/type/expr -google.golang.org/genproto/googleapis/api/httpbody +google.golang.org/genproto/protobuf/field_mask # google.golang.org/grpc v1.22.0 -google.golang.org/grpc/grpclog -google.golang.org/grpc/codes google.golang.org/grpc -google.golang.org/grpc/keepalive -google.golang.org/grpc/status -google.golang.org/grpc/metadata -google.golang.org/grpc/credentials 
google.golang.org/grpc/balancer +google.golang.org/grpc/balancer/base google.golang.org/grpc/balancer/roundrobin +google.golang.org/grpc/binarylog/grpc_binarylog_v1 +google.golang.org/grpc/codes google.golang.org/grpc/connectivity +google.golang.org/grpc/credentials +google.golang.org/grpc/credentials/internal +google.golang.org/grpc/credentials/oauth google.golang.org/grpc/encoding google.golang.org/grpc/encoding/proto +google.golang.org/grpc/grpclog +google.golang.org/grpc/health +google.golang.org/grpc/health/grpc_health_v1 google.golang.org/grpc/internal google.golang.org/grpc/internal/backoff google.golang.org/grpc/internal/balancerload @@ -805,7 +826,10 @@ google.golang.org/grpc/internal/channelz google.golang.org/grpc/internal/envconfig google.golang.org/grpc/internal/grpcrand google.golang.org/grpc/internal/grpcsync +google.golang.org/grpc/internal/syscall google.golang.org/grpc/internal/transport +google.golang.org/grpc/keepalive +google.golang.org/grpc/metadata google.golang.org/grpc/naming google.golang.org/grpc/peer google.golang.org/grpc/resolver @@ -813,14 +837,8 @@ google.golang.org/grpc/resolver/dns google.golang.org/grpc/resolver/passthrough google.golang.org/grpc/serviceconfig google.golang.org/grpc/stats +google.golang.org/grpc/status google.golang.org/grpc/tap -google.golang.org/grpc/health -google.golang.org/grpc/health/grpc_health_v1 -google.golang.org/grpc/credentials/internal -google.golang.org/grpc/credentials/oauth -google.golang.org/grpc/balancer/base -google.golang.org/grpc/binarylog/grpc_binarylog_v1 -google.golang.org/grpc/internal/syscall # gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d gopkg.in/asn1-ber.v1 # gopkg.in/inf.v0 v0.9.1 @@ -830,43 +848,43 @@ gopkg.in/ini.v1 # gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce gopkg.in/mgo.v2 gopkg.in/mgo.v2/bson +gopkg.in/mgo.v2/internal/json gopkg.in/mgo.v2/internal/sasl gopkg.in/mgo.v2/internal/scram -gopkg.in/mgo.v2/internal/json # gopkg.in/ory-am/dockertest.v3 v3.3.4 
gopkg.in/ory-am/dockertest.v3 # gopkg.in/square/go-jose.v2 v2.3.1 -gopkg.in/square/go-jose.v2/jwt gopkg.in/square/go-jose.v2 -gopkg.in/square/go-jose.v2/json gopkg.in/square/go-jose.v2/cipher +gopkg.in/square/go-jose.v2/json +gopkg.in/square/go-jose.v2/jwt # gopkg.in/yaml.v2 v2.2.2 gopkg.in/yaml.v2 # k8s.io/api v0.0.0-20190409092523-d687e77c8ae9 k8s.io/api/authentication/v1 # k8s.io/apimachinery v0.0.0-20190409092423-760d1845f48b k8s.io/apimachinery/pkg/api/errors -k8s.io/apimachinery/pkg/apis/meta/v1 -k8s.io/apimachinery/pkg/runtime -k8s.io/apimachinery/pkg/runtime/schema -k8s.io/apimachinery/pkg/types -k8s.io/apimachinery/pkg/util/validation/field k8s.io/apimachinery/pkg/api/resource +k8s.io/apimachinery/pkg/apis/meta/v1 k8s.io/apimachinery/pkg/conversion +k8s.io/apimachinery/pkg/conversion/queryparams k8s.io/apimachinery/pkg/fields k8s.io/apimachinery/pkg/labels +k8s.io/apimachinery/pkg/runtime +k8s.io/apimachinery/pkg/runtime/schema k8s.io/apimachinery/pkg/selection -k8s.io/apimachinery/pkg/util/intstr -k8s.io/apimachinery/pkg/util/runtime -k8s.io/apimachinery/pkg/watch -k8s.io/apimachinery/pkg/conversion/queryparams +k8s.io/apimachinery/pkg/types k8s.io/apimachinery/pkg/util/errors +k8s.io/apimachinery/pkg/util/intstr k8s.io/apimachinery/pkg/util/json k8s.io/apimachinery/pkg/util/naming -k8s.io/apimachinery/pkg/util/sets -k8s.io/apimachinery/third_party/forked/golang/reflect -k8s.io/apimachinery/pkg/util/validation k8s.io/apimachinery/pkg/util/net +k8s.io/apimachinery/pkg/util/runtime +k8s.io/apimachinery/pkg/util/sets +k8s.io/apimachinery/pkg/util/validation +k8s.io/apimachinery/pkg/util/validation/field +k8s.io/apimachinery/pkg/watch +k8s.io/apimachinery/third_party/forked/golang/reflect # k8s.io/klog v0.0.0-20190306015804-8e90cee79f82 k8s.io/klog # layeh.com/radius v0.0.0-20190322222518-890bc1058917