run go mod vendor (#7736)

This commit is contained in:
Sam Salisbury 2019-10-25 13:35:22 +01:00 committed by GitHub
parent e3450dddeb
commit 8f0c38f78d
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
136 changed files with 17702 additions and 8897 deletions

4
go.mod
View File

@ -29,7 +29,6 @@ require (
github.com/cockroachdb/apd v1.1.0 // indirect
github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c
github.com/coreos/go-semver v0.2.0
github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d // indirect
github.com/denisenkom/go-mssqldb v0.0.0-20190412130859-3b1d194e553a
github.com/dnaeon/go-vcr v1.0.1 // indirect
github.com/dsnet/compress v0.0.1 // indirect
@ -48,7 +47,6 @@ require (
github.com/golang/protobuf v1.3.2
github.com/google/go-github v17.0.0+incompatible
github.com/google/go-metrics-stackdriver v0.0.0-20190816035513-b52628e82e2a
github.com/google/go-querystring v1.0.0 // indirect
github.com/hashicorp/consul-template v0.22.0
github.com/hashicorp/consul/api v1.1.0
github.com/hashicorp/errwrap v1.0.0
@ -93,7 +91,6 @@ require (
github.com/joyent/triton-go v0.0.0-20190112182421-51ffac552869
github.com/keybase/go-crypto v0.0.0-20190403132359-d65b6b94177f
github.com/kr/pretty v0.1.0
github.com/kr/pty v1.1.3 // indirect
github.com/kr/text v0.1.0
github.com/lib/pq v1.2.0
github.com/mattn/go-colorable v0.1.2
@ -108,7 +105,6 @@ require (
github.com/ncw/swift v1.0.47
github.com/nwaples/rardecode v1.0.0 // indirect
github.com/oklog/run v1.0.0
github.com/onsi/ginkgo v1.7.0 // indirect
github.com/oracle/oci-go-sdk v7.0.0+incompatible
github.com/ory/dockertest v3.3.4+incompatible
github.com/patrickmn/go-cache v2.1.0+incompatible

1
go.sum
View File

@ -236,6 +236,7 @@ github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXi
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/uuid v1.0.0 h1:b4Gk+7WdP/d3HZH8EJsZpvV7EtDOgaZLtnaNGIu1adA=
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go v2.0.0+incompatible h1:j0GKcs05QVmm7yesiZq2+9cxHkNK9YM6zKx4D2qucQU=
github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
github.com/googleapis/gax-go/v2 v2.0.4 h1:hU4mGcQI4DaAYW+IbTun+2qEZVFxK0ySjQLTbS0VQKc=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=

5
vendor/github.com/BurntSushi/toml/.gitignore generated vendored Normal file
View File

@ -0,0 +1,5 @@
TAGS
tags
.*.swp
tomlcheck/tomlcheck
toml.test

15
vendor/github.com/BurntSushi/toml/.travis.yml generated vendored Normal file
View File

@ -0,0 +1,15 @@
language: go
go:
- 1.1
- 1.2
- 1.3
- 1.4
- 1.5
- 1.6
- tip
install:
- go install ./...
- go get github.com/BurntSushi/toml-test
script:
- export PATH="$PATH:$HOME/gopath/bin"
- make test

3
vendor/github.com/BurntSushi/toml/COMPATIBLE generated vendored Normal file
View File

@ -0,0 +1,3 @@
Compatible with TOML version
[v0.4.0](https://github.com/toml-lang/toml/blob/v0.4.0/versions/en/toml-v0.4.0.md)

21
vendor/github.com/BurntSushi/toml/COPYING generated vendored Normal file
View File

@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2013 TOML authors
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

19
vendor/github.com/BurntSushi/toml/Makefile generated vendored Normal file
View File

@ -0,0 +1,19 @@
install:
go install ./...
test: install
go test -v
toml-test toml-test-decoder
toml-test -encoder toml-test-encoder
fmt:
gofmt -w *.go */*.go
colcheck *.go */*.go
tags:
find ./ -name '*.go' -print0 | xargs -0 gotags > TAGS
push:
git push origin master
git push github master

218
vendor/github.com/BurntSushi/toml/README.md generated vendored Normal file
View File

@ -0,0 +1,218 @@
## TOML parser and encoder for Go with reflection
TOML stands for Tom's Obvious, Minimal Language. This Go package provides a
reflection interface similar to Go's standard library `json` and `xml`
packages. This package also supports the `encoding.TextUnmarshaler` and
`encoding.TextMarshaler` interfaces so that you can define custom data
representations. (There is an example of this below.)
Spec: https://github.com/toml-lang/toml
Compatible with TOML version
[v0.4.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md)
Documentation: https://godoc.org/github.com/BurntSushi/toml
Installation:
```bash
go get github.com/BurntSushi/toml
```
Try the toml validator:
```bash
go get github.com/BurntSushi/toml/cmd/tomlv
tomlv some-toml-file.toml
```
[![Build Status](https://travis-ci.org/BurntSushi/toml.svg?branch=master)](https://travis-ci.org/BurntSushi/toml) [![GoDoc](https://godoc.org/github.com/BurntSushi/toml?status.svg)](https://godoc.org/github.com/BurntSushi/toml)
### Testing
This package passes all tests in
[toml-test](https://github.com/BurntSushi/toml-test) for both the decoder
and the encoder.
### Examples
This package works similarly to how the Go standard library handles `XML`
and `JSON`. Namely, data is loaded into Go values via reflection.
For the simplest example, consider some TOML file as just a list of keys
and values:
```toml
Age = 25
Cats = [ "Cauchy", "Plato" ]
Pi = 3.14
Perfection = [ 6, 28, 496, 8128 ]
DOB = 1987-07-05T05:45:00Z
```
Which could be defined in Go as:
```go
type Config struct {
Age int
Cats []string
Pi float64
Perfection []int
DOB time.Time // requires `import time`
}
```
And then decoded with:
```go
var conf Config
if _, err := toml.Decode(tomlData, &conf); err != nil {
// handle error
}
```
You can also use struct tags if your struct field name doesn't map to a TOML
key value directly:
```toml
some_key_NAME = "wat"
```
```go
type TOML struct {
ObscureKey string `toml:"some_key_NAME"`
}
```
### Using the `encoding.TextUnmarshaler` interface
Here's an example that automatically parses duration strings into
`time.Duration` values:
```toml
[[song]]
name = "Thunder Road"
duration = "4m49s"
[[song]]
name = "Stairway to Heaven"
duration = "8m03s"
```
Which can be decoded with:
```go
type song struct {
Name string
Duration duration
}
type songs struct {
Song []song
}
var favorites songs
if _, err := toml.Decode(blob, &favorites); err != nil {
log.Fatal(err)
}
for _, s := range favorites.Song {
fmt.Printf("%s (%s)\n", s.Name, s.Duration)
}
```
And you'll also need a `duration` type that satisfies the
`encoding.TextUnmarshaler` interface:
```go
type duration struct {
time.Duration
}
func (d *duration) UnmarshalText(text []byte) error {
var err error
d.Duration, err = time.ParseDuration(string(text))
return err
}
```
### More complex usage
Here's an example of how to load the example from the official spec page:
```toml
# This is a TOML document. Boom.
title = "TOML Example"
[owner]
name = "Tom Preston-Werner"
organization = "GitHub"
bio = "GitHub Cofounder & CEO\nLikes tater tots and beer."
dob = 1979-05-27T07:32:00Z # First class dates? Why not?
[database]
server = "192.168.1.1"
ports = [ 8001, 8001, 8002 ]
connection_max = 5000
enabled = true
[servers]
# You can indent as you please. Tabs or spaces. TOML don't care.
[servers.alpha]
ip = "10.0.0.1"
dc = "eqdc10"
[servers.beta]
ip = "10.0.0.2"
dc = "eqdc10"
[clients]
data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it
# Line breaks are OK when inside arrays
hosts = [
"alpha",
"omega"
]
```
And the corresponding Go types are:
```go
type tomlConfig struct {
Title string
Owner ownerInfo
DB database `toml:"database"`
Servers map[string]server
Clients clients
}
type ownerInfo struct {
Name string
Org string `toml:"organization"`
Bio string
DOB time.Time
}
type database struct {
Server string
Ports []int
ConnMax int `toml:"connection_max"`
Enabled bool
}
type server struct {
IP string
DC string
}
type clients struct {
Data [][]interface{}
Hosts []string
}
```
Note that a case insensitive match will be tried if an exact match can't be
found.
A working example of the above can be found in `_examples/example.{go,toml}`.

509
vendor/github.com/BurntSushi/toml/decode.go generated vendored Normal file
View File

@ -0,0 +1,509 @@
package toml
import (
"fmt"
"io"
"io/ioutil"
"math"
"reflect"
"strings"
"time"
)
func e(format string, args ...interface{}) error {
return fmt.Errorf("toml: "+format, args...)
}
// Unmarshaler is the interface implemented by objects that can unmarshal a
// TOML description of themselves.
type Unmarshaler interface {
UnmarshalTOML(interface{}) error
}
// Unmarshal decodes the contents of `p` in TOML format into a pointer `v`.
func Unmarshal(p []byte, v interface{}) error {
_, err := Decode(string(p), v)
return err
}
// Primitive is a TOML value that hasn't been decoded into a Go value.
// When using the various `Decode*` functions, the type `Primitive` may
// be given to any value, and its decoding will be delayed.
//
// A `Primitive` value can be decoded using the `PrimitiveDecode` function.
//
// The underlying representation of a `Primitive` value is subject to change.
// Do not rely on it.
//
// N.B. Primitive values are still parsed, so using them will only avoid
// the overhead of reflection. They can be useful when you don't know the
// exact type of TOML data until run time.
type Primitive struct {
undecoded interface{}
context Key
}
// DEPRECATED!
//
// Use MetaData.PrimitiveDecode instead.
func PrimitiveDecode(primValue Primitive, v interface{}) error {
md := MetaData{decoded: make(map[string]bool)}
return md.unify(primValue.undecoded, rvalue(v))
}
// PrimitiveDecode is just like the other `Decode*` functions, except it
// decodes a TOML value that has already been parsed. Valid primitive values
// can *only* be obtained from values filled by the decoder functions,
// including this method. (i.e., `v` may contain more `Primitive`
// values.)
//
// Meta data for primitive values is included in the meta data returned by
// the `Decode*` functions with one exception: keys returned by the Undecoded
// method will only reflect keys that were decoded. Namely, any keys hidden
// behind a Primitive will be considered undecoded. Executing this method will
// update the undecoded keys in the meta data. (See the example.)
func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error {
md.context = primValue.context
defer func() { md.context = nil }()
return md.unify(primValue.undecoded, rvalue(v))
}
// Decode will decode the contents of `data` in TOML format into a pointer
// `v`.
//
// TOML hashes correspond to Go structs or maps. (Dealer's choice. They can be
// used interchangeably.)
//
// TOML arrays of tables correspond to either a slice of structs or a slice
// of maps.
//
// TOML datetimes correspond to Go `time.Time` values.
//
// All other TOML types (float, string, int, bool and array) correspond
// to the obvious Go types.
//
// An exception to the above rules is if a type implements the
// encoding.TextUnmarshaler interface. In this case, any primitive TOML value
// (floats, strings, integers, booleans and datetimes) will be converted to
// a byte string and given to the value's UnmarshalText method. See the
// Unmarshaler example for a demonstration with time duration strings.
//
// Key mapping
//
// TOML keys can map to either keys in a Go map or field names in a Go
// struct. The special `toml` struct tag may be used to map TOML keys to
// struct fields that don't match the key name exactly. (See the example.)
// A case insensitive match to struct names will be tried if an exact match
// can't be found.
//
// The mapping between TOML values and Go values is loose. That is, there
// may exist TOML values that cannot be placed into your representation, and
// there may be parts of your representation that do not correspond to
// TOML values. This loose mapping can be made stricter by using the IsDefined
// and/or Undecoded methods on the MetaData returned.
//
// This decoder will not handle cyclic types. If a cyclic type is passed,
// `Decode` will not terminate.
func Decode(data string, v interface{}) (MetaData, error) {
rv := reflect.ValueOf(v)
if rv.Kind() != reflect.Ptr {
return MetaData{}, e("Decode of non-pointer %s", reflect.TypeOf(v))
}
if rv.IsNil() {
return MetaData{}, e("Decode of nil %s", reflect.TypeOf(v))
}
p, err := parse(data)
if err != nil {
return MetaData{}, err
}
md := MetaData{
p.mapping, p.types, p.ordered,
make(map[string]bool, len(p.ordered)), nil,
}
return md, md.unify(p.mapping, indirect(rv))
}
// DecodeFile is just like Decode, except it will automatically read the
// contents of the file at `fpath` and decode it for you.
func DecodeFile(fpath string, v interface{}) (MetaData, error) {
bs, err := ioutil.ReadFile(fpath)
if err != nil {
return MetaData{}, err
}
return Decode(string(bs), v)
}
// DecodeReader is just like Decode, except it will consume all bytes
// from the reader and decode it for you.
func DecodeReader(r io.Reader, v interface{}) (MetaData, error) {
bs, err := ioutil.ReadAll(r)
if err != nil {
return MetaData{}, err
}
return Decode(string(bs), v)
}
// unify performs a sort of type unification based on the structure of `rv`,
// which is the client representation.
//
// Any type mismatch produces an error. Finding a type that we don't know
// how to handle produces an unsupported type error.
func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
// Special case. Look for a `Primitive` value.
if rv.Type() == reflect.TypeOf((*Primitive)(nil)).Elem() {
// Save the undecoded data and the key context into the primitive
// value.
context := make(Key, len(md.context))
copy(context, md.context)
rv.Set(reflect.ValueOf(Primitive{
undecoded: data,
context: context,
}))
return nil
}
// Special case. Unmarshaler Interface support.
if rv.CanAddr() {
if v, ok := rv.Addr().Interface().(Unmarshaler); ok {
return v.UnmarshalTOML(data)
}
}
// Special case. Handle time.Time values specifically.
// TODO: Remove this code when we decide to drop support for Go 1.1.
// This isn't necessary in Go 1.2 because time.Time satisfies the encoding
// interfaces.
if rv.Type().AssignableTo(rvalue(time.Time{}).Type()) {
return md.unifyDatetime(data, rv)
}
// Special case. Look for a value satisfying the TextUnmarshaler interface.
if v, ok := rv.Interface().(TextUnmarshaler); ok {
return md.unifyText(data, v)
}
// BUG(burntsushi)
// The behavior here is incorrect whenever a Go type satisfies the
// encoding.TextUnmarshaler interface but also corresponds to a TOML
// hash or array. In particular, the unmarshaler should only be applied
// to primitive TOML values. But at this point, it will be applied to
// all kinds of values and produce an incorrect error whenever those values
// are hashes or arrays (including arrays of tables).
k := rv.Kind()
// laziness
if k >= reflect.Int && k <= reflect.Uint64 {
return md.unifyInt(data, rv)
}
switch k {
case reflect.Ptr:
elem := reflect.New(rv.Type().Elem())
err := md.unify(data, reflect.Indirect(elem))
if err != nil {
return err
}
rv.Set(elem)
return nil
case reflect.Struct:
return md.unifyStruct(data, rv)
case reflect.Map:
return md.unifyMap(data, rv)
case reflect.Array:
return md.unifyArray(data, rv)
case reflect.Slice:
return md.unifySlice(data, rv)
case reflect.String:
return md.unifyString(data, rv)
case reflect.Bool:
return md.unifyBool(data, rv)
case reflect.Interface:
// we only support empty interfaces.
if rv.NumMethod() > 0 {
return e("unsupported type %s", rv.Type())
}
return md.unifyAnything(data, rv)
case reflect.Float32:
fallthrough
case reflect.Float64:
return md.unifyFloat64(data, rv)
}
return e("unsupported type %s", rv.Kind())
}
func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
tmap, ok := mapping.(map[string]interface{})
if !ok {
if mapping == nil {
return nil
}
return e("type mismatch for %s: expected table but found %T",
rv.Type().String(), mapping)
}
for key, datum := range tmap {
var f *field
fields := cachedTypeFields(rv.Type())
for i := range fields {
ff := &fields[i]
if ff.name == key {
f = ff
break
}
if f == nil && strings.EqualFold(ff.name, key) {
f = ff
}
}
if f != nil {
subv := rv
for _, i := range f.index {
subv = indirect(subv.Field(i))
}
if isUnifiable(subv) {
md.decoded[md.context.add(key).String()] = true
md.context = append(md.context, key)
if err := md.unify(datum, subv); err != nil {
return err
}
md.context = md.context[0 : len(md.context)-1]
} else if f.name != "" {
// Bad user! No soup for you!
return e("cannot write unexported field %s.%s",
rv.Type().String(), f.name)
}
}
}
return nil
}
func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error {
tmap, ok := mapping.(map[string]interface{})
if !ok {
if tmap == nil {
return nil
}
return badtype("map", mapping)
}
if rv.IsNil() {
rv.Set(reflect.MakeMap(rv.Type()))
}
for k, v := range tmap {
md.decoded[md.context.add(k).String()] = true
md.context = append(md.context, k)
rvkey := indirect(reflect.New(rv.Type().Key()))
rvval := reflect.Indirect(reflect.New(rv.Type().Elem()))
if err := md.unify(v, rvval); err != nil {
return err
}
md.context = md.context[0 : len(md.context)-1]
rvkey.SetString(k)
rv.SetMapIndex(rvkey, rvval)
}
return nil
}
func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error {
datav := reflect.ValueOf(data)
if datav.Kind() != reflect.Slice {
if !datav.IsValid() {
return nil
}
return badtype("slice", data)
}
sliceLen := datav.Len()
if sliceLen != rv.Len() {
return e("expected array length %d; got TOML array of length %d",
rv.Len(), sliceLen)
}
return md.unifySliceArray(datav, rv)
}
func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error {
datav := reflect.ValueOf(data)
if datav.Kind() != reflect.Slice {
if !datav.IsValid() {
return nil
}
return badtype("slice", data)
}
n := datav.Len()
if rv.IsNil() || rv.Cap() < n {
rv.Set(reflect.MakeSlice(rv.Type(), n, n))
}
rv.SetLen(n)
return md.unifySliceArray(datav, rv)
}
func (md *MetaData) unifySliceArray(data, rv reflect.Value) error {
sliceLen := data.Len()
for i := 0; i < sliceLen; i++ {
v := data.Index(i).Interface()
sliceval := indirect(rv.Index(i))
if err := md.unify(v, sliceval); err != nil {
return err
}
}
return nil
}
func (md *MetaData) unifyDatetime(data interface{}, rv reflect.Value) error {
if _, ok := data.(time.Time); ok {
rv.Set(reflect.ValueOf(data))
return nil
}
return badtype("time.Time", data)
}
func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error {
if s, ok := data.(string); ok {
rv.SetString(s)
return nil
}
return badtype("string", data)
}
func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error {
if num, ok := data.(float64); ok {
switch rv.Kind() {
case reflect.Float32:
fallthrough
case reflect.Float64:
rv.SetFloat(num)
default:
panic("bug")
}
return nil
}
return badtype("float", data)
}
func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error {
if num, ok := data.(int64); ok {
if rv.Kind() >= reflect.Int && rv.Kind() <= reflect.Int64 {
switch rv.Kind() {
case reflect.Int, reflect.Int64:
// No bounds checking necessary.
case reflect.Int8:
if num < math.MinInt8 || num > math.MaxInt8 {
return e("value %d is out of range for int8", num)
}
case reflect.Int16:
if num < math.MinInt16 || num > math.MaxInt16 {
return e("value %d is out of range for int16", num)
}
case reflect.Int32:
if num < math.MinInt32 || num > math.MaxInt32 {
return e("value %d is out of range for int32", num)
}
}
rv.SetInt(num)
} else if rv.Kind() >= reflect.Uint && rv.Kind() <= reflect.Uint64 {
unum := uint64(num)
switch rv.Kind() {
case reflect.Uint, reflect.Uint64:
// No bounds checking necessary.
case reflect.Uint8:
if num < 0 || unum > math.MaxUint8 {
return e("value %d is out of range for uint8", num)
}
case reflect.Uint16:
if num < 0 || unum > math.MaxUint16 {
return e("value %d is out of range for uint16", num)
}
case reflect.Uint32:
if num < 0 || unum > math.MaxUint32 {
return e("value %d is out of range for uint32", num)
}
}
rv.SetUint(unum)
} else {
panic("unreachable")
}
return nil
}
return badtype("integer", data)
}
func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error {
if b, ok := data.(bool); ok {
rv.SetBool(b)
return nil
}
return badtype("boolean", data)
}
func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error {
rv.Set(reflect.ValueOf(data))
return nil
}
func (md *MetaData) unifyText(data interface{}, v TextUnmarshaler) error {
var s string
switch sdata := data.(type) {
case TextMarshaler:
text, err := sdata.MarshalText()
if err != nil {
return err
}
s = string(text)
case fmt.Stringer:
s = sdata.String()
case string:
s = sdata
case bool:
s = fmt.Sprintf("%v", sdata)
case int64:
s = fmt.Sprintf("%d", sdata)
case float64:
s = fmt.Sprintf("%f", sdata)
default:
return badtype("primitive (string-like)", data)
}
if err := v.UnmarshalText([]byte(s)); err != nil {
return err
}
return nil
}
// rvalue returns a reflect.Value of `v`. All pointers are resolved.
func rvalue(v interface{}) reflect.Value {
return indirect(reflect.ValueOf(v))
}
// indirect returns the value pointed to by a pointer.
// Pointers are followed until the value is not a pointer.
// New values are allocated for each nil pointer.
//
// An exception to this rule is if the value satisfies an interface of
// interest to us (like encoding.TextUnmarshaler).
func indirect(v reflect.Value) reflect.Value {
if v.Kind() != reflect.Ptr {
if v.CanSet() {
pv := v.Addr()
if _, ok := pv.Interface().(TextUnmarshaler); ok {
return pv
}
}
return v
}
if v.IsNil() {
v.Set(reflect.New(v.Type().Elem()))
}
return indirect(reflect.Indirect(v))
}
func isUnifiable(rv reflect.Value) bool {
if rv.CanSet() {
return true
}
if _, ok := rv.Interface().(TextUnmarshaler); ok {
return true
}
return false
}
func badtype(expected string, data interface{}) error {
return e("cannot load TOML value of type %T into a Go %s", data, expected)
}

121
vendor/github.com/BurntSushi/toml/decode_meta.go generated vendored Normal file
View File

@ -0,0 +1,121 @@
package toml
import "strings"
// MetaData allows access to meta information about TOML data that may not
// be inferrable via reflection. In particular, whether a key has been defined
// and the TOML type of a key.
type MetaData struct {
mapping map[string]interface{}
types map[string]tomlType
keys []Key
decoded map[string]bool
context Key // Used only during decoding.
}
// IsDefined returns true if the key given exists in the TOML data. The key
// should be specified hierarchially. e.g.,
//
// // access the TOML key 'a.b.c'
// IsDefined("a", "b", "c")
//
// IsDefined will return false if an empty key given. Keys are case sensitive.
func (md *MetaData) IsDefined(key ...string) bool {
if len(key) == 0 {
return false
}
var hash map[string]interface{}
var ok bool
var hashOrVal interface{} = md.mapping
for _, k := range key {
if hash, ok = hashOrVal.(map[string]interface{}); !ok {
return false
}
if hashOrVal, ok = hash[k]; !ok {
return false
}
}
return true
}
// Type returns a string representation of the type of the key specified.
//
// Type will return the empty string if given an empty key or a key that
// does not exist. Keys are case sensitive.
func (md *MetaData) Type(key ...string) string {
fullkey := strings.Join(key, ".")
if typ, ok := md.types[fullkey]; ok {
return typ.typeString()
}
return ""
}
// Key is the type of any TOML key, including key groups. Use (MetaData).Keys
// to get values of this type.
type Key []string
func (k Key) String() string {
return strings.Join(k, ".")
}
func (k Key) maybeQuotedAll() string {
var ss []string
for i := range k {
ss = append(ss, k.maybeQuoted(i))
}
return strings.Join(ss, ".")
}
func (k Key) maybeQuoted(i int) string {
quote := false
for _, c := range k[i] {
if !isBareKeyChar(c) {
quote = true
break
}
}
if quote {
return "\"" + strings.Replace(k[i], "\"", "\\\"", -1) + "\""
}
return k[i]
}
func (k Key) add(piece string) Key {
newKey := make(Key, len(k)+1)
copy(newKey, k)
newKey[len(k)] = piece
return newKey
}
// Keys returns a slice of every key in the TOML data, including key groups.
// Each key is itself a slice, where the first element is the top of the
// hierarchy and the last is the most specific.
//
// The list will have the same order as the keys appeared in the TOML data.
//
// All keys returned are non-empty.
func (md *MetaData) Keys() []Key {
return md.keys
}
// Undecoded returns all keys that have not been decoded in the order in which
// they appear in the original TOML document.
//
// This includes keys that haven't been decoded because of a Primitive value.
// Once the Primitive value is decoded, the keys will be considered decoded.
//
// Also note that decoding into an empty interface will result in no decoding,
// and so no keys will be considered decoded.
//
// In this sense, the Undecoded keys correspond to keys in the TOML document
// that do not have a concrete type in your representation.
func (md *MetaData) Undecoded() []Key {
undecoded := make([]Key, 0, len(md.keys))
for _, key := range md.keys {
if !md.decoded[key.String()] {
undecoded = append(undecoded, key)
}
}
return undecoded
}

27
vendor/github.com/BurntSushi/toml/doc.go generated vendored Normal file
View File

@ -0,0 +1,27 @@
/*
Package toml provides facilities for decoding and encoding TOML configuration
files via reflection. There is also support for delaying decoding with
the Primitive type, and querying the set of keys in a TOML document with the
MetaData type.
The specification implemented: https://github.com/toml-lang/toml
The sub-command github.com/BurntSushi/toml/cmd/tomlv can be used to verify
whether a file is a valid TOML document. It can also be used to print the
type of each key in a TOML document.
Testing
There are two important types of tests used for this package. The first is
contained inside '*_test.go' files and uses the standard Go unit testing
framework. These tests are primarily devoted to holistically testing the
decoder and encoder.
The second type of testing is used to verify the implementation's adherence
to the TOML specification. These tests have been factored into their own
project: https://github.com/BurntSushi/toml-test
The reason the tests are in a separate project is so that they can be used by
any implementation of TOML. Namely, it is language agnostic.
*/
package toml

568
vendor/github.com/BurntSushi/toml/encode.go generated vendored Normal file
View File

@ -0,0 +1,568 @@
package toml
import (
"bufio"
"errors"
"fmt"
"io"
"reflect"
"sort"
"strconv"
"strings"
"time"
)
type tomlEncodeError struct{ error }
var (
errArrayMixedElementTypes = errors.New(
"toml: cannot encode array with mixed element types")
errArrayNilElement = errors.New(
"toml: cannot encode array with nil element")
errNonString = errors.New(
"toml: cannot encode a map with non-string key type")
errAnonNonStruct = errors.New(
"toml: cannot encode an anonymous field that is not a struct")
errArrayNoTable = errors.New(
"toml: TOML array element cannot contain a table")
errNoKey = errors.New(
"toml: top-level values must be Go maps or structs")
errAnything = errors.New("") // used in testing
)
var quotedReplacer = strings.NewReplacer(
"\t", "\\t",
"\n", "\\n",
"\r", "\\r",
"\"", "\\\"",
"\\", "\\\\",
)
// Encoder controls the encoding of Go values to a TOML document to some
// io.Writer.
//
// The indentation level can be controlled with the Indent field.
type Encoder struct {
// A single indentation level. By default it is two spaces.
Indent string
// hasWritten is whether we have written any output to w yet.
hasWritten bool
w *bufio.Writer
}
// NewEncoder returns a TOML encoder that encodes Go values to the io.Writer
// given. By default, a single indentation level is 2 spaces.
func NewEncoder(w io.Writer) *Encoder {
return &Encoder{
w: bufio.NewWriter(w),
Indent: " ",
}
}
// Encode writes a TOML representation of the Go value to the underlying
// io.Writer. If the value given cannot be encoded to a valid TOML document,
// then an error is returned.
//
// The mapping between Go values and TOML values should be precisely the same
// as for the Decode* functions. Similarly, the TextMarshaler interface is
// supported by encoding the resulting bytes as strings. (If you want to write
// arbitrary binary data then you will need to use something like base64 since
// TOML does not have any binary types.)
//
// When encoding TOML hashes (i.e., Go maps or structs), keys without any
// sub-hashes are encoded first.
//
// If a Go map is encoded, then its keys are sorted alphabetically for
// deterministic output. More control over this behavior may be provided if
// there is demand for it.
//
// Encoding Go values without a corresponding TOML representation---like map
// types with non-string keys---will cause an error to be returned. Similarly
// for mixed arrays/slices, arrays/slices with nil elements, embedded
// non-struct types and nested slices containing maps or structs.
// (e.g., [][]map[string]string is not allowed but []map[string]string is OK
// and so is []map[string][]string.)
func (enc *Encoder) Encode(v interface{}) error {
rv := eindirect(reflect.ValueOf(v))
if err := enc.safeEncode(Key([]string{}), rv); err != nil {
return err
}
return enc.w.Flush()
}
func (enc *Encoder) safeEncode(key Key, rv reflect.Value) (err error) {
defer func() {
if r := recover(); r != nil {
if terr, ok := r.(tomlEncodeError); ok {
err = terr.error
return
}
panic(r)
}
}()
enc.encode(key, rv)
return nil
}
func (enc *Encoder) encode(key Key, rv reflect.Value) {
// Special case. Time needs to be in ISO8601 format.
// Special case. If we can marshal the type to text, then we used that.
// Basically, this prevents the encoder for handling these types as
// generic structs (or whatever the underlying type of a TextMarshaler is).
switch rv.Interface().(type) {
case time.Time, TextMarshaler:
enc.keyEqElement(key, rv)
return
}
k := rv.Kind()
switch k {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
reflect.Int64,
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
reflect.Uint64,
reflect.Float32, reflect.Float64, reflect.String, reflect.Bool:
enc.keyEqElement(key, rv)
case reflect.Array, reflect.Slice:
if typeEqual(tomlArrayHash, tomlTypeOfGo(rv)) {
enc.eArrayOfTables(key, rv)
} else {
enc.keyEqElement(key, rv)
}
case reflect.Interface:
if rv.IsNil() {
return
}
enc.encode(key, rv.Elem())
case reflect.Map:
if rv.IsNil() {
return
}
enc.eTable(key, rv)
case reflect.Ptr:
if rv.IsNil() {
return
}
enc.encode(key, rv.Elem())
case reflect.Struct:
enc.eTable(key, rv)
default:
panic(e("unsupported type for key '%s': %s", key, k))
}
}
// eElement encodes any value that can be an array element (primitives and
// arrays).
func (enc *Encoder) eElement(rv reflect.Value) {
switch v := rv.Interface().(type) {
case time.Time:
// Special case time.Time as a primitive. Has to come before
// TextMarshaler below because time.Time implements
// encoding.TextMarshaler, but we need to always use UTC.
enc.wf(v.UTC().Format("2006-01-02T15:04:05Z"))
return
case TextMarshaler:
// Special case. Use text marshaler if it's available for this value.
if s, err := v.MarshalText(); err != nil {
encPanic(err)
} else {
enc.writeQuoted(string(s))
}
return
}
switch rv.Kind() {
case reflect.Bool:
enc.wf(strconv.FormatBool(rv.Bool()))
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
reflect.Int64:
enc.wf(strconv.FormatInt(rv.Int(), 10))
case reflect.Uint, reflect.Uint8, reflect.Uint16,
reflect.Uint32, reflect.Uint64:
enc.wf(strconv.FormatUint(rv.Uint(), 10))
case reflect.Float32:
enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 32)))
case reflect.Float64:
enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 64)))
case reflect.Array, reflect.Slice:
enc.eArrayOrSliceElement(rv)
case reflect.Interface:
enc.eElement(rv.Elem())
case reflect.String:
enc.writeQuoted(rv.String())
default:
panic(e("unexpected primitive type: %s", rv.Kind()))
}
}
// By the TOML spec, all floats must have a decimal with at least one
// number on either side.
func floatAddDecimal(fstr string) string {
if !strings.Contains(fstr, ".") {
return fstr + ".0"
}
return fstr
}
func (enc *Encoder) writeQuoted(s string) {
enc.wf("\"%s\"", quotedReplacer.Replace(s))
}
func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) {
length := rv.Len()
enc.wf("[")
for i := 0; i < length; i++ {
elem := rv.Index(i)
enc.eElement(elem)
if i != length-1 {
enc.wf(", ")
}
}
enc.wf("]")
}
func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) {
if len(key) == 0 {
encPanic(errNoKey)
}
for i := 0; i < rv.Len(); i++ {
trv := rv.Index(i)
if isNil(trv) {
continue
}
panicIfInvalidKey(key)
enc.newline()
enc.wf("%s[[%s]]", enc.indentStr(key), key.maybeQuotedAll())
enc.newline()
enc.eMapOrStruct(key, trv)
}
}
func (enc *Encoder) eTable(key Key, rv reflect.Value) {
panicIfInvalidKey(key)
if len(key) == 1 {
// Output an extra newline between top-level tables.
// (The newline isn't written if nothing else has been written though.)
enc.newline()
}
if len(key) > 0 {
enc.wf("%s[%s]", enc.indentStr(key), key.maybeQuotedAll())
enc.newline()
}
enc.eMapOrStruct(key, rv)
}
func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value) {
switch rv := eindirect(rv); rv.Kind() {
case reflect.Map:
enc.eMap(key, rv)
case reflect.Struct:
enc.eStruct(key, rv)
default:
panic("eTable: unhandled reflect.Value Kind: " + rv.Kind().String())
}
}
func (enc *Encoder) eMap(key Key, rv reflect.Value) {
rt := rv.Type()
if rt.Key().Kind() != reflect.String {
encPanic(errNonString)
}
// Sort keys so that we have deterministic output. And write keys directly
// underneath this key first, before writing sub-structs or sub-maps.
var mapKeysDirect, mapKeysSub []string
for _, mapKey := range rv.MapKeys() {
k := mapKey.String()
if typeIsHash(tomlTypeOfGo(rv.MapIndex(mapKey))) {
mapKeysSub = append(mapKeysSub, k)
} else {
mapKeysDirect = append(mapKeysDirect, k)
}
}
var writeMapKeys = func(mapKeys []string) {
sort.Strings(mapKeys)
for _, mapKey := range mapKeys {
mrv := rv.MapIndex(reflect.ValueOf(mapKey))
if isNil(mrv) {
// Don't write anything for nil fields.
continue
}
enc.encode(key.add(mapKey), mrv)
}
}
writeMapKeys(mapKeysDirect)
writeMapKeys(mapKeysSub)
}
func (enc *Encoder) eStruct(key Key, rv reflect.Value) {
// Write keys for fields directly under this key first, because if we write
// a field that creates a new table, then all keys under it will be in that
// table (not the one we're writing here).
rt := rv.Type()
var fieldsDirect, fieldsSub [][]int
var addFields func(rt reflect.Type, rv reflect.Value, start []int)
addFields = func(rt reflect.Type, rv reflect.Value, start []int) {
for i := 0; i < rt.NumField(); i++ {
f := rt.Field(i)
// skip unexported fields
if f.PkgPath != "" && !f.Anonymous {
continue
}
frv := rv.Field(i)
if f.Anonymous {
t := f.Type
switch t.Kind() {
case reflect.Struct:
// Treat anonymous struct fields with
// tag names as though they are not
// anonymous, like encoding/json does.
if getOptions(f.Tag).name == "" {
addFields(t, frv, f.Index)
continue
}
case reflect.Ptr:
if t.Elem().Kind() == reflect.Struct &&
getOptions(f.Tag).name == "" {
if !frv.IsNil() {
addFields(t.Elem(), frv.Elem(), f.Index)
}
continue
}
// Fall through to the normal field encoding logic below
// for non-struct anonymous fields.
}
}
if typeIsHash(tomlTypeOfGo(frv)) {
fieldsSub = append(fieldsSub, append(start, f.Index...))
} else {
fieldsDirect = append(fieldsDirect, append(start, f.Index...))
}
}
}
addFields(rt, rv, nil)
var writeFields = func(fields [][]int) {
for _, fieldIndex := range fields {
sft := rt.FieldByIndex(fieldIndex)
sf := rv.FieldByIndex(fieldIndex)
if isNil(sf) {
// Don't write anything for nil fields.
continue
}
opts := getOptions(sft.Tag)
if opts.skip {
continue
}
keyName := sft.Name
if opts.name != "" {
keyName = opts.name
}
if opts.omitempty && isEmpty(sf) {
continue
}
if opts.omitzero && isZero(sf) {
continue
}
enc.encode(key.add(keyName), sf)
}
}
writeFields(fieldsDirect)
writeFields(fieldsSub)
}
// tomlTypeName returns the TOML type name of the Go value's type. It is
// used to determine whether the types of array elements are mixed (which is
// forbidden). If the Go value is nil, then it is illegal for it to be an array
// element, and valueIsNil is returned as true.
// Returns the TOML type of a Go value. The type may be `nil`, which means
// no concrete TOML type could be found.
func tomlTypeOfGo(rv reflect.Value) tomlType {
if isNil(rv) || !rv.IsValid() {
return nil
}
switch rv.Kind() {
case reflect.Bool:
return tomlBool
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
reflect.Int64,
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
reflect.Uint64:
return tomlInteger
case reflect.Float32, reflect.Float64:
return tomlFloat
case reflect.Array, reflect.Slice:
if typeEqual(tomlHash, tomlArrayType(rv)) {
return tomlArrayHash
}
return tomlArray
case reflect.Ptr, reflect.Interface:
return tomlTypeOfGo(rv.Elem())
case reflect.String:
return tomlString
case reflect.Map:
return tomlHash
case reflect.Struct:
switch rv.Interface().(type) {
case time.Time:
return tomlDatetime
case TextMarshaler:
return tomlString
default:
return tomlHash
}
default:
panic("unexpected reflect.Kind: " + rv.Kind().String())
}
}
// tomlArrayType returns the element type of a TOML array. The type returned
// may be nil if it cannot be determined (e.g., a nil slice or a zero length
// slize). This function may also panic if it finds a type that cannot be
// expressed in TOML (such as nil elements, heterogeneous arrays or directly
// nested arrays of tables).
func tomlArrayType(rv reflect.Value) tomlType {
if isNil(rv) || !rv.IsValid() || rv.Len() == 0 {
return nil
}
firstType := tomlTypeOfGo(rv.Index(0))
if firstType == nil {
encPanic(errArrayNilElement)
}
rvlen := rv.Len()
for i := 1; i < rvlen; i++ {
elem := rv.Index(i)
switch elemType := tomlTypeOfGo(elem); {
case elemType == nil:
encPanic(errArrayNilElement)
case !typeEqual(firstType, elemType):
encPanic(errArrayMixedElementTypes)
}
}
// If we have a nested array, then we must make sure that the nested
// array contains ONLY primitives.
// This checks arbitrarily nested arrays.
if typeEqual(firstType, tomlArray) || typeEqual(firstType, tomlArrayHash) {
nest := tomlArrayType(eindirect(rv.Index(0)))
if typeEqual(nest, tomlHash) || typeEqual(nest, tomlArrayHash) {
encPanic(errArrayNoTable)
}
}
return firstType
}
type tagOptions struct {
skip bool // "-"
name string
omitempty bool
omitzero bool
}
func getOptions(tag reflect.StructTag) tagOptions {
t := tag.Get("toml")
if t == "-" {
return tagOptions{skip: true}
}
var opts tagOptions
parts := strings.Split(t, ",")
opts.name = parts[0]
for _, s := range parts[1:] {
switch s {
case "omitempty":
opts.omitempty = true
case "omitzero":
opts.omitzero = true
}
}
return opts
}
func isZero(rv reflect.Value) bool {
switch rv.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return rv.Int() == 0
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
return rv.Uint() == 0
case reflect.Float32, reflect.Float64:
return rv.Float() == 0.0
}
return false
}
func isEmpty(rv reflect.Value) bool {
switch rv.Kind() {
case reflect.Array, reflect.Slice, reflect.Map, reflect.String:
return rv.Len() == 0
case reflect.Bool:
return !rv.Bool()
}
return false
}
func (enc *Encoder) newline() {
if enc.hasWritten {
enc.wf("\n")
}
}
func (enc *Encoder) keyEqElement(key Key, val reflect.Value) {
if len(key) == 0 {
encPanic(errNoKey)
}
panicIfInvalidKey(key)
enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1))
enc.eElement(val)
enc.newline()
}
func (enc *Encoder) wf(format string, v ...interface{}) {
if _, err := fmt.Fprintf(enc.w, format, v...); err != nil {
encPanic(err)
}
enc.hasWritten = true
}
func (enc *Encoder) indentStr(key Key) string {
return strings.Repeat(enc.Indent, len(key)-1)
}
func encPanic(err error) {
panic(tomlEncodeError{err})
}
func eindirect(v reflect.Value) reflect.Value {
switch v.Kind() {
case reflect.Ptr, reflect.Interface:
return eindirect(v.Elem())
default:
return v
}
}
func isNil(rv reflect.Value) bool {
switch rv.Kind() {
case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
return rv.IsNil()
default:
return false
}
}
func panicIfInvalidKey(key Key) {
for _, k := range key {
if len(k) == 0 {
encPanic(e("Key '%s' is not a valid table name. Key names "+
"cannot be empty.", key.maybeQuotedAll()))
}
}
}
func isValidKeyName(s string) bool {
return len(s) != 0
}

19
vendor/github.com/BurntSushi/toml/encoding_types.go generated vendored Normal file
View File

@ -0,0 +1,19 @@
// +build go1.2
package toml
// In order to support Go 1.1, we define our own TextMarshaler and
// TextUnmarshaler types. For Go 1.2+, we just alias them with the
// standard library interfaces.
import (
"encoding"
)
// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here
// so that Go 1.1 can be supported.
type TextMarshaler encoding.TextMarshaler
// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined
// here so that Go 1.1 can be supported.
type TextUnmarshaler encoding.TextUnmarshaler

View File

@ -0,0 +1,18 @@
// +build !go1.2
package toml
// These interfaces were introduced in Go 1.2, so we add them manually when
// compiling for Go 1.1.
// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here
// so that Go 1.1 can be supported.
type TextMarshaler interface {
MarshalText() (text []byte, err error)
}
// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined
// here so that Go 1.1 can be supported.
type TextUnmarshaler interface {
UnmarshalText(text []byte) error
}

953
vendor/github.com/BurntSushi/toml/lex.go generated vendored Normal file
View File

@ -0,0 +1,953 @@
package toml
import (
"fmt"
"strings"
"unicode"
"unicode/utf8"
)
type itemType int
const (
itemError itemType = iota
itemNIL // used in the parser to indicate no type
itemEOF
itemText
itemString
itemRawString
itemMultilineString
itemRawMultilineString
itemBool
itemInteger
itemFloat
itemDatetime
itemArray // the start of an array
itemArrayEnd
itemTableStart
itemTableEnd
itemArrayTableStart
itemArrayTableEnd
itemKeyStart
itemCommentStart
itemInlineTableStart
itemInlineTableEnd
)
const (
eof = 0
comma = ','
tableStart = '['
tableEnd = ']'
arrayTableStart = '['
arrayTableEnd = ']'
tableSep = '.'
keySep = '='
arrayStart = '['
arrayEnd = ']'
commentStart = '#'
stringStart = '"'
stringEnd = '"'
rawStringStart = '\''
rawStringEnd = '\''
inlineTableStart = '{'
inlineTableEnd = '}'
)
type stateFn func(lx *lexer) stateFn
type lexer struct {
input string
start int
pos int
line int
state stateFn
items chan item
// Allow for backing up up to three runes.
// This is necessary because TOML contains 3-rune tokens (""" and ''').
prevWidths [3]int
nprev int // how many of prevWidths are in use
// If we emit an eof, we can still back up, but it is not OK to call
// next again.
atEOF bool
// A stack of state functions used to maintain context.
// The idea is to reuse parts of the state machine in various places.
// For example, values can appear at the top level or within arbitrarily
// nested arrays. The last state on the stack is used after a value has
// been lexed. Similarly for comments.
stack []stateFn
}
type item struct {
typ itemType
val string
line int
}
func (lx *lexer) nextItem() item {
for {
select {
case item := <-lx.items:
return item
default:
lx.state = lx.state(lx)
}
}
}
func lex(input string) *lexer {
lx := &lexer{
input: input,
state: lexTop,
line: 1,
items: make(chan item, 10),
stack: make([]stateFn, 0, 10),
}
return lx
}
func (lx *lexer) push(state stateFn) {
lx.stack = append(lx.stack, state)
}
func (lx *lexer) pop() stateFn {
if len(lx.stack) == 0 {
return lx.errorf("BUG in lexer: no states to pop")
}
last := lx.stack[len(lx.stack)-1]
lx.stack = lx.stack[0 : len(lx.stack)-1]
return last
}
func (lx *lexer) current() string {
return lx.input[lx.start:lx.pos]
}
func (lx *lexer) emit(typ itemType) {
lx.items <- item{typ, lx.current(), lx.line}
lx.start = lx.pos
}
func (lx *lexer) emitTrim(typ itemType) {
lx.items <- item{typ, strings.TrimSpace(lx.current()), lx.line}
lx.start = lx.pos
}
func (lx *lexer) next() (r rune) {
if lx.atEOF {
panic("next called after EOF")
}
if lx.pos >= len(lx.input) {
lx.atEOF = true
return eof
}
if lx.input[lx.pos] == '\n' {
lx.line++
}
lx.prevWidths[2] = lx.prevWidths[1]
lx.prevWidths[1] = lx.prevWidths[0]
if lx.nprev < 3 {
lx.nprev++
}
r, w := utf8.DecodeRuneInString(lx.input[lx.pos:])
lx.prevWidths[0] = w
lx.pos += w
return r
}
// ignore skips over the pending input before this point.
func (lx *lexer) ignore() {
lx.start = lx.pos
}
// backup steps back one rune. Can be called only twice between calls to next.
func (lx *lexer) backup() {
if lx.atEOF {
lx.atEOF = false
return
}
if lx.nprev < 1 {
panic("backed up too far")
}
w := lx.prevWidths[0]
lx.prevWidths[0] = lx.prevWidths[1]
lx.prevWidths[1] = lx.prevWidths[2]
lx.nprev--
lx.pos -= w
if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' {
lx.line--
}
}
// accept consumes the next rune if it's equal to `valid`.
func (lx *lexer) accept(valid rune) bool {
if lx.next() == valid {
return true
}
lx.backup()
return false
}
// peek returns but does not consume the next rune in the input.
func (lx *lexer) peek() rune {
r := lx.next()
lx.backup()
return r
}
// skip ignores all input that matches the given predicate.
func (lx *lexer) skip(pred func(rune) bool) {
for {
r := lx.next()
if pred(r) {
continue
}
lx.backup()
lx.ignore()
return
}
}
// errorf stops all lexing by emitting an error and returning `nil`.
// Note that any value that is a character is escaped if it's a special
// character (newlines, tabs, etc.).
func (lx *lexer) errorf(format string, values ...interface{}) stateFn {
lx.items <- item{
itemError,
fmt.Sprintf(format, values...),
lx.line,
}
return nil
}
// lexTop consumes elements at the top level of TOML data.
func lexTop(lx *lexer) stateFn {
r := lx.next()
if isWhitespace(r) || isNL(r) {
return lexSkip(lx, lexTop)
}
switch r {
case commentStart:
lx.push(lexTop)
return lexCommentStart
case tableStart:
return lexTableStart
case eof:
if lx.pos > lx.start {
return lx.errorf("unexpected EOF")
}
lx.emit(itemEOF)
return nil
}
// At this point, the only valid item can be a key, so we back up
// and let the key lexer do the rest.
lx.backup()
lx.push(lexTopEnd)
return lexKeyStart
}
// lexTopEnd is entered whenever a top-level item has been consumed. (A value
// or a table.) It must see only whitespace, and will turn back to lexTop
// upon a newline. If it sees EOF, it will quit the lexer successfully.
func lexTopEnd(lx *lexer) stateFn {
r := lx.next()
switch {
case r == commentStart:
// a comment will read to a newline for us.
lx.push(lexTop)
return lexCommentStart
case isWhitespace(r):
return lexTopEnd
case isNL(r):
lx.ignore()
return lexTop
case r == eof:
lx.emit(itemEOF)
return nil
}
return lx.errorf("expected a top-level item to end with a newline, "+
"comment, or EOF, but got %q instead", r)
}
// lexTable lexes the beginning of a table. Namely, it makes sure that
// it starts with a character other than '.' and ']'.
// It assumes that '[' has already been consumed.
// It also handles the case that this is an item in an array of tables.
// e.g., '[[name]]'.
func lexTableStart(lx *lexer) stateFn {
if lx.peek() == arrayTableStart {
lx.next()
lx.emit(itemArrayTableStart)
lx.push(lexArrayTableEnd)
} else {
lx.emit(itemTableStart)
lx.push(lexTableEnd)
}
return lexTableNameStart
}
func lexTableEnd(lx *lexer) stateFn {
lx.emit(itemTableEnd)
return lexTopEnd
}
func lexArrayTableEnd(lx *lexer) stateFn {
if r := lx.next(); r != arrayTableEnd {
return lx.errorf("expected end of table array name delimiter %q, "+
"but got %q instead", arrayTableEnd, r)
}
lx.emit(itemArrayTableEnd)
return lexTopEnd
}
func lexTableNameStart(lx *lexer) stateFn {
lx.skip(isWhitespace)
switch r := lx.peek(); {
case r == tableEnd || r == eof:
return lx.errorf("unexpected end of table name " +
"(table names cannot be empty)")
case r == tableSep:
return lx.errorf("unexpected table separator " +
"(table names cannot be empty)")
case r == stringStart || r == rawStringStart:
lx.ignore()
lx.push(lexTableNameEnd)
return lexValue // reuse string lexing
default:
return lexBareTableName
}
}
// lexBareTableName lexes the name of a table. It assumes that at least one
// valid character for the table has already been read.
func lexBareTableName(lx *lexer) stateFn {
r := lx.next()
if isBareKeyChar(r) {
return lexBareTableName
}
lx.backup()
lx.emit(itemText)
return lexTableNameEnd
}
// lexTableNameEnd reads the end of a piece of a table name, optionally
// consuming whitespace.
func lexTableNameEnd(lx *lexer) stateFn {
lx.skip(isWhitespace)
switch r := lx.next(); {
case isWhitespace(r):
return lexTableNameEnd
case r == tableSep:
lx.ignore()
return lexTableNameStart
case r == tableEnd:
return lx.pop()
default:
return lx.errorf("expected '.' or ']' to end table name, "+
"but got %q instead", r)
}
}
// lexKeyStart consumes a key name up until the first non-whitespace character.
// lexKeyStart will ignore whitespace.
func lexKeyStart(lx *lexer) stateFn {
r := lx.peek()
switch {
case r == keySep:
return lx.errorf("unexpected key separator %q", keySep)
case isWhitespace(r) || isNL(r):
lx.next()
return lexSkip(lx, lexKeyStart)
case r == stringStart || r == rawStringStart:
lx.ignore()
lx.emit(itemKeyStart)
lx.push(lexKeyEnd)
return lexValue // reuse string lexing
default:
lx.ignore()
lx.emit(itemKeyStart)
return lexBareKey
}
}
// lexBareKey consumes the text of a bare key. Assumes that the first character
// (which is not whitespace) has not yet been consumed.
func lexBareKey(lx *lexer) stateFn {
switch r := lx.next(); {
case isBareKeyChar(r):
return lexBareKey
case isWhitespace(r):
lx.backup()
lx.emit(itemText)
return lexKeyEnd
case r == keySep:
lx.backup()
lx.emit(itemText)
return lexKeyEnd
default:
return lx.errorf("bare keys cannot contain %q", r)
}
}
// lexKeyEnd consumes the end of a key and trims whitespace (up to the key
// separator).
func lexKeyEnd(lx *lexer) stateFn {
switch r := lx.next(); {
case r == keySep:
return lexSkip(lx, lexValue)
case isWhitespace(r):
return lexSkip(lx, lexKeyEnd)
default:
return lx.errorf("expected key separator %q, but got %q instead",
keySep, r)
}
}
// lexValue starts the consumption of a value anywhere a value is expected.
// lexValue will ignore whitespace.
// After a value is lexed, the last state on the next is popped and returned.
func lexValue(lx *lexer) stateFn {
// We allow whitespace to precede a value, but NOT newlines.
// In array syntax, the array states are responsible for ignoring newlines.
r := lx.next()
switch {
case isWhitespace(r):
return lexSkip(lx, lexValue)
case isDigit(r):
lx.backup() // avoid an extra state and use the same as above
return lexNumberOrDateStart
}
switch r {
case arrayStart:
lx.ignore()
lx.emit(itemArray)
return lexArrayValue
case inlineTableStart:
lx.ignore()
lx.emit(itemInlineTableStart)
return lexInlineTableValue
case stringStart:
if lx.accept(stringStart) {
if lx.accept(stringStart) {
lx.ignore() // Ignore """
return lexMultilineString
}
lx.backup()
}
lx.ignore() // ignore the '"'
return lexString
case rawStringStart:
if lx.accept(rawStringStart) {
if lx.accept(rawStringStart) {
lx.ignore() // Ignore """
return lexMultilineRawString
}
lx.backup()
}
lx.ignore() // ignore the "'"
return lexRawString
case '+', '-':
return lexNumberStart
case '.': // special error case, be kind to users
return lx.errorf("floats must start with a digit, not '.'")
}
if unicode.IsLetter(r) {
// Be permissive here; lexBool will give a nice error if the
// user wrote something like
// x = foo
// (i.e. not 'true' or 'false' but is something else word-like.)
lx.backup()
return lexBool
}
return lx.errorf("expected value but found %q instead", r)
}
// lexArrayValue consumes one value in an array. It assumes that '[' or ','
// have already been consumed. All whitespace and newlines are ignored.
func lexArrayValue(lx *lexer) stateFn {
r := lx.next()
switch {
case isWhitespace(r) || isNL(r):
return lexSkip(lx, lexArrayValue)
case r == commentStart:
lx.push(lexArrayValue)
return lexCommentStart
case r == comma:
return lx.errorf("unexpected comma")
case r == arrayEnd:
// NOTE(caleb): The spec isn't clear about whether you can have
// a trailing comma or not, so we'll allow it.
return lexArrayEnd
}
lx.backup()
lx.push(lexArrayValueEnd)
return lexValue
}
// lexArrayValueEnd consumes everything between the end of an array value and
// the next value (or the end of the array): it ignores whitespace and newlines
// and expects either a ',' or a ']'.
func lexArrayValueEnd(lx *lexer) stateFn {
r := lx.next()
switch {
case isWhitespace(r) || isNL(r):
return lexSkip(lx, lexArrayValueEnd)
case r == commentStart:
lx.push(lexArrayValueEnd)
return lexCommentStart
case r == comma:
lx.ignore()
return lexArrayValue // move on to the next value
case r == arrayEnd:
return lexArrayEnd
}
return lx.errorf(
"expected a comma or array terminator %q, but got %q instead",
arrayEnd, r,
)
}
// lexArrayEnd finishes the lexing of an array.
// It assumes that a ']' has just been consumed.
func lexArrayEnd(lx *lexer) stateFn {
lx.ignore()
lx.emit(itemArrayEnd)
return lx.pop()
}
// lexInlineTableValue consumes one key/value pair in an inline table.
// It assumes that '{' or ',' have already been consumed. Whitespace is ignored.
func lexInlineTableValue(lx *lexer) stateFn {
r := lx.next()
switch {
case isWhitespace(r):
return lexSkip(lx, lexInlineTableValue)
case isNL(r):
return lx.errorf("newlines not allowed within inline tables")
case r == commentStart:
lx.push(lexInlineTableValue)
return lexCommentStart
case r == comma:
return lx.errorf("unexpected comma")
case r == inlineTableEnd:
return lexInlineTableEnd
}
lx.backup()
lx.push(lexInlineTableValueEnd)
return lexKeyStart
}
// lexInlineTableValueEnd consumes everything between the end of an inline table
// key/value pair and the next pair (or the end of the table):
// it ignores whitespace and expects either a ',' or a '}'.
func lexInlineTableValueEnd(lx *lexer) stateFn {
r := lx.next()
switch {
case isWhitespace(r):
return lexSkip(lx, lexInlineTableValueEnd)
case isNL(r):
return lx.errorf("newlines not allowed within inline tables")
case r == commentStart:
lx.push(lexInlineTableValueEnd)
return lexCommentStart
case r == comma:
lx.ignore()
return lexInlineTableValue
case r == inlineTableEnd:
return lexInlineTableEnd
}
return lx.errorf("expected a comma or an inline table terminator %q, "+
"but got %q instead", inlineTableEnd, r)
}
// lexInlineTableEnd finishes the lexing of an inline table.
// It assumes that a '}' has just been consumed.
func lexInlineTableEnd(lx *lexer) stateFn {
lx.ignore()
lx.emit(itemInlineTableEnd)
return lx.pop()
}
// lexString consumes the inner contents of a string. It assumes that the
// beginning '"' has already been consumed and ignored.
func lexString(lx *lexer) stateFn {
r := lx.next()
switch {
case r == eof:
return lx.errorf("unexpected EOF")
case isNL(r):
return lx.errorf("strings cannot contain newlines")
case r == '\\':
lx.push(lexString)
return lexStringEscape
case r == stringEnd:
lx.backup()
lx.emit(itemString)
lx.next()
lx.ignore()
return lx.pop()
}
return lexString
}
// lexMultilineString consumes the inner contents of a string. It assumes that
// the beginning '"""' has already been consumed and ignored.
func lexMultilineString(lx *lexer) stateFn {
switch lx.next() {
case eof:
return lx.errorf("unexpected EOF")
case '\\':
return lexMultilineStringEscape
case stringEnd:
if lx.accept(stringEnd) {
if lx.accept(stringEnd) {
lx.backup()
lx.backup()
lx.backup()
lx.emit(itemMultilineString)
lx.next()
lx.next()
lx.next()
lx.ignore()
return lx.pop()
}
lx.backup()
}
}
return lexMultilineString
}
// lexRawString consumes a raw string. Nothing can be escaped in such a string.
// It assumes that the beginning "'" has already been consumed and ignored.
func lexRawString(lx *lexer) stateFn {
r := lx.next()
switch {
case r == eof:
return lx.errorf("unexpected EOF")
case isNL(r):
return lx.errorf("strings cannot contain newlines")
case r == rawStringEnd:
lx.backup()
lx.emit(itemRawString)
lx.next()
lx.ignore()
return lx.pop()
}
return lexRawString
}
// lexMultilineRawString consumes a raw string. Nothing can be escaped in such
// a string. It assumes that the beginning "'''" has already been consumed and
// ignored.
func lexMultilineRawString(lx *lexer) stateFn {
switch lx.next() {
case eof:
return lx.errorf("unexpected EOF")
case rawStringEnd:
if lx.accept(rawStringEnd) {
if lx.accept(rawStringEnd) {
lx.backup()
lx.backup()
lx.backup()
lx.emit(itemRawMultilineString)
lx.next()
lx.next()
lx.next()
lx.ignore()
return lx.pop()
}
lx.backup()
}
}
return lexMultilineRawString
}
// lexMultilineStringEscape consumes an escaped character. It assumes that the
// preceding '\\' has already been consumed.
func lexMultilineStringEscape(lx *lexer) stateFn {
// Handle the special case first:
if isNL(lx.next()) {
return lexMultilineString
}
lx.backup()
lx.push(lexMultilineString)
return lexStringEscape(lx)
}
func lexStringEscape(lx *lexer) stateFn {
r := lx.next()
switch r {
case 'b':
fallthrough
case 't':
fallthrough
case 'n':
fallthrough
case 'f':
fallthrough
case 'r':
fallthrough
case '"':
fallthrough
case '\\':
return lx.pop()
case 'u':
return lexShortUnicodeEscape
case 'U':
return lexLongUnicodeEscape
}
return lx.errorf("invalid escape character %q; only the following "+
"escape characters are allowed: "+
`\b, \t, \n, \f, \r, \", \\, \uXXXX, and \UXXXXXXXX`, r)
}
func lexShortUnicodeEscape(lx *lexer) stateFn {
var r rune
for i := 0; i < 4; i++ {
r = lx.next()
if !isHexadecimal(r) {
return lx.errorf(`expected four hexadecimal digits after '\u', `+
"but got %q instead", lx.current())
}
}
return lx.pop()
}
func lexLongUnicodeEscape(lx *lexer) stateFn {
var r rune
for i := 0; i < 8; i++ {
r = lx.next()
if !isHexadecimal(r) {
return lx.errorf(`expected eight hexadecimal digits after '\U', `+
"but got %q instead", lx.current())
}
}
return lx.pop()
}
// lexNumberOrDateStart consumes either an integer, a float, or datetime.
func lexNumberOrDateStart(lx *lexer) stateFn {
r := lx.next()
if isDigit(r) {
return lexNumberOrDate
}
switch r {
case '_':
return lexNumber
case 'e', 'E':
return lexFloat
case '.':
return lx.errorf("floats must start with a digit, not '.'")
}
return lx.errorf("expected a digit but got %q", r)
}
// lexNumberOrDate consumes either an integer, float or datetime.
func lexNumberOrDate(lx *lexer) stateFn {
r := lx.next()
if isDigit(r) {
return lexNumberOrDate
}
switch r {
case '-':
return lexDatetime
case '_':
return lexNumber
case '.', 'e', 'E':
return lexFloat
}
lx.backup()
lx.emit(itemInteger)
return lx.pop()
}
// lexDatetime consumes a Datetime, to a first approximation.
// The parser validates that it matches one of the accepted formats.
func lexDatetime(lx *lexer) stateFn {
r := lx.next()
if isDigit(r) {
return lexDatetime
}
switch r {
case '-', 'T', ':', '.', 'Z', '+':
return lexDatetime
}
lx.backup()
lx.emit(itemDatetime)
return lx.pop()
}
// lexNumberStart consumes either an integer or a float. It assumes that a sign
// has already been read, but that *no* digits have been consumed.
// lexNumberStart will move to the appropriate integer or float states.
func lexNumberStart(lx *lexer) stateFn {
// We MUST see a digit. Even floats have to start with a digit.
r := lx.next()
if !isDigit(r) {
if r == '.' {
return lx.errorf("floats must start with a digit, not '.'")
}
return lx.errorf("expected a digit but got %q", r)
}
return lexNumber
}
// lexNumber consumes an integer or a float after seeing the first digit.
func lexNumber(lx *lexer) stateFn {
r := lx.next()
if isDigit(r) {
return lexNumber
}
switch r {
case '_':
return lexNumber
case '.', 'e', 'E':
return lexFloat
}
lx.backup()
lx.emit(itemInteger)
return lx.pop()
}
// lexFloat consumes the elements of a float. It allows any sequence of
// float-like characters, so floats emitted by the lexer are only a first
// approximation and must be validated by the parser.
func lexFloat(lx *lexer) stateFn {
r := lx.next()
if isDigit(r) {
return lexFloat
}
switch r {
case '_', '.', '-', '+', 'e', 'E':
return lexFloat
}
lx.backup()
lx.emit(itemFloat)
return lx.pop()
}
// lexBool consumes a bool string: 'true' or 'false.
func lexBool(lx *lexer) stateFn {
var rs []rune
for {
r := lx.next()
if !unicode.IsLetter(r) {
lx.backup()
break
}
rs = append(rs, r)
}
s := string(rs)
switch s {
case "true", "false":
lx.emit(itemBool)
return lx.pop()
}
return lx.errorf("expected value but found %q instead", s)
}
// lexCommentStart begins the lexing of a comment. It will emit
// itemCommentStart and consume no characters, passing control to lexComment.
func lexCommentStart(lx *lexer) stateFn {
lx.ignore()
lx.emit(itemCommentStart)
return lexComment
}
// lexComment lexes an entire comment. It assumes that '#' has been consumed.
// It will consume *up to* the first newline character, and pass control
// back to the last state on the stack.
func lexComment(lx *lexer) stateFn {
r := lx.peek()
if isNL(r) || r == eof {
lx.emit(itemText)
return lx.pop()
}
lx.next()
return lexComment
}
// lexSkip ignores all slurped input and moves on to the next state.
func lexSkip(lx *lexer, nextState stateFn) stateFn {
return func(lx *lexer) stateFn {
lx.ignore()
return nextState
}
}
// isWhitespace returns true if `r` is a whitespace character according
// to the spec.
func isWhitespace(r rune) bool {
return r == '\t' || r == ' '
}
func isNL(r rune) bool {
return r == '\n' || r == '\r'
}
func isDigit(r rune) bool {
return r >= '0' && r <= '9'
}
func isHexadecimal(r rune) bool {
return (r >= '0' && r <= '9') ||
(r >= 'a' && r <= 'f') ||
(r >= 'A' && r <= 'F')
}
func isBareKeyChar(r rune) bool {
return (r >= 'A' && r <= 'Z') ||
(r >= 'a' && r <= 'z') ||
(r >= '0' && r <= '9') ||
r == '_' ||
r == '-'
}
func (itype itemType) String() string {
switch itype {
case itemError:
return "Error"
case itemNIL:
return "NIL"
case itemEOF:
return "EOF"
case itemText:
return "Text"
case itemString, itemRawString, itemMultilineString, itemRawMultilineString:
return "String"
case itemBool:
return "Bool"
case itemInteger:
return "Integer"
case itemFloat:
return "Float"
case itemDatetime:
return "DateTime"
case itemTableStart:
return "TableStart"
case itemTableEnd:
return "TableEnd"
case itemKeyStart:
return "KeyStart"
case itemArray:
return "Array"
case itemArrayEnd:
return "ArrayEnd"
case itemCommentStart:
return "CommentStart"
}
panic(fmt.Sprintf("BUG: Unknown type '%d'.", int(itype)))
}
func (item item) String() string {
return fmt.Sprintf("(%s, %s)", item.typ.String(), item.val)
}

592
vendor/github.com/BurntSushi/toml/parse.go generated vendored Normal file
View File

@ -0,0 +1,592 @@
package toml
import (
"fmt"
"strconv"
"strings"
"time"
"unicode"
"unicode/utf8"
)
type parser struct {
mapping map[string]interface{}
types map[string]tomlType
lx *lexer
// A list of keys in the order that they appear in the TOML data.
ordered []Key
// the full key for the current hash in scope
context Key
// the base key name for everything except hashes
currentKey string
// rough approximation of line number
approxLine int
// A map of 'key.group.names' to whether they were created implicitly.
implicits map[string]bool
}
type parseError string
func (pe parseError) Error() string {
return string(pe)
}
func parse(data string) (p *parser, err error) {
defer func() {
if r := recover(); r != nil {
var ok bool
if err, ok = r.(parseError); ok {
return
}
panic(r)
}
}()
p = &parser{
mapping: make(map[string]interface{}),
types: make(map[string]tomlType),
lx: lex(data),
ordered: make([]Key, 0),
implicits: make(map[string]bool),
}
for {
item := p.next()
if item.typ == itemEOF {
break
}
p.topLevel(item)
}
return p, nil
}
func (p *parser) panicf(format string, v ...interface{}) {
msg := fmt.Sprintf("Near line %d (last key parsed '%s'): %s",
p.approxLine, p.current(), fmt.Sprintf(format, v...))
panic(parseError(msg))
}
func (p *parser) next() item {
it := p.lx.nextItem()
if it.typ == itemError {
p.panicf("%s", it.val)
}
return it
}
func (p *parser) bug(format string, v ...interface{}) {
panic(fmt.Sprintf("BUG: "+format+"\n\n", v...))
}
func (p *parser) expect(typ itemType) item {
it := p.next()
p.assertEqual(typ, it.typ)
return it
}
func (p *parser) assertEqual(expected, got itemType) {
if expected != got {
p.bug("Expected '%s' but got '%s'.", expected, got)
}
}
func (p *parser) topLevel(item item) {
switch item.typ {
case itemCommentStart:
p.approxLine = item.line
p.expect(itemText)
case itemTableStart:
kg := p.next()
p.approxLine = kg.line
var key Key
for ; kg.typ != itemTableEnd && kg.typ != itemEOF; kg = p.next() {
key = append(key, p.keyString(kg))
}
p.assertEqual(itemTableEnd, kg.typ)
p.establishContext(key, false)
p.setType("", tomlHash)
p.ordered = append(p.ordered, key)
case itemArrayTableStart:
kg := p.next()
p.approxLine = kg.line
var key Key
for ; kg.typ != itemArrayTableEnd && kg.typ != itemEOF; kg = p.next() {
key = append(key, p.keyString(kg))
}
p.assertEqual(itemArrayTableEnd, kg.typ)
p.establishContext(key, true)
p.setType("", tomlArrayHash)
p.ordered = append(p.ordered, key)
case itemKeyStart:
kname := p.next()
p.approxLine = kname.line
p.currentKey = p.keyString(kname)
val, typ := p.value(p.next())
p.setValue(p.currentKey, val)
p.setType(p.currentKey, typ)
p.ordered = append(p.ordered, p.context.add(p.currentKey))
p.currentKey = ""
default:
p.bug("Unexpected type at top level: %s", item.typ)
}
}
// Gets a string for a key (or part of a key in a table name).
func (p *parser) keyString(it item) string {
switch it.typ {
case itemText:
return it.val
case itemString, itemMultilineString,
itemRawString, itemRawMultilineString:
s, _ := p.value(it)
return s.(string)
default:
p.bug("Unexpected key type: %s", it.typ)
panic("unreachable")
}
}
// value translates an expected value from the lexer into a Go value wrapped
// as an empty interface.
func (p *parser) value(it item) (interface{}, tomlType) {
switch it.typ {
case itemString:
return p.replaceEscapes(it.val), p.typeOfPrimitive(it)
case itemMultilineString:
trimmed := stripFirstNewline(stripEscapedWhitespace(it.val))
return p.replaceEscapes(trimmed), p.typeOfPrimitive(it)
case itemRawString:
return it.val, p.typeOfPrimitive(it)
case itemRawMultilineString:
return stripFirstNewline(it.val), p.typeOfPrimitive(it)
case itemBool:
switch it.val {
case "true":
return true, p.typeOfPrimitive(it)
case "false":
return false, p.typeOfPrimitive(it)
}
p.bug("Expected boolean value, but got '%s'.", it.val)
case itemInteger:
if !numUnderscoresOK(it.val) {
p.panicf("Invalid integer %q: underscores must be surrounded by digits",
it.val)
}
val := strings.Replace(it.val, "_", "", -1)
num, err := strconv.ParseInt(val, 10, 64)
if err != nil {
// Distinguish integer values. Normally, it'd be a bug if the lexer
// provides an invalid integer, but it's possible that the number is
// out of range of valid values (which the lexer cannot determine).
// So mark the former as a bug but the latter as a legitimate user
// error.
if e, ok := err.(*strconv.NumError); ok &&
e.Err == strconv.ErrRange {
p.panicf("Integer '%s' is out of the range of 64-bit "+
"signed integers.", it.val)
} else {
p.bug("Expected integer value, but got '%s'.", it.val)
}
}
return num, p.typeOfPrimitive(it)
case itemFloat:
parts := strings.FieldsFunc(it.val, func(r rune) bool {
switch r {
case '.', 'e', 'E':
return true
}
return false
})
for _, part := range parts {
if !numUnderscoresOK(part) {
p.panicf("Invalid float %q: underscores must be "+
"surrounded by digits", it.val)
}
}
if !numPeriodsOK(it.val) {
// As a special case, numbers like '123.' or '1.e2',
// which are valid as far as Go/strconv are concerned,
// must be rejected because TOML says that a fractional
// part consists of '.' followed by 1+ digits.
p.panicf("Invalid float %q: '.' must be followed "+
"by one or more digits", it.val)
}
val := strings.Replace(it.val, "_", "", -1)
num, err := strconv.ParseFloat(val, 64)
if err != nil {
if e, ok := err.(*strconv.NumError); ok &&
e.Err == strconv.ErrRange {
p.panicf("Float '%s' is out of the range of 64-bit "+
"IEEE-754 floating-point numbers.", it.val)
} else {
p.panicf("Invalid float value: %q", it.val)
}
}
return num, p.typeOfPrimitive(it)
case itemDatetime:
var t time.Time
var ok bool
var err error
for _, format := range []string{
"2006-01-02T15:04:05Z07:00",
"2006-01-02T15:04:05",
"2006-01-02",
} {
t, err = time.ParseInLocation(format, it.val, time.Local)
if err == nil {
ok = true
break
}
}
if !ok {
p.panicf("Invalid TOML Datetime: %q.", it.val)
}
return t, p.typeOfPrimitive(it)
case itemArray:
array := make([]interface{}, 0)
types := make([]tomlType, 0)
for it = p.next(); it.typ != itemArrayEnd; it = p.next() {
if it.typ == itemCommentStart {
p.expect(itemText)
continue
}
val, typ := p.value(it)
array = append(array, val)
types = append(types, typ)
}
return array, p.typeOfArray(types)
case itemInlineTableStart:
var (
hash = make(map[string]interface{})
outerContext = p.context
outerKey = p.currentKey
)
p.context = append(p.context, p.currentKey)
p.currentKey = ""
for it := p.next(); it.typ != itemInlineTableEnd; it = p.next() {
if it.typ != itemKeyStart {
p.bug("Expected key start but instead found %q, around line %d",
it.val, p.approxLine)
}
if it.typ == itemCommentStart {
p.expect(itemText)
continue
}
// retrieve key
k := p.next()
p.approxLine = k.line
kname := p.keyString(k)
// retrieve value
p.currentKey = kname
val, typ := p.value(p.next())
// make sure we keep metadata up to date
p.setType(kname, typ)
p.ordered = append(p.ordered, p.context.add(p.currentKey))
hash[kname] = val
}
p.context = outerContext
p.currentKey = outerKey
return hash, tomlHash
}
p.bug("Unexpected value type: %s", it.typ)
panic("unreachable")
}
// numUnderscoresOK checks whether each underscore in s is surrounded by
// characters that are not underscores.
func numUnderscoresOK(s string) bool {
accept := false
for _, r := range s {
if r == '_' {
if !accept {
return false
}
accept = false
continue
}
accept = true
}
return accept
}
// numPeriodsOK checks whether every period in s is followed by a digit.
func numPeriodsOK(s string) bool {
period := false
for _, r := range s {
if period && !isDigit(r) {
return false
}
period = r == '.'
}
return !period
}
// establishContext sets the current context of the parser,
// where the context is either a hash or an array of hashes. Which one is
// set depends on the value of the `array` parameter.
//
// Establishing the context also makes sure that the key isn't a duplicate, and
// will create implicit hashes automatically.
func (p *parser) establishContext(key Key, array bool) {
var ok bool
// Always start at the top level and drill down for our context.
hashContext := p.mapping
keyContext := make(Key, 0)
// We only need implicit hashes for key[0:-1]
for _, k := range key[0 : len(key)-1] {
_, ok = hashContext[k]
keyContext = append(keyContext, k)
// No key? Make an implicit hash and move on.
if !ok {
p.addImplicit(keyContext)
hashContext[k] = make(map[string]interface{})
}
// If the hash context is actually an array of tables, then set
// the hash context to the last element in that array.
//
// Otherwise, it better be a table, since this MUST be a key group (by
// virtue of it not being the last element in a key).
switch t := hashContext[k].(type) {
case []map[string]interface{}:
hashContext = t[len(t)-1]
case map[string]interface{}:
hashContext = t
default:
p.panicf("Key '%s' was already created as a hash.", keyContext)
}
}
p.context = keyContext
if array {
// If this is the first element for this array, then allocate a new
// list of tables for it.
k := key[len(key)-1]
if _, ok := hashContext[k]; !ok {
hashContext[k] = make([]map[string]interface{}, 0, 5)
}
// Add a new table. But make sure the key hasn't already been used
// for something else.
if hash, ok := hashContext[k].([]map[string]interface{}); ok {
hashContext[k] = append(hash, make(map[string]interface{}))
} else {
p.panicf("Key '%s' was already created and cannot be used as "+
"an array.", keyContext)
}
} else {
p.setValue(key[len(key)-1], make(map[string]interface{}))
}
p.context = append(p.context, key[len(key)-1])
}
// setValue sets the given key to the given value in the current context.
// It will make sure that the key hasn't already been defined, account for
// implicit key groups.
func (p *parser) setValue(key string, value interface{}) {
var tmpHash interface{}
var ok bool
hash := p.mapping
keyContext := make(Key, 0)
for _, k := range p.context {
keyContext = append(keyContext, k)
if tmpHash, ok = hash[k]; !ok {
p.bug("Context for key '%s' has not been established.", keyContext)
}
switch t := tmpHash.(type) {
case []map[string]interface{}:
// The context is a table of hashes. Pick the most recent table
// defined as the current hash.
hash = t[len(t)-1]
case map[string]interface{}:
hash = t
default:
p.bug("Expected hash to have type 'map[string]interface{}', but "+
"it has '%T' instead.", tmpHash)
}
}
keyContext = append(keyContext, key)
if _, ok := hash[key]; ok {
// Typically, if the given key has already been set, then we have
// to raise an error since duplicate keys are disallowed. However,
// it's possible that a key was previously defined implicitly. In this
// case, it is allowed to be redefined concretely. (See the
// `tests/valid/implicit-and-explicit-after.toml` test in `toml-test`.)
//
// But we have to make sure to stop marking it as an implicit. (So that
// another redefinition provokes an error.)
//
// Note that since it has already been defined (as a hash), we don't
// want to overwrite it. So our business is done.
if p.isImplicit(keyContext) {
p.removeImplicit(keyContext)
return
}
// Otherwise, we have a concrete key trying to override a previous
// key, which is *always* wrong.
p.panicf("Key '%s' has already been defined.", keyContext)
}
hash[key] = value
}
// setType sets the type of a particular value at a given key.
// It should be called immediately AFTER setValue.
//
// Note that if `key` is empty, then the type given will be applied to the
// current context (which is either a table or an array of tables).
func (p *parser) setType(key string, typ tomlType) {
keyContext := make(Key, 0, len(p.context)+1)
for _, k := range p.context {
keyContext = append(keyContext, k)
}
if len(key) > 0 { // allow type setting for hashes
keyContext = append(keyContext, key)
}
p.types[keyContext.String()] = typ
}
// addImplicit sets the given Key as having been created implicitly.
func (p *parser) addImplicit(key Key) {
p.implicits[key.String()] = true
}
// removeImplicit stops tagging the given key as having been implicitly
// created.
func (p *parser) removeImplicit(key Key) {
p.implicits[key.String()] = false
}
// isImplicit returns true if the key group pointed to by the key was created
// implicitly.
func (p *parser) isImplicit(key Key) bool {
return p.implicits[key.String()]
}
// current returns the full key name of the current context.
func (p *parser) current() string {
if len(p.currentKey) == 0 {
return p.context.String()
}
if len(p.context) == 0 {
return p.currentKey
}
return fmt.Sprintf("%s.%s", p.context, p.currentKey)
}
func stripFirstNewline(s string) string {
if len(s) == 0 || s[0] != '\n' {
return s
}
return s[1:]
}
func stripEscapedWhitespace(s string) string {
esc := strings.Split(s, "\\\n")
if len(esc) > 1 {
for i := 1; i < len(esc); i++ {
esc[i] = strings.TrimLeftFunc(esc[i], unicode.IsSpace)
}
}
return strings.Join(esc, "")
}
func (p *parser) replaceEscapes(str string) string {
var replaced []rune
s := []byte(str)
r := 0
for r < len(s) {
if s[r] != '\\' {
c, size := utf8.DecodeRune(s[r:])
r += size
replaced = append(replaced, c)
continue
}
r += 1
if r >= len(s) {
p.bug("Escape sequence at end of string.")
return ""
}
switch s[r] {
default:
p.bug("Expected valid escape code after \\, but got %q.", s[r])
return ""
case 'b':
replaced = append(replaced, rune(0x0008))
r += 1
case 't':
replaced = append(replaced, rune(0x0009))
r += 1
case 'n':
replaced = append(replaced, rune(0x000A))
r += 1
case 'f':
replaced = append(replaced, rune(0x000C))
r += 1
case 'r':
replaced = append(replaced, rune(0x000D))
r += 1
case '"':
replaced = append(replaced, rune(0x0022))
r += 1
case '\\':
replaced = append(replaced, rune(0x005C))
r += 1
case 'u':
// At this point, we know we have a Unicode escape of the form
// `uXXXX` at [r, r+5). (Because the lexer guarantees this
// for us.)
escaped := p.asciiEscapeToUnicode(s[r+1 : r+5])
replaced = append(replaced, escaped)
r += 5
case 'U':
// At this point, we know we have a Unicode escape of the form
// `uXXXX` at [r, r+9). (Because the lexer guarantees this
// for us.)
escaped := p.asciiEscapeToUnicode(s[r+1 : r+9])
replaced = append(replaced, escaped)
r += 9
}
}
return string(replaced)
}
func (p *parser) asciiEscapeToUnicode(bs []byte) rune {
s := string(bs)
hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32)
if err != nil {
p.bug("Could not parse '%s' as a hexadecimal number, but the "+
"lexer claims it's OK: %s", s, err)
}
if !utf8.ValidRune(rune(hex)) {
p.panicf("Escaped character '\\u%s' is not valid UTF-8.", s)
}
return rune(hex)
}
func isStringType(ty itemType) bool {
return ty == itemString || ty == itemMultilineString ||
ty == itemRawString || ty == itemRawMultilineString
}

1
vendor/github.com/BurntSushi/toml/session.vim generated vendored Normal file
View File

@ -0,0 +1 @@
au BufWritePost *.go silent!make tags > /dev/null 2>&1

91
vendor/github.com/BurntSushi/toml/type_check.go generated vendored Normal file
View File

@ -0,0 +1,91 @@
package toml
// tomlType represents any Go type that corresponds to a TOML type.
// While the first draft of the TOML spec has a simplistic type system that
// probably doesn't need this level of sophistication, we seem to be militating
// toward adding real composite types.
type tomlType interface {
typeString() string
}
// typeEqual accepts any two types and returns true if they are equal.
func typeEqual(t1, t2 tomlType) bool {
if t1 == nil || t2 == nil {
return false
}
return t1.typeString() == t2.typeString()
}
func typeIsHash(t tomlType) bool {
return typeEqual(t, tomlHash) || typeEqual(t, tomlArrayHash)
}
type tomlBaseType string
func (btype tomlBaseType) typeString() string {
return string(btype)
}
func (btype tomlBaseType) String() string {
return btype.typeString()
}
var (
tomlInteger tomlBaseType = "Integer"
tomlFloat tomlBaseType = "Float"
tomlDatetime tomlBaseType = "Datetime"
tomlString tomlBaseType = "String"
tomlBool tomlBaseType = "Bool"
tomlArray tomlBaseType = "Array"
tomlHash tomlBaseType = "Hash"
tomlArrayHash tomlBaseType = "ArrayHash"
)
// typeOfPrimitive returns a tomlType of any primitive value in TOML.
// Primitive values are: Integer, Float, Datetime, String and Bool.
//
// Passing a lexer item other than the following will cause a BUG message
// to occur: itemString, itemBool, itemInteger, itemFloat, itemDatetime.
func (p *parser) typeOfPrimitive(lexItem item) tomlType {
switch lexItem.typ {
case itemInteger:
return tomlInteger
case itemFloat:
return tomlFloat
case itemDatetime:
return tomlDatetime
case itemString:
return tomlString
case itemMultilineString:
return tomlString
case itemRawString:
return tomlString
case itemRawMultilineString:
return tomlString
case itemBool:
return tomlBool
}
p.bug("Cannot infer primitive type of lex item '%s'.", lexItem)
panic("unreachable")
}
// typeOfArray returns a tomlType for an array given a list of types of its
// values.
//
// In the current spec, if an array is homogeneous, then its type is always
// "Array". If the array is not homogeneous, an error is generated.
func (p *parser) typeOfArray(types []tomlType) tomlType {
// Empty arrays are cool.
if len(types) == 0 {
return tomlArray
}
theType := types[0]
for _, t := range types[1:] {
if !typeEqual(theType, t) {
p.panicf("Array contains values of type '%s' and '%s', but "+
"arrays must be homogeneous.", theType, t)
}
}
return tomlArray
}

242
vendor/github.com/BurntSushi/toml/type_fields.go generated vendored Normal file
View File

@ -0,0 +1,242 @@
package toml
// Struct field handling is adapted from code in encoding/json:
//
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the Go distribution.
import (
"reflect"
"sort"
"sync"
)
// A field represents a single field found in a struct.
type field struct {
name string // the name of the field (`toml` tag included)
tag bool // whether field has a `toml` tag
index []int // represents the depth of an anonymous field
typ reflect.Type // the type of the field
}
// byName sorts field by name, breaking ties with depth,
// then breaking ties with "name came from toml tag", then
// breaking ties with index sequence.
type byName []field
func (x byName) Len() int { return len(x) }
func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x byName) Less(i, j int) bool {
if x[i].name != x[j].name {
return x[i].name < x[j].name
}
if len(x[i].index) != len(x[j].index) {
return len(x[i].index) < len(x[j].index)
}
if x[i].tag != x[j].tag {
return x[i].tag
}
return byIndex(x).Less(i, j)
}
// byIndex sorts field by index sequence.
type byIndex []field
func (x byIndex) Len() int { return len(x) }
func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x byIndex) Less(i, j int) bool {
for k, xik := range x[i].index {
if k >= len(x[j].index) {
return false
}
if xik != x[j].index[k] {
return xik < x[j].index[k]
}
}
return len(x[i].index) < len(x[j].index)
}
// typeFields returns a list of fields that TOML should recognize for the given
// type. The algorithm is breadth-first search over the set of structs to
// include - the top struct and then any reachable anonymous structs.
func typeFields(t reflect.Type) []field {
// Anonymous fields to explore at the current level and the next.
current := []field{}
next := []field{{typ: t}}
// Count of queued names for current level and the next.
count := map[reflect.Type]int{}
nextCount := map[reflect.Type]int{}
// Types already visited at an earlier level.
visited := map[reflect.Type]bool{}
// Fields found.
var fields []field
for len(next) > 0 {
current, next = next, current[:0]
count, nextCount = nextCount, map[reflect.Type]int{}
for _, f := range current {
if visited[f.typ] {
continue
}
visited[f.typ] = true
// Scan f.typ for fields to include.
for i := 0; i < f.typ.NumField(); i++ {
sf := f.typ.Field(i)
if sf.PkgPath != "" && !sf.Anonymous { // unexported
continue
}
opts := getOptions(sf.Tag)
if opts.skip {
continue
}
index := make([]int, len(f.index)+1)
copy(index, f.index)
index[len(f.index)] = i
ft := sf.Type
if ft.Name() == "" && ft.Kind() == reflect.Ptr {
// Follow pointer.
ft = ft.Elem()
}
// Record found field and index sequence.
if opts.name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
tagged := opts.name != ""
name := opts.name
if name == "" {
name = sf.Name
}
fields = append(fields, field{name, tagged, index, ft})
if count[f.typ] > 1 {
// If there were multiple instances, add a second,
// so that the annihilation code will see a duplicate.
// It only cares about the distinction between 1 or 2,
// so don't bother generating any more copies.
fields = append(fields, fields[len(fields)-1])
}
continue
}
// Record new anonymous struct to explore in next round.
nextCount[ft]++
if nextCount[ft] == 1 {
f := field{name: ft.Name(), index: index, typ: ft}
next = append(next, f)
}
}
}
}
sort.Sort(byName(fields))
// Delete all fields that are hidden by the Go rules for embedded fields,
// except that fields with TOML tags are promoted.
// The fields are sorted in primary order of name, secondary order
// of field index length. Loop over names; for each name, delete
// hidden fields by choosing the one dominant field that survives.
out := fields[:0]
for advance, i := 0, 0; i < len(fields); i += advance {
// One iteration per name.
// Find the sequence of fields with the name of this first field.
fi := fields[i]
name := fi.name
for advance = 1; i+advance < len(fields); advance++ {
fj := fields[i+advance]
if fj.name != name {
break
}
}
if advance == 1 { // Only one field with this name
out = append(out, fi)
continue
}
dominant, ok := dominantField(fields[i : i+advance])
if ok {
out = append(out, dominant)
}
}
fields = out
sort.Sort(byIndex(fields))
return fields
}
// dominantField looks through the fields, all of which are known to
// have the same name, to find the single field that dominates the
// others using Go's embedding rules, modified by the presence of
// TOML tags. If there are multiple top-level fields, the boolean
// will be false: This condition is an error in Go and we skip all
// the fields.
func dominantField(fields []field) (field, bool) {
// The fields are sorted in increasing index-length order. The winner
// must therefore be one with the shortest index length. Drop all
// longer entries, which is easy: just truncate the slice.
length := len(fields[0].index)
tagged := -1 // Index of first tagged field.
for i, f := range fields {
if len(f.index) > length {
fields = fields[:i]
break
}
if f.tag {
if tagged >= 0 {
// Multiple tagged fields at the same level: conflict.
// Return no field.
return field{}, false
}
tagged = i
}
}
if tagged >= 0 {
return fields[tagged], true
}
// All remaining fields have the same length. If there's more than one,
// we have a conflict (two fields named "X" at the same level) and we
// return no field.
if len(fields) > 1 {
return field{}, false
}
return fields[0], true
}
var fieldCache struct {
sync.RWMutex
m map[reflect.Type][]field
}
// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
func cachedTypeFields(t reflect.Type) []field {
fieldCache.RLock()
f := fieldCache.m[t]
fieldCache.RUnlock()
if f != nil {
return f
}
// Compute fields without lock.
// Might duplicate effort but won't hold other computations back.
f = typeFields(t)
if f == nil {
f = []field{}
}
fieldCache.Lock()
if fieldCache.m == nil {
fieldCache.m = map[reflect.Type][]field{}
}
fieldCache.m[t] = f
fieldCache.Unlock()
return f
}

View File

@ -1,115 +0,0 @@
// +build ignore
package main
import (
"bytes"
"go/format"
"io/ioutil"
"log"
"net/http"
"sort"
"strings"
"text/template"
"time"
"gopkg.in/yaml.v2"
)
type CFCode int
type HTTPCode int
type Definition struct {
CFCode `yaml:"-"`
Name string `yaml:"name"`
HTTPCode `yaml:"http_code"`
Message string `yaml:"message"`
}
func main() {
const url = "https://raw.githubusercontent.com/cloudfoundry/cloud_controller_ng/master/vendor/errors/v2.yml"
resp, err := http.Get(url)
if err != nil {
log.Fatal(err)
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
log.Fatal(err)
}
var m map[CFCode]Definition
if err := yaml.Unmarshal(body, &m); err != nil {
log.Fatal(err)
}
var definitions []Definition
for c, d := range m {
d.CFCode = c
definitions = append(definitions, d)
}
sort.Slice(definitions, func(i, j int) bool {
return definitions[i].CFCode < definitions[j].CFCode
})
buf := &bytes.Buffer{}
if err := packageTemplate.Execute(buf, struct {
Timestamp time.Time
Definitions []Definition
}{
Timestamp: time.Now(),
Definitions: definitions,
}); err != nil {
log.Fatal(err)
}
dst, err := format.Source(buf.Bytes())
if err != nil {
log.Printf("%s", buf.Bytes())
log.Fatal(err)
}
if err := ioutil.WriteFile("cf_error.go", dst, 0600); err != nil {
log.Fatal(err)
}
}
// destutter ensures that s does not end in "Error".
func destutter(s string) string {
return strings.TrimSuffix(s, "Error")
}
var packageTemplate = template.Must(template.New("").Funcs(template.FuncMap{
"destutter": destutter,
}).Parse(`
package cfclient
// Code generated by go generate. DO NOT EDIT.
// This file was generated by robots at
// {{ .Timestamp }}
import "github.com/pkg/errors"
{{- range .Definitions }}
{{$method := printf "Is%sError" (.Name | destutter) }}
// {{ $method }} returns a boolean indicating whether
// the error is known to report the Cloud Foundry error:
// - Cloud Foundry code: {{ .CFCode }}
// - HTTP code: {{ .HTTPCode }}
// - message: {{ printf "%q" .Message }}
func Is{{ .Name | destutter }}Error(err error) bool {
cause := errors.Cause(err)
cferr, ok := cause.(CloudFoundryError)
if !ok {
return false
}
return cferr.Code == {{ .CFCode }}
}
{{- end }}
`))

View File

@ -1,703 +0,0 @@
// Copyright 2017, Joe Tsai. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.md file.
// +build ignore
package main
import (
"bytes"
"go/format"
"io/ioutil"
"log"
"os"
"text/template"
)
func main() {
if len(os.Args) != 3 {
log.Fatalf("Usage: %s GO_TYPE OUTPUT_FILE", os.Args[0])
}
typ := os.Args[1]
path := os.Args[2]
b := new(bytes.Buffer)
t := template.Must(template.New("source").Parse(source))
if err := t.Execute(b, struct {
Type, GeneratedMessage string
}{typ, "// Code generated by sais_gen.go. DO NOT EDIT."}); err != nil {
log.Fatalf("Template.Execute error: %v", err)
}
out, err := format.Source(bytes.TrimSpace(b.Bytes()))
if err != nil {
log.Fatalf("format.Source error: %v", err)
}
if err := ioutil.WriteFile(path, out, 0644); err != nil {
log.Fatalf("ioutil.WriteFile error: %v", err)
}
}
const source = `
// Copyright 2015, Joe Tsai. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.md file.
{{.GeneratedMessage}}
// ====================================================
// Copyright (c) 2008-2010 Yuta Mori All Rights Reserved.
//
// Permission is hereby granted, free of charge, to any person
// obtaining a copy of this software and associated documentation
// files (the "Software"), to deal in the Software without
// restriction, including without limitation the rights to use,
// copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following
// conditions:
//
// The above copyright notice and this permission notice shall be
// included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
// OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
// HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
// WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
// OTHER DEALINGS IN THE SOFTWARE.
// ====================================================
package sais
func getCounts_{{.Type}}(T []{{.Type}}, C []int, n, k int) {
var i int
for i = 0; i < k; i++ {
C[i] = 0
}
for i = 0; i < n; i++ {
C[T[i]]++
}
}
func getBuckets_{{.Type}}(C, B []int, k int, end bool) {
var i, sum int
if end {
for i = 0; i < k; i++ {
sum += C[i]
B[i] = sum
}
} else {
for i = 0; i < k; i++ {
sum += C[i]
B[i] = sum - C[i]
}
}
}
func sortLMS1_{{.Type}}(T []{{.Type}}, SA, C, B []int, n, k int) {
var b, i, j int
var c0, c1 int
// Compute SAl.
if &C[0] == &B[0] {
getCounts_{{.Type}}(T, C, n, k)
}
getBuckets_{{.Type}}(C, B, k, false) // Find starts of buckets
j = n - 1
c1 = int(T[j])
b = B[c1]
j--
if int(T[j]) < c1 {
SA[b] = ^j
} else {
SA[b] = j
}
b++
for i = 0; i < n; i++ {
if j = SA[i]; j > 0 {
if c0 = int(T[j]); c0 != c1 {
B[c1] = b
c1 = c0
b = B[c1]
}
j--
if int(T[j]) < c1 {
SA[b] = ^j
} else {
SA[b] = j
}
b++
SA[i] = 0
} else if j < 0 {
SA[i] = ^j
}
}
// Compute SAs.
if &C[0] == &B[0] {
getCounts_{{.Type}}(T, C, n, k)
}
getBuckets_{{.Type}}(C, B, k, true) // Find ends of buckets
c1 = 0
b = B[c1]
for i = n - 1; i >= 0; i-- {
if j = SA[i]; j > 0 {
if c0 = int(T[j]); c0 != c1 {
B[c1] = b
c1 = c0
b = B[c1]
}
j--
b--
if int(T[j]) > c1 {
SA[b] = ^(j + 1)
} else {
SA[b] = j
}
SA[i] = 0
}
}
}
func postProcLMS1_{{.Type}}(T []{{.Type}}, SA []int, n, m int) int {
var i, j, p, q, plen, qlen, name int
var c0, c1 int
var diff bool
// Compact all the sorted substrings into the first m items of SA.
// 2*m must be not larger than n (provable).
for i = 0; SA[i] < 0; i++ {
SA[i] = ^SA[i]
}
if i < m {
for j, i = i, i+1; ; i++ {
if p = SA[i]; p < 0 {
SA[j] = ^p
j++
SA[i] = 0
if j == m {
break
}
}
}
}
// Store the length of all substrings.
i = n - 1
j = n - 1
c0 = int(T[n-1])
for {
c1 = c0
if i--; i < 0 {
break
}
if c0 = int(T[i]); c0 < c1 {
break
}
}
for i >= 0 {
for {
c1 = c0
if i--; i < 0 {
break
}
if c0 = int(T[i]); c0 > c1 {
break
}
}
if i >= 0 {
SA[m+((i+1)>>1)] = j - i
j = i + 1
for {
c1 = c0
if i--; i < 0 {
break
}
if c0 = int(T[i]); c0 < c1 {
break
}
}
}
}
// Find the lexicographic names of all substrings.
name = 0
qlen = 0
for i, q = 0, n; i < m; i++ {
p = SA[i]
plen = SA[m+(p>>1)]
diff = true
if (plen == qlen) && ((q + plen) < n) {
for j = 0; (j < plen) && (T[p+j] == T[q+j]); j++ {
}
if j == plen {
diff = false
}
}
if diff {
name++
q = p
qlen = plen
}
SA[m+(p>>1)] = name
}
return name
}
func sortLMS2_{{.Type}}(T []{{.Type}}, SA, C, B, D []int, n, k int) {
var b, i, j, t, d int
var c0, c1 int
// Compute SAl.
getBuckets_{{.Type}}(C, B, k, false) // Find starts of buckets
j = n - 1
c1 = int(T[j])
b = B[c1]
j--
if int(T[j]) < c1 {
t = 1
} else {
t = 0
}
j += n
if t&1 > 0 {
SA[b] = ^j
} else {
SA[b] = j
}
b++
for i, d = 0, 0; i < n; i++ {
if j = SA[i]; j > 0 {
if n <= j {
d += 1
j -= n
}
if c0 = int(T[j]); c0 != c1 {
B[c1] = b
c1 = c0
b = B[c1]
}
j--
t = int(c0) << 1
if int(T[j]) < c1 {
t |= 1
}
if D[t] != d {
j += n
D[t] = d
}
if t&1 > 0 {
SA[b] = ^j
} else {
SA[b] = j
}
b++
SA[i] = 0
} else if j < 0 {
SA[i] = ^j
}
}
for i = n - 1; 0 <= i; i-- {
if SA[i] > 0 {
if SA[i] < n {
SA[i] += n
for j = i - 1; SA[j] < n; j-- {
}
SA[j] -= n
i = j
}
}
}
// Compute SAs.
getBuckets_{{.Type}}(C, B, k, true) // Find ends of buckets
c1 = 0
b = B[c1]
for i, d = n-1, d+1; i >= 0; i-- {
if j = SA[i]; j > 0 {
if n <= j {
d += 1
j -= n
}
if c0 = int(T[j]); c0 != c1 {
B[c1] = b
c1 = c0
b = B[c1]
}
j--
t = int(c0) << 1
if int(T[j]) > c1 {
t |= 1
}
if D[t] != d {
j += n
D[t] = d
}
b--
if t&1 > 0 {
SA[b] = ^(j + 1)
} else {
SA[b] = j
}
SA[i] = 0
}
}
}
func postProcLMS2_{{.Type}}(SA []int, n, m int) int {
var i, j, d, name int
// Compact all the sorted LMS substrings into the first m items of SA.
name = 0
for i = 0; SA[i] < 0; i++ {
j = ^SA[i]
if n <= j {
name += 1
}
SA[i] = j
}
if i < m {
for d, i = i, i+1; ; i++ {
if j = SA[i]; j < 0 {
j = ^j
if n <= j {
name += 1
}
SA[d] = j
d++
SA[i] = 0
if d == m {
break
}
}
}
}
if name < m {
// Store the lexicographic names.
for i, d = m-1, name+1; 0 <= i; i-- {
if j = SA[i]; n <= j {
j -= n
d--
}
SA[m+(j>>1)] = d
}
} else {
// Unset flags.
for i = 0; i < m; i++ {
if j = SA[i]; n <= j {
j -= n
SA[i] = j
}
}
}
return name
}
func induceSA_{{.Type}}(T []{{.Type}}, SA, C, B []int, n, k int) {
var b, i, j int
var c0, c1 int
// Compute SAl.
if &C[0] == &B[0] {
getCounts_{{.Type}}(T, C, n, k)
}
getBuckets_{{.Type}}(C, B, k, false) // Find starts of buckets
j = n - 1
c1 = int(T[j])
b = B[c1]
if j > 0 && int(T[j-1]) < c1 {
SA[b] = ^j
} else {
SA[b] = j
}
b++
for i = 0; i < n; i++ {
j = SA[i]
SA[i] = ^j
if j > 0 {
j--
if c0 = int(T[j]); c0 != c1 {
B[c1] = b
c1 = c0
b = B[c1]
}
if j > 0 && int(T[j-1]) < c1 {
SA[b] = ^j
} else {
SA[b] = j
}
b++
}
}
// Compute SAs.
if &C[0] == &B[0] {
getCounts_{{.Type}}(T, C, n, k)
}
getBuckets_{{.Type}}(C, B, k, true) // Find ends of buckets
c1 = 0
b = B[c1]
for i = n - 1; i >= 0; i-- {
if j = SA[i]; j > 0 {
j--
if c0 = int(T[j]); c0 != c1 {
B[c1] = b
c1 = c0
b = B[c1]
}
b--
if (j == 0) || (int(T[j-1]) > c1) {
SA[b] = ^j
} else {
SA[b] = j
}
} else {
SA[i] = ^j
}
}
}
func computeSA_{{.Type}}(T []{{.Type}}, SA []int, fs, n, k int) {
const (
minBucketSize = 512
sortLMS2Limit = 0x3fffffff
)
var C, B, D, RA []int
var bo int // Offset of B relative to SA
var b, i, j, m, p, q, name, newfs int
var c0, c1 int
var flags uint
if k <= minBucketSize {
C = make([]int, k)
if k <= fs {
bo = n + fs - k
B = SA[bo:]
flags = 1
} else {
B = make([]int, k)
flags = 3
}
} else if k <= fs {
C = SA[n+fs-k:]
if k <= fs-k {
bo = n + fs - 2*k
B = SA[bo:]
flags = 0
} else if k <= 4*minBucketSize {
B = make([]int, k)
flags = 2
} else {
B = C
flags = 8
}
} else {
C = make([]int, k)
B = C
flags = 4 | 8
}
if n <= sortLMS2Limit && 2 <= (n/k) {
if flags&1 > 0 {
if 2*k <= fs-k {
flags |= 32
} else {
flags |= 16
}
} else if flags == 0 && 2*k <= (fs-2*k) {
flags |= 32
}
}
// Stage 1: Reduce the problem by at least 1/2.
// Sort all the LMS-substrings.
getCounts_{{.Type}}(T, C, n, k)
getBuckets_{{.Type}}(C, B, k, true) // Find ends of buckets
for i = 0; i < n; i++ {
SA[i] = 0
}
b = -1
i = n - 1
j = n
m = 0
c0 = int(T[n-1])
for {
c1 = c0
if i--; i < 0 {
break
}
if c0 = int(T[i]); c0 < c1 {
break
}
}
for i >= 0 {
for {
c1 = c0
if i--; i < 0 {
break
}
if c0 = int(T[i]); c0 > c1 {
break
}
}
if i >= 0 {
if b >= 0 {
SA[b] = j
}
B[c1]--
b = B[c1]
j = i
m++
for {
c1 = c0
if i--; i < 0 {
break
}
if c0 = int(T[i]); c0 < c1 {
break
}
}
}
}
if m > 1 {
if flags&(16|32) > 0 {
if flags&16 > 0 {
D = make([]int, 2*k)
} else {
D = SA[bo-2*k:]
}
B[T[j+1]]++
for i, j = 0, 0; i < k; i++ {
j += C[i]
if B[i] != j {
SA[B[i]] += n
}
D[i] = 0
D[i+k] = 0
}
sortLMS2_{{.Type}}(T, SA, C, B, D, n, k)
name = postProcLMS2_{{.Type}}(SA, n, m)
} else {
sortLMS1_{{.Type}}(T, SA, C, B, n, k)
name = postProcLMS1_{{.Type}}(T, SA, n, m)
}
} else if m == 1 {
SA[b] = j + 1
name = 1
} else {
name = 0
}
// Stage 2: Solve the reduced problem.
// Recurse if names are not yet unique.
if name < m {
newfs = n + fs - 2*m
if flags&(1|4|8) == 0 {
if k+name <= newfs {
newfs -= k
} else {
flags |= 8
}
}
RA = SA[m+newfs:]
for i, j = m+(n>>1)-1, m-1; m <= i; i-- {
if SA[i] != 0 {
RA[j] = SA[i] - 1
j--
}
}
computeSA_int(RA, SA, newfs, m, name)
i = n - 1
j = m - 1
c0 = int(T[n-1])
for {
c1 = c0
if i--; i < 0 {
break
}
if c0 = int(T[i]); c0 < c1 {
break
}
}
for i >= 0 {
for {
c1 = c0
if i--; i < 0 {
break
}
if c0 = int(T[i]); c0 > c1 {
break
}
}
if i >= 0 {
RA[j] = i + 1
j--
for {
c1 = c0
if i--; i < 0 {
break
}
if c0 = int(T[i]); c0 < c1 {
break
}
}
}
}
for i = 0; i < m; i++ {
SA[i] = RA[SA[i]]
}
if flags&4 > 0 {
B = make([]int, k)
C = B
}
if flags&2 > 0 {
B = make([]int, k)
}
}
// Stage 3: Induce the result for the original problem.
if flags&8 > 0 {
getCounts_{{.Type}}(T, C, n, k)
}
// Put all left-most S characters into their buckets.
if m > 1 {
getBuckets_{{.Type}}(C, B, k, true) // Find ends of buckets
i = m - 1
j = n
p = SA[m-1]
c1 = int(T[p])
for {
c0 = c1
q = B[c0]
for q < j {
j--
SA[j] = 0
}
for {
j--
SA[j] = p
if i--; i < 0 {
break
}
p = SA[i]
if c1 = int(T[p]); c1 != c0 {
break
}
}
if i < 0 {
break
}
}
for j > 0 {
j--
SA[j] = 0
}
}
induceSA_{{.Type}}(T, SA, C, B, n, k)
}
`

View File

@ -1,332 +0,0 @@
// Copyright 2017 The go-github AUTHORS. All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
// gen-accessors generates accessor methods for structs with pointer fields.
//
// It is meant to be used by the go-github authors in conjunction with the
// go generate tool before sending a commit to GitHub.
package main
import (
"bytes"
"flag"
"fmt"
"go/ast"
"go/format"
"go/parser"
"go/token"
"io/ioutil"
"log"
"os"
"sort"
"strings"
"text/template"
)
const (
fileSuffix = "-accessors.go"
)
var (
verbose = flag.Bool("v", false, "Print verbose log messages")
sourceTmpl = template.Must(template.New("source").Parse(source))
// blacklistStructMethod lists "struct.method" combos to skip.
blacklistStructMethod = map[string]bool{
"RepositoryContent.GetContent": true,
"Client.GetBaseURL": true,
"Client.GetUploadURL": true,
"ErrorResponse.GetResponse": true,
"RateLimitError.GetResponse": true,
"AbuseRateLimitError.GetResponse": true,
}
// blacklistStruct lists structs to skip.
blacklistStruct = map[string]bool{
"Client": true,
}
)
func logf(fmt string, args ...interface{}) {
if *verbose {
log.Printf(fmt, args...)
}
}
func main() {
flag.Parse()
fset := token.NewFileSet()
pkgs, err := parser.ParseDir(fset, ".", sourceFilter, 0)
if err != nil {
log.Fatal(err)
return
}
for pkgName, pkg := range pkgs {
t := &templateData{
filename: pkgName + fileSuffix,
Year: 2017,
Package: pkgName,
Imports: map[string]string{},
}
for filename, f := range pkg.Files {
logf("Processing %v...", filename)
if err := t.processAST(f); err != nil {
log.Fatal(err)
}
}
if err := t.dump(); err != nil {
log.Fatal(err)
}
}
logf("Done.")
}
func (t *templateData) processAST(f *ast.File) error {
for _, decl := range f.Decls {
gd, ok := decl.(*ast.GenDecl)
if !ok {
continue
}
for _, spec := range gd.Specs {
ts, ok := spec.(*ast.TypeSpec)
if !ok {
continue
}
// Skip unexported identifiers.
if !ts.Name.IsExported() {
logf("Struct %v is unexported; skipping.", ts.Name)
continue
}
// Check if the struct is blacklisted.
if blacklistStruct[ts.Name.Name] {
logf("Struct %v is blacklisted; skipping.", ts.Name)
continue
}
st, ok := ts.Type.(*ast.StructType)
if !ok {
continue
}
for _, field := range st.Fields.List {
se, ok := field.Type.(*ast.StarExpr)
if len(field.Names) == 0 || !ok {
continue
}
fieldName := field.Names[0]
// Skip unexported identifiers.
if !fieldName.IsExported() {
logf("Field %v is unexported; skipping.", fieldName)
continue
}
// Check if "struct.method" is blacklisted.
if key := fmt.Sprintf("%v.Get%v", ts.Name, fieldName); blacklistStructMethod[key] {
logf("Method %v is blacklisted; skipping.", key)
continue
}
switch x := se.X.(type) {
case *ast.ArrayType:
t.addArrayType(x, ts.Name.String(), fieldName.String())
case *ast.Ident:
t.addIdent(x, ts.Name.String(), fieldName.String())
case *ast.MapType:
t.addMapType(x, ts.Name.String(), fieldName.String())
case *ast.SelectorExpr:
t.addSelectorExpr(x, ts.Name.String(), fieldName.String())
default:
logf("processAST: type %q, field %q, unknown %T: %+v", ts.Name, fieldName, x, x)
}
}
}
}
return nil
}
func sourceFilter(fi os.FileInfo) bool {
return !strings.HasSuffix(fi.Name(), "_test.go") && !strings.HasSuffix(fi.Name(), fileSuffix)
}
func (t *templateData) dump() error {
if len(t.Getters) == 0 {
logf("No getters for %v; skipping.", t.filename)
return nil
}
// Sort getters by ReceiverType.FieldName.
sort.Sort(byName(t.Getters))
var buf bytes.Buffer
if err := sourceTmpl.Execute(&buf, t); err != nil {
return err
}
clean, err := format.Source(buf.Bytes())
if err != nil {
return err
}
logf("Writing %v...", t.filename)
return ioutil.WriteFile(t.filename, clean, 0644)
}
func newGetter(receiverType, fieldName, fieldType, zeroValue string, namedStruct bool) *getter {
return &getter{
sortVal: strings.ToLower(receiverType) + "." + strings.ToLower(fieldName),
ReceiverVar: strings.ToLower(receiverType[:1]),
ReceiverType: receiverType,
FieldName: fieldName,
FieldType: fieldType,
ZeroValue: zeroValue,
NamedStruct: namedStruct,
}
}
func (t *templateData) addArrayType(x *ast.ArrayType, receiverType, fieldName string) {
var eltType string
switch elt := x.Elt.(type) {
case *ast.Ident:
eltType = elt.String()
default:
logf("addArrayType: type %q, field %q: unknown elt type: %T %+v; skipping.", receiverType, fieldName, elt, elt)
return
}
t.Getters = append(t.Getters, newGetter(receiverType, fieldName, "[]"+eltType, "nil", false))
}
func (t *templateData) addIdent(x *ast.Ident, receiverType, fieldName string) {
var zeroValue string
var namedStruct = false
switch x.String() {
case "int", "int64":
zeroValue = "0"
case "string":
zeroValue = `""`
case "bool":
zeroValue = "false"
case "Timestamp":
zeroValue = "Timestamp{}"
default:
zeroValue = "nil"
namedStruct = true
}
t.Getters = append(t.Getters, newGetter(receiverType, fieldName, x.String(), zeroValue, namedStruct))
}
func (t *templateData) addMapType(x *ast.MapType, receiverType, fieldName string) {
var keyType string
switch key := x.Key.(type) {
case *ast.Ident:
keyType = key.String()
default:
logf("addMapType: type %q, field %q: unknown key type: %T %+v; skipping.", receiverType, fieldName, key, key)
return
}
var valueType string
switch value := x.Value.(type) {
case *ast.Ident:
valueType = value.String()
default:
logf("addMapType: type %q, field %q: unknown value type: %T %+v; skipping.", receiverType, fieldName, value, value)
return
}
fieldType := fmt.Sprintf("map[%v]%v", keyType, valueType)
zeroValue := fmt.Sprintf("map[%v]%v{}", keyType, valueType)
t.Getters = append(t.Getters, newGetter(receiverType, fieldName, fieldType, zeroValue, false))
}
func (t *templateData) addSelectorExpr(x *ast.SelectorExpr, receiverType, fieldName string) {
if strings.ToLower(fieldName[:1]) == fieldName[:1] { // Non-exported field.
return
}
var xX string
if xx, ok := x.X.(*ast.Ident); ok {
xX = xx.String()
}
switch xX {
case "time", "json":
if xX == "json" {
t.Imports["encoding/json"] = "encoding/json"
} else {
t.Imports[xX] = xX
}
fieldType := fmt.Sprintf("%v.%v", xX, x.Sel.Name)
zeroValue := fmt.Sprintf("%v.%v{}", xX, x.Sel.Name)
if xX == "time" && x.Sel.Name == "Duration" {
zeroValue = "0"
}
t.Getters = append(t.Getters, newGetter(receiverType, fieldName, fieldType, zeroValue, false))
default:
logf("addSelectorExpr: xX %q, type %q, field %q: unknown x=%+v; skipping.", xX, receiverType, fieldName, x)
}
}
type templateData struct {
filename string
Year int
Package string
Imports map[string]string
Getters []*getter
}
type getter struct {
sortVal string // Lower-case version of "ReceiverType.FieldName".
ReceiverVar string // The one-letter variable name to match the ReceiverType.
ReceiverType string
FieldName string
FieldType string
ZeroValue string
NamedStruct bool // Getter for named struct.
}
type byName []*getter
func (b byName) Len() int { return len(b) }
func (b byName) Less(i, j int) bool { return b[i].sortVal < b[j].sortVal }
func (b byName) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
const source = `// Copyright {{.Year}} The go-github AUTHORS. All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Code generated by gen-accessors; DO NOT EDIT.
package {{.Package}}
{{with .Imports}}
import (
{{- range . -}}
"{{.}}"
{{end -}}
)
{{end}}
{{range .Getters}}
{{if .NamedStruct}}
// Get{{.FieldName}} returns the {{.FieldName}} field.
func ({{.ReceiverVar}} *{{.ReceiverType}}) Get{{.FieldName}}() *{{.FieldType}} {
if {{.ReceiverVar}} == nil {
return {{.ZeroValue}}
}
return {{.ReceiverVar}}.{{.FieldName}}
}
{{else}}
// Get{{.FieldName}} returns the {{.FieldName}} field if it's non-nil, zero value otherwise.
func ({{.ReceiverVar}} *{{.ReceiverType}}) Get{{.FieldName}}() {{.FieldType}} {
if {{.ReceiverVar}} == nil || {{.ReceiverVar}}.{{.FieldName}} == nil {
return {{.ZeroValue}}
}
return *{{.ReceiverVar}}.{{.FieldName}}
}
{{end}}
{{end}}
`

353
vendor/github.com/hashicorp/consul-template/LICENSE generated vendored Normal file
View File

@ -0,0 +1,353 @@
Mozilla Public License, version 2.0
1. Definitions
1.1. “Contributor”
means each individual or legal entity that creates, contributes to the
creation of, or owns Covered Software.
1.2. “Contributor Version”
means the combination of the Contributions of others (if any) used by a
Contributor and that particular Contributors Contribution.
1.3. “Contribution”
means Covered Software of a particular Contributor.
1.4. “Covered Software”
means Source Code Form to which the initial Contributor has attached the
notice in Exhibit A, the Executable Form of such Source Code Form, and
Modifications of such Source Code Form, in each case including portions
thereof.
1.5. “Incompatible With Secondary Licenses”
means
a. that the initial Contributor has attached the notice described in
Exhibit B to the Covered Software; or
b. that the Covered Software was made available under the terms of version
1.1 or earlier of the License, but not also under the terms of a
Secondary License.
1.6. “Executable Form”
means any form of the work other than Source Code Form.
1.7. “Larger Work”
means a work that combines Covered Software with other material, in a separate
file or files, that is not Covered Software.
1.8. “License”
means this document.
1.9. “Licensable”
means having the right to grant, to the maximum extent possible, whether at the
time of the initial grant or subsequently, any and all of the rights conveyed by
this License.
1.10. “Modifications”
means any of the following:
a. any file in Source Code Form that results from an addition to, deletion
from, or modification of the contents of Covered Software; or
b. any new file in Source Code Form that contains any Covered Software.
1.11. “Patent Claims” of a Contributor
means any patent claim(s), including without limitation, method, process,
and apparatus claims, in any patent Licensable by such Contributor that
would be infringed, but for the grant of the License, by the making,
using, selling, offering for sale, having made, import, or transfer of
either its Contributions or its Contributor Version.
1.12. “Secondary License”
means either the GNU General Public License, Version 2.0, the GNU Lesser
General Public License, Version 2.1, the GNU Affero General Public
License, Version 3.0, or any later versions of those licenses.
1.13. “Source Code Form”
means the form of the work preferred for making modifications.
1.14. “You” (or “Your”)
means an individual or a legal entity exercising rights under this
License. For legal entities, “You” includes any entity that controls, is
controlled by, or is under common control with You. For purposes of this
definition, “control” means (a) the power, direct or indirect, to cause
the direction or management of such entity, whether by contract or
otherwise, or (b) ownership of more than fifty percent (50%) of the
outstanding shares or beneficial ownership of such entity.
2. License Grants and Conditions
2.1. Grants
Each Contributor hereby grants You a world-wide, royalty-free,
non-exclusive license:
a. under intellectual property rights (other than patent or trademark)
Licensable by such Contributor to use, reproduce, make available,
modify, display, perform, distribute, and otherwise exploit its
Contributions, either on an unmodified basis, with Modifications, or as
part of a Larger Work; and
b. under Patent Claims of such Contributor to make, use, sell, offer for
sale, have made, import, and otherwise transfer either its Contributions
or its Contributor Version.
2.2. Effective Date
The licenses granted in Section 2.1 with respect to any Contribution become
effective for each Contribution on the date the Contributor first distributes
such Contribution.
2.3. Limitations on Grant Scope
The licenses granted in this Section 2 are the only rights granted under this
License. No additional rights or licenses will be implied from the distribution
or licensing of Covered Software under this License. Notwithstanding Section
2.1(b) above, no patent license is granted by a Contributor:
a. for any code that a Contributor has removed from Covered Software; or
b. for infringements caused by: (i) Your and any other third partys
modifications of Covered Software, or (ii) the combination of its
Contributions with other software (except as part of its Contributor
Version); or
c. under Patent Claims infringed by Covered Software in the absence of its
Contributions.
This License does not grant any rights in the trademarks, service marks, or
logos of any Contributor (except as may be necessary to comply with the
notice requirements in Section 3.4).
2.4. Subsequent Licenses
No Contributor makes additional grants as a result of Your choice to
distribute the Covered Software under a subsequent version of this License
(see Section 10.2) or under the terms of a Secondary License (if permitted
under the terms of Section 3.3).
2.5. Representation
Each Contributor represents that the Contributor believes its Contributions
are its original creation(s) or it has sufficient rights to grant the
rights to its Contributions conveyed by this License.
2.6. Fair Use
This License is not intended to limit any rights You have under applicable
copyright doctrines of fair use, fair dealing, or other equivalents.
2.7. Conditions
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
Section 2.1.
3. Responsibilities
3.1. Distribution of Source Form
All distribution of Covered Software in Source Code Form, including any
Modifications that You create or to which You contribute, must be under the
terms of this License. You must inform recipients that the Source Code Form
of the Covered Software is governed by the terms of this License, and how
they can obtain a copy of this License. You may not attempt to alter or
restrict the recipients rights in the Source Code Form.
3.2. Distribution of Executable Form
If You distribute Covered Software in Executable Form then:
a. such Covered Software must also be made available in Source Code Form,
as described in Section 3.1, and You must inform recipients of the
Executable Form how they can obtain a copy of such Source Code Form by
reasonable means in a timely manner, at a charge no more than the cost
of distribution to the recipient; and
b. You may distribute such Executable Form under the terms of this License,
or sublicense it under different terms, provided that the license for
the Executable Form does not attempt to limit or alter the recipients
rights in the Source Code Form under this License.
3.3. Distribution of a Larger Work
You may create and distribute a Larger Work under terms of Your choice,
provided that You also comply with the requirements of this License for the
Covered Software. If the Larger Work is a combination of Covered Software
with a work governed by one or more Secondary Licenses, and the Covered
Software is not Incompatible With Secondary Licenses, this License permits
You to additionally distribute such Covered Software under the terms of
such Secondary License(s), so that the recipient of the Larger Work may, at
their option, further distribute the Covered Software under the terms of
either this License or such Secondary License(s).
3.4. Notices
You may not remove or alter the substance of any license notices (including
copyright notices, patent notices, disclaimers of warranty, or limitations
of liability) contained within the Source Code Form of the Covered
Software, except that You may alter any license notices to the extent
required to remedy known factual inaccuracies.
3.5. Application of Additional Terms
You may choose to offer, and to charge a fee for, warranty, support,
indemnity or liability obligations to one or more recipients of Covered
Software. However, You may do so only on Your own behalf, and not on behalf
of any Contributor. You must make it absolutely clear that any such
warranty, support, indemnity, or liability obligation is offered by You
alone, and You hereby agree to indemnify every Contributor for any
liability incurred by such Contributor as a result of warranty, support,
indemnity or liability terms You offer. You may include additional
disclaimers of warranty and limitations of liability specific to any
jurisdiction.
4. Inability to Comply Due to Statute or Regulation
If it is impossible for You to comply with any of the terms of this License
with respect to some or all of the Covered Software due to statute, judicial
order, or regulation then You must: (a) comply with the terms of this License
to the maximum extent possible; and (b) describe the limitations and the code
they affect. Such description must be placed in a text file included with all
distributions of the Covered Software under this License. Except to the
extent prohibited by statute or regulation, such description must be
sufficiently detailed for a recipient of ordinary skill to be able to
understand it.
5. Termination
5.1. The rights granted under this License will terminate automatically if You
fail to comply with any of its terms. However, if You become compliant,
then the rights granted under this License from a particular Contributor
are reinstated (a) provisionally, unless and until such Contributor
explicitly and finally terminates Your grants, and (b) on an ongoing basis,
if such Contributor fails to notify You of the non-compliance by some
reasonable means prior to 60 days after You have come back into compliance.
Moreover, Your grants from a particular Contributor are reinstated on an
ongoing basis if such Contributor notifies You of the non-compliance by
some reasonable means, this is the first time You have received notice of
non-compliance with this License from such Contributor, and You become
compliant prior to 30 days after Your receipt of the notice.
5.2. If You initiate litigation against any entity by asserting a patent
infringement claim (excluding declaratory judgment actions, counter-claims,
and cross-claims) alleging that a Contributor Version directly or
indirectly infringes any patent, then the rights granted to You by any and
all Contributors for the Covered Software under Section 2.1 of this License
shall terminate.
5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
license agreements (excluding distributors and resellers) which have been
validly granted by You or Your distributors under this License prior to
termination shall survive termination.
6. Disclaimer of Warranty
Covered Software is provided under this License on an “as is” basis, without
warranty of any kind, either expressed, implied, or statutory, including,
without limitation, warranties that the Covered Software is free of defects,
merchantable, fit for a particular purpose or non-infringing. The entire
risk as to the quality and performance of the Covered Software is with You.
Should any Covered Software prove defective in any respect, You (not any
Contributor) assume the cost of any necessary servicing, repair, or
correction. This disclaimer of warranty constitutes an essential part of this
License. No use of any Covered Software is authorized under this License
except under this disclaimer.
7. Limitation of Liability
Under no circumstances and under no legal theory, whether tort (including
negligence), contract, or otherwise, shall any Contributor, or anyone who
distributes Covered Software as permitted above, be liable to You for any
direct, indirect, special, incidental, or consequential damages of any
character including, without limitation, damages for lost profits, loss of
goodwill, work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses, even if such party shall have been
informed of the possibility of such damages. This limitation of liability
shall not apply to liability for death or personal injury resulting from such
partys negligence to the extent applicable law prohibits such limitation.
Some jurisdictions do not allow the exclusion or limitation of incidental or
consequential damages, so this exclusion and limitation may not apply to You.
8. Litigation
Any litigation relating to this License may be brought only in the courts of
a jurisdiction where the defendant maintains its principal place of business
and such litigation shall be governed by laws of that jurisdiction, without
reference to its conflict-of-law provisions. Nothing in this Section shall
prevent a partys ability to bring cross-claims or counter-claims.
9. Miscellaneous
This License represents the complete agreement concerning the subject matter
hereof. If any provision of this License is held to be unenforceable, such
provision shall be reformed only to the extent necessary to make it
enforceable. Any law or regulation which provides that the language of a
contract shall be construed against the drafter shall not be used to construe
this License against a Contributor.
10. Versions of the License
10.1. New Versions
Mozilla Foundation is the license steward. Except as provided in Section
10.3, no one other than the license steward has the right to modify or
publish new versions of this License. Each version will be given a
distinguishing version number.
10.2. Effect of New Versions
You may distribute the Covered Software under the terms of the version of
the License under which You originally received the Covered Software, or
under the terms of any subsequent version published by the license
steward.
10.3. Modified Versions
If you create software not governed by this License, and you want to
create a new license for such software, you may create and use a modified
version of this License if you rename the license and remove any
references to the name of the license steward (except to note that such
modified license differs from this License).
10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
If You choose to distribute Source Code Form that is Incompatible With
Secondary Licenses under the terms of this version of the License, the
notice described in Exhibit B of this License must be attached.
Exhibit A - Source Code Form License Notice
This Source Code Form is subject to the
terms of the Mozilla Public License, v.
2.0. If a copy of the MPL was not
distributed with this file, You can
obtain one at
http://mozilla.org/MPL/2.0/.
If it is not possible or desirable to put the notice in a particular file, then
You may include the notice in a location (such as a LICENSE file in a relevant
directory) where a recipient would be likely to look for such a notice.
You may add additional accurate notices of copyright ownership.
Exhibit B - “Incompatible With Secondary Licenses” Notice
This Source Code Form is “Incompatible
With Secondary Licenses”, as defined by
the Mozilla Public License, v. 2.0.

View File

@ -0,0 +1,428 @@
package child
import (
"errors"
"fmt"
"io"
"log"
"math/rand"
"os"
"os/exec"
"strings"
"sync"
"syscall"
"time"
)
func init() {
// Seed the default rand Source with current time to produce better random
// numbers used with splay
rand.Seed(time.Now().UnixNano())
}
var (
// ErrMissingCommand is the error returned when no command is specified
// to run.
ErrMissingCommand = errors.New("missing command")
// ExitCodeOK is the default OK exit code.
ExitCodeOK = 0
// ExitCodeError is the default error code returned when the child exits with
// an error without a more specific code.
ExitCodeError = 127
)
// Child is a wrapper around a child process which can be used to send signals
// and manage the processes' lifecycle.
type Child struct {
sync.RWMutex
stdin io.Reader
stdout, stderr io.Writer
command string
args []string
env []string
timeout time.Duration
reloadSignal os.Signal
killSignal os.Signal
killTimeout time.Duration
splay time.Duration
// cmd is the actual child process under management.
cmd *exec.Cmd
// exitCh is the channel where the processes exit will be returned.
exitCh chan int
// stopLock is the mutex to lock when stopping. stopCh is the circuit breaker
// to force-terminate any waiting splays to kill the process now. stopped is
// a boolean that tells us if we have previously been stopped.
stopLock sync.RWMutex
stopCh chan struct{}
stopped bool
}
// NewInput is input to the NewChild function.
type NewInput struct {
// Stdin is the io.Reader where input will come from. This is sent directly to
// the child process. Stdout and Stderr represent the io.Writer objects where
// the child process will send output and errorput.
Stdin io.Reader
Stdout, Stderr io.Writer
// Command is the name of the command to execute. Args are the list of
// arguments to pass when starting the command.
Command string
Args []string
// Timeout is the maximum amount of time to allow the command to execute. If
// set to 0, the command is permitted to run infinitely.
Timeout time.Duration
// Env represents the condition of the child processes' environment
// variables. Only these environment variables will be given to the child, so
// it is the responsibility of the caller to include the parent processes
// environment, if required. This should be in the key=value format.
Env []string
// ReloadSignal is the signal to send to reload this process. This value may
// be nil.
ReloadSignal os.Signal
// KillSignal is the signal to send to gracefully kill this process. This
// value may be nil.
KillSignal os.Signal
// KillTimeout is the amount of time to wait for the process to gracefully
// terminate before force-killing.
KillTimeout time.Duration
// Splay is the maximum random amount of time to wait before sending signals.
// This option helps reduce the thundering herd problem by effectively
// sleeping for a random amount of time before sending the signal. This
// prevents multiple processes from all signaling at the same time. This value
// may be zero (which disables the splay entirely).
Splay time.Duration
}
// New creates a new child process for management with high-level APIs for
// sending signals to the child process, restarting the child process, and
// gracefully terminating the child process.
func New(i *NewInput) (*Child, error) {
if i == nil {
i = new(NewInput)
}
if len(i.Command) == 0 {
return nil, ErrMissingCommand
}
child := &Child{
stdin: i.Stdin,
stdout: i.Stdout,
stderr: i.Stderr,
command: i.Command,
args: i.Args,
env: i.Env,
timeout: i.Timeout,
reloadSignal: i.ReloadSignal,
killSignal: i.KillSignal,
killTimeout: i.KillTimeout,
splay: i.Splay,
stopCh: make(chan struct{}, 1),
}
return child, nil
}
// ExitCh returns the current exit channel for this child process. This channel
// may change if the process is restarted, so implementers must not cache this
// value.
func (c *Child) ExitCh() <-chan int {
c.RLock()
defer c.RUnlock()
return c.exitCh
}
// Pid returns the pid of the child process. If no child process exists, 0 is
// returned.
func (c *Child) Pid() int {
c.RLock()
defer c.RUnlock()
return c.pid()
}
// Command returns the human-formatted command with arguments.
func (c *Child) Command() string {
list := append([]string{c.command}, c.args...)
return strings.Join(list, " ")
}
// Start starts and begins execution of the child process. A buffered channel
// is returned which is where the command's exit code will be returned upon
// exit. Any errors that occur prior to starting the command will be returned
// as the second error argument, but any errors returned by the command after
// execution will be returned as a non-zero value over the exit code channel.
func (c *Child) Start() error {
log.Printf("[INFO] (child) spawning: %s", c.Command())
c.Lock()
defer c.Unlock()
return c.start()
}
// Signal sends the signal to the child process, returning any errors that
// occur.
func (c *Child) Signal(s os.Signal) error {
log.Printf("[INFO] (child) receiving signal %q", s.String())
c.RLock()
defer c.RUnlock()
return c.signal(s)
}
// Reload sends the reload signal to the child process and does not wait for a
// response. If no reload signal was provided, the process is restarted and
// replaces the process attached to this Child.
func (c *Child) Reload() error {
if c.reloadSignal == nil {
log.Printf("[INFO] (child) restarting process")
// Take a full lock because start is going to replace the process. We also
// want to make sure that no other routines attempt to send reload signals
// during this transition.
c.Lock()
defer c.Unlock()
c.kill(false)
return c.start()
}
log.Printf("[INFO] (child) reloading process")
// We only need a read lock here because neither the process nor the exit
// channel are changing.
c.RLock()
defer c.RUnlock()
return c.reload()
}
// Kill sends the kill signal to the child process and waits for successful
// termination. If no kill signal is defined, the process is killed with the
// most aggressive kill signal. If the process does not gracefully stop within
// the provided KillTimeout, the process is force-killed. If a splay was
// provided, this function will sleep for a random period of time between 0 and
// the provided splay value to reduce the thundering herd problem. This function
// does not return any errors because it guarantees the process will be dead by
// the return of the function call.
func (c *Child) Kill() {
log.Printf("[INFO] (child) killing process")
c.Lock()
defer c.Unlock()
c.kill(false)
}
// Stop behaves almost identical to Kill except it suppresses future processes
// from being started by this child and it prevents the killing of the child
// process from sending its value back up the exit channel. This is useful
// when doing a graceful shutdown of an application.
func (c *Child) Stop() {
c.internalStop(false)
}
// StopImmediately behaves almost identical to Stop except it does not wait
// for any random splay if configured. This is used for performing a fast
// shutdown of consul-template and its children when a kill signal is received.
func (c *Child) StopImmediately() {
c.internalStop(true)
}
func (c *Child) internalStop(immediately bool) {
log.Printf("[INFO] (child) stopping process")
c.Lock()
defer c.Unlock()
c.stopLock.Lock()
defer c.stopLock.Unlock()
if c.stopped {
log.Printf("[WARN] (child) already stopped")
return
}
c.kill(immediately)
close(c.stopCh)
c.stopped = true
}
func (c *Child) start() error {
cmd := exec.Command(c.command, c.args...)
cmd.Stdin = c.stdin
cmd.Stdout = c.stdout
cmd.Stderr = c.stderr
cmd.Env = c.env
if err := cmd.Start(); err != nil {
return err
}
c.cmd = cmd
// Create a new exitCh so that previously invoked commands (if any) don't
// cause us to exit, and start a goroutine to wait for that process to end.
exitCh := make(chan int, 1)
go func() {
var code int
err := cmd.Wait()
if err == nil {
code = ExitCodeOK
} else {
code = ExitCodeError
if exiterr, ok := err.(*exec.ExitError); ok {
if status, ok := exiterr.Sys().(syscall.WaitStatus); ok {
code = status.ExitStatus()
}
}
}
// If the child is in the process of killing, do not send a response back
// down the exit channel.
c.stopLock.RLock()
defer c.stopLock.RUnlock()
if c.stopped {
return
}
select {
case <-c.stopCh:
case exitCh <- code:
}
}()
c.exitCh = exitCh
// If a timeout was given, start the timer to wait for the child to exit
if c.timeout != 0 {
select {
case code := <-exitCh:
if code != 0 {
return fmt.Errorf(
"command exited with a non-zero exit status:\n"+
"\n"+
" %s\n"+
"\n"+
"This is assumed to be a failure. Please ensure the command\n"+
"exits with a zero exit status.",
c.Command(),
)
}
case <-time.After(c.timeout):
// Force-kill the process
c.stopLock.Lock()
defer c.stopLock.Unlock()
if c.cmd != nil && c.cmd.Process != nil {
c.cmd.Process.Kill()
}
return fmt.Errorf(
"command did not exit within %q:\n"+
"\n"+
" %s\n"+
"\n"+
"Commands must exit in a timely manner in order for processing to\n"+
"continue. Consider using a process supervisor or utilizing the\n"+
"built-in exec mode instead.",
c.timeout,
c.Command(),
)
}
}
return nil
}
func (c *Child) pid() int {
if !c.running() {
return 0
}
return c.cmd.Process.Pid
}
func (c *Child) signal(s os.Signal) error {
if !c.running() {
return nil
}
return c.cmd.Process.Signal(s)
}
func (c *Child) reload() error {
select {
case <-c.stopCh:
case <-c.randomSplay():
}
return c.signal(c.reloadSignal)
}
func (c *Child) kill(immediately bool) {
if !c.running() {
return
}
exited := false
process := c.cmd.Process
if c.cmd.ProcessState != nil {
log.Printf("[DEBUG] (child) Kill() called but process dead; not waiting for splay.")
} else if immediately {
log.Printf("[DEBUG] (child) Kill() called but performing immediate shutdown; not waiting for splay.")
} else {
select {
case <-c.stopCh:
case <-c.randomSplay():
}
}
if c.killSignal != nil {
if err := process.Signal(c.killSignal); err == nil {
// Wait a few seconds for it to exit
killCh := make(chan struct{}, 1)
go func() {
defer close(killCh)
process.Wait()
}()
select {
case <-c.stopCh:
case <-killCh:
exited = true
case <-time.After(c.killTimeout):
}
}
}
if !exited {
process.Kill()
}
c.cmd = nil
}
func (c *Child) running() bool {
return c.cmd != nil && c.cmd.Process != nil
}
func (c *Child) randomSplay() <-chan time.Time {
if c.splay == 0 {
return time.After(0)
}
ns := c.splay.Nanoseconds()
offset := rand.Int63n(ns)
t := time.Duration(offset)
log.Printf("[DEBUG] (child) waiting %.2fs for random splay", t.Seconds())
return time.After(t)
}

View File

@ -0,0 +1,142 @@
package config
import (
"errors"
"fmt"
"strings"
)
var (
// ErrAuthStringEmpty is the error returned with authentication is provided,
// but empty.
ErrAuthStringEmpty = errors.New("auth: cannot be empty")
)
// AuthConfig is the HTTP basic authentication data.
type AuthConfig struct {
Enabled *bool `mapstructure:"enabled"`
Username *string `mapstructure:"username"`
Password *string `mapstructure:"password"`
}
// DefaultAuthConfig is the default configuration.
func DefaultAuthConfig() *AuthConfig {
return &AuthConfig{}
}
// ParseAuthConfig parses the auth into username:password.
func ParseAuthConfig(s string) (*AuthConfig, error) {
if s == "" {
return nil, ErrAuthStringEmpty
}
var a AuthConfig
if strings.Contains(s, ":") {
split := strings.SplitN(s, ":", 2)
a.Username = String(split[0])
a.Password = String(split[1])
} else {
a.Username = String(s)
}
return &a, nil
}
// Copy returns a deep copy of this configuration.
func (c *AuthConfig) Copy() *AuthConfig {
if c == nil {
return nil
}
var o AuthConfig
o.Enabled = c.Enabled
o.Username = c.Username
o.Password = c.Password
return &o
}
// Merge combines all values in this configuration with the values in the other
// configuration, with values in the other configuration taking precedence.
// Maps and slices are merged, most other values are overwritten. Complex
// structs define their own merge functionality.
func (c *AuthConfig) Merge(o *AuthConfig) *AuthConfig {
if c == nil {
if o == nil {
return nil
}
return o.Copy()
}
if o == nil {
return c.Copy()
}
r := c.Copy()
if o.Enabled != nil {
r.Enabled = o.Enabled
}
if o.Username != nil {
r.Username = o.Username
}
if o.Password != nil {
r.Password = o.Password
}
return r
}
// Finalize ensures there no nil pointers.
func (c *AuthConfig) Finalize() {
if c.Enabled == nil {
c.Enabled = Bool(false ||
StringPresent(c.Username) ||
StringPresent(c.Password))
}
if c.Username == nil {
c.Username = String("")
}
if c.Password == nil {
c.Password = String("")
}
if c.Enabled == nil {
c.Enabled = Bool(*c.Username != "" || *c.Password != "")
}
}
// GoString defines the printable version of this struct.
func (c *AuthConfig) GoString() string {
if c == nil {
return "(*AuthConfig)(nil)"
}
return fmt.Sprintf("&AuthConfig{"+
"Enabled:%s, "+
"Username:%s, "+
"Password:%s"+
"}",
BoolGoString(c.Enabled),
StringGoString(c.Username),
StringGoString(c.Password),
)
}
// String is the string representation of this authentication. If authentication
// is not enabled, this returns the empty string. The username and password will
// be separated by a colon.
func (c *AuthConfig) String() string {
if !BoolVal(c.Enabled) {
return ""
}
if c.Password != nil {
return fmt.Sprintf("%s:%s", StringVal(c.Username), StringVal(c.Password))
}
return StringVal(c.Username)
}

View File

@ -0,0 +1,606 @@
package config
import (
"fmt"
"io/ioutil"
"log"
"os"
"path/filepath"
"reflect"
"strconv"
"strings"
"syscall"
"time"
"github.com/hashicorp/consul-template/signals"
"github.com/hashicorp/hcl"
homedir "github.com/mitchellh/go-homedir"
"github.com/mitchellh/mapstructure"
"github.com/pkg/errors"
)
const (
// DefaultLogLevel is the default logging level.
DefaultLogLevel = "WARN"
// DefaultMaxStale is the default staleness permitted. This enables stale
// queries by default for performance reasons.
DefaultMaxStale = 2 * time.Second
// DefaultReloadSignal is the default signal for reload.
DefaultReloadSignal = syscall.SIGHUP
// DefaultKillSignal is the default signal for termination.
DefaultKillSignal = syscall.SIGINT
)
var (
// homePath is the location to the user's home directory.
homePath, _ = homedir.Dir()
)
// Config is used to configure Consul Template
type Config struct {
// Consul is the configuration for connecting to a Consul cluster.
Consul *ConsulConfig `mapstructure:"consul"`
// Dedup is used to configure the dedup settings
Dedup *DedupConfig `mapstructure:"deduplicate"`
// Exec is the configuration for exec/supervise mode.
Exec *ExecConfig `mapstructure:"exec"`
// KillSignal is the signal to listen for a graceful terminate event.
KillSignal *os.Signal `mapstructure:"kill_signal"`
// LogLevel is the level with which to log for this config.
LogLevel *string `mapstructure:"log_level"`
// MaxStale is the maximum amount of time for staleness from Consul as given
// by LastContact. If supplied, Consul Template will query all servers instead
// of just the leader.
MaxStale *time.Duration `mapstructure:"max_stale"`
// PidFile is the path on disk where a PID file should be written containing
// this processes PID.
PidFile *string `mapstructure:"pid_file"`
// ReloadSignal is the signal to listen for a reload event.
ReloadSignal *os.Signal `mapstructure:"reload_signal"`
// Syslog is the configuration for syslog.
Syslog *SyslogConfig `mapstructure:"syslog"`
// Templates is the list of templates.
Templates *TemplateConfigs `mapstructure:"template"`
// Vault is the configuration for connecting to a vault server.
Vault *VaultConfig `mapstructure:"vault"`
// Wait is the quiescence timers.
Wait *WaitConfig `mapstructure:"wait"`
// Additional command line options
// Run once, executing each template exactly once, and exit
Once bool
}
// Copy returns a deep copy of the current configuration. This is useful because
// the nested data structures may be shared.
func (c *Config) Copy() *Config {
if c == nil {
return nil
}
var o Config
o.Consul = c.Consul
if c.Consul != nil {
o.Consul = c.Consul.Copy()
}
if c.Dedup != nil {
o.Dedup = c.Dedup.Copy()
}
if c.Exec != nil {
o.Exec = c.Exec.Copy()
}
o.KillSignal = c.KillSignal
o.LogLevel = c.LogLevel
o.MaxStale = c.MaxStale
o.PidFile = c.PidFile
o.ReloadSignal = c.ReloadSignal
if c.Syslog != nil {
o.Syslog = c.Syslog.Copy()
}
if c.Templates != nil {
o.Templates = c.Templates.Copy()
}
if c.Vault != nil {
o.Vault = c.Vault.Copy()
}
if c.Wait != nil {
o.Wait = c.Wait.Copy()
}
o.Once = c.Once
return &o
}
// Merge merges the values in config into this config object. Values in the
// config object overwrite the values in c.
func (c *Config) Merge(o *Config) *Config {
if c == nil {
if o == nil {
return nil
}
return o.Copy()
}
if o == nil {
return c.Copy()
}
r := c.Copy()
if o.Consul != nil {
r.Consul = r.Consul.Merge(o.Consul)
}
if o.Dedup != nil {
r.Dedup = r.Dedup.Merge(o.Dedup)
}
if o.Exec != nil {
r.Exec = r.Exec.Merge(o.Exec)
}
if o.KillSignal != nil {
r.KillSignal = o.KillSignal
}
if o.LogLevel != nil {
r.LogLevel = o.LogLevel
}
if o.MaxStale != nil {
r.MaxStale = o.MaxStale
}
if o.PidFile != nil {
r.PidFile = o.PidFile
}
if o.ReloadSignal != nil {
r.ReloadSignal = o.ReloadSignal
}
if o.Syslog != nil {
r.Syslog = r.Syslog.Merge(o.Syslog)
}
if o.Templates != nil {
r.Templates = r.Templates.Merge(o.Templates)
}
if o.Vault != nil {
r.Vault = r.Vault.Merge(o.Vault)
}
if o.Wait != nil {
r.Wait = r.Wait.Merge(o.Wait)
}
r.Once = o.Once
return r
}
// Parse parses the given string contents as a config
func Parse(s string) (*Config, error) {
var shadow interface{}
if err := hcl.Decode(&shadow, s); err != nil {
return nil, errors.Wrap(err, "error decoding config")
}
// Convert to a map and flatten the keys we want to flatten
parsed, ok := shadow.(map[string]interface{})
if !ok {
return nil, errors.New("error converting config")
}
flattenKeys(parsed, []string{
"auth",
"consul",
"consul.auth",
"consul.retry",
"consul.ssl",
"consul.transport",
"deduplicate",
"env",
"exec",
"exec.env",
"ssl",
"syslog",
"vault",
"vault.retry",
"vault.ssl",
"vault.transport",
"wait",
})
// FlattenFlatten keys belonging to the templates. We cannot do this above
// because it is an array of templates.
if templates, ok := parsed["template"].([]map[string]interface{}); ok {
for _, template := range templates {
flattenKeys(template, []string{
"env",
"exec",
"exec.env",
"wait",
})
}
}
// Create a new, empty config
var c Config
// Use mapstructure to populate the basic config fields
var md mapstructure.Metadata
decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
DecodeHook: mapstructure.ComposeDecodeHookFunc(
ConsulStringToStructFunc(),
StringToFileModeFunc(),
signals.StringToSignalFunc(),
StringToWaitDurationHookFunc(),
mapstructure.StringToSliceHookFunc(","),
mapstructure.StringToTimeDurationHookFunc(),
),
ErrorUnused: true,
Metadata: &md,
Result: &c,
})
if err != nil {
return nil, errors.Wrap(err, "mapstructure decoder creation failed")
}
if err := decoder.Decode(parsed); err != nil {
return nil, errors.Wrap(err, "mapstructure decode failed")
}
return &c, nil
}
// Must returns a config object that must compile. If there are any errors, this
// function will panic. This is most useful in testing or constants.
func Must(s string) *Config {
c, err := Parse(s)
if err != nil {
log.Fatal(err)
}
return c
}
// TestConfig returns a default, finalized config, with the provided
// configuration taking precedence.
func TestConfig(c *Config) *Config {
d := DefaultConfig().Merge(c)
d.Finalize()
return d
}
// FromFile reads the configuration file at the given path and returns a new
// Config struct with the data populated.
func FromFile(path string) (*Config, error) {
c, err := ioutil.ReadFile(path)
if err != nil {
return nil, errors.Wrap(err, "from file: "+path)
}
config, err := Parse(string(c))
if err != nil {
return nil, errors.Wrap(err, "from file: "+path)
}
return config, nil
}
// FromPath iterates and merges all configuration files in a given
// directory, returning the resulting config.
func FromPath(path string) (*Config, error) {
// Ensure the given filepath exists
if _, err := os.Stat(path); os.IsNotExist(err) {
return nil, errors.Wrap(err, "missing file/folder: "+path)
}
// Check if a file was given or a path to a directory
stat, err := os.Stat(path)
if err != nil {
return nil, errors.Wrap(err, "failed stating file: "+path)
}
// Recursively parse directories, single load files
if stat.Mode().IsDir() {
// Ensure the given filepath has at least one config file
_, err := ioutil.ReadDir(path)
if err != nil {
return nil, errors.Wrap(err, "failed listing dir: "+path)
}
// Create a blank config to merge off of
var c *Config
// Potential bug: Walk does not follow symlinks!
err = filepath.Walk(path, func(path string, info os.FileInfo, err error) error {
// If WalkFunc had an error, just return it
if err != nil {
return err
}
// Do nothing for directories
if info.IsDir() {
return nil
}
// Parse and merge the config
newConfig, err := FromFile(path)
if err != nil {
return err
}
c = c.Merge(newConfig)
return nil
})
if err != nil {
return nil, errors.Wrap(err, "walk error")
}
return c, nil
} else if stat.Mode().IsRegular() {
return FromFile(path)
}
return nil, fmt.Errorf("unknown filetype: %q", stat.Mode().String())
}
// GoString defines the printable version of this struct.
func (c *Config) GoString() string {
if c == nil {
return "(*Config)(nil)"
}
return fmt.Sprintf("&Config{"+
"Consul:%#v, "+
"Dedup:%#v, "+
"Exec:%#v, "+
"KillSignal:%s, "+
"LogLevel:%s, "+
"MaxStale:%s, "+
"PidFile:%s, "+
"ReloadSignal:%s, "+
"Syslog:%#v, "+
"Templates:%#v, "+
"Vault:%#v, "+
"Wait:%#v,"+
"Once:%#v"+
"}",
c.Consul,
c.Dedup,
c.Exec,
SignalGoString(c.KillSignal),
StringGoString(c.LogLevel),
TimeDurationGoString(c.MaxStale),
StringGoString(c.PidFile),
SignalGoString(c.ReloadSignal),
c.Syslog,
c.Templates,
c.Vault,
c.Wait,
c.Once,
)
}
// Show diff between 2 Configs, useful in tests
func (expected *Config) Diff(actual *Config) string {
var b strings.Builder
fmt.Fprintf(&b, "\n")
ve := reflect.ValueOf(*expected)
va := reflect.ValueOf(*actual)
ct := ve.Type()
for i := 0; i < ve.NumField(); i++ {
fc := ve.Field(i)
fo := va.Field(i)
if !reflect.DeepEqual(fc.Interface(), fo.Interface()) {
fmt.Fprintf(&b, "%s:\n", ct.Field(i).Name)
fmt.Fprintf(&b, "\texp: %#v\n", fc.Interface())
fmt.Fprintf(&b, "\tact: %#v\n", fo.Interface())
}
}
return b.String()
}
// DefaultConfig returns the default configuration struct. Certain environment
// variables may be set which control the values for the default configuration.
func DefaultConfig() *Config {
return &Config{
Consul: DefaultConsulConfig(),
Dedup: DefaultDedupConfig(),
Exec: DefaultExecConfig(),
Syslog: DefaultSyslogConfig(),
Templates: DefaultTemplateConfigs(),
Vault: DefaultVaultConfig(),
Wait: DefaultWaitConfig(),
}
}
// Finalize ensures all configuration options have the default values, so it
// is safe to dereference the pointers later down the line. It also
// intelligently tries to activate stanzas that should be "enabled" because
// data was given, but the user did not explicitly add "Enabled: true" to the
// configuration.
func (c *Config) Finalize() {
if c == nil {
return
}
if c.Consul == nil {
c.Consul = DefaultConsulConfig()
}
c.Consul.Finalize()
if c.Dedup == nil {
c.Dedup = DefaultDedupConfig()
}
c.Dedup.Finalize()
if c.Exec == nil {
c.Exec = DefaultExecConfig()
}
c.Exec.Finalize()
if c.KillSignal == nil {
c.KillSignal = Signal(DefaultKillSignal)
}
if c.LogLevel == nil {
c.LogLevel = stringFromEnv([]string{
"CT_LOG",
"CONSUL_TEMPLATE_LOG",
}, DefaultLogLevel)
}
if c.MaxStale == nil {
c.MaxStale = TimeDuration(DefaultMaxStale)
}
if c.PidFile == nil {
c.PidFile = String("")
}
if c.ReloadSignal == nil {
c.ReloadSignal = Signal(DefaultReloadSignal)
}
if c.Syslog == nil {
c.Syslog = DefaultSyslogConfig()
}
c.Syslog.Finalize()
if c.Templates == nil {
c.Templates = DefaultTemplateConfigs()
}
c.Templates.Finalize()
if c.Vault == nil {
c.Vault = DefaultVaultConfig()
}
c.Vault.Finalize()
if c.Wait == nil {
c.Wait = DefaultWaitConfig()
}
c.Wait.Finalize()
// disable Wait if -once was specified
if c.Once {
c.Wait = &WaitConfig{Enabled: Bool(false)}
}
}
func stringFromEnv(list []string, def string) *string {
for _, s := range list {
if v := os.Getenv(s); v != "" {
return String(strings.TrimSpace(v))
}
}
return String(def)
}
func stringFromFile(list []string, def string) *string {
for _, s := range list {
c, err := ioutil.ReadFile(s)
if err == nil {
return String(strings.TrimSpace(string(c)))
}
}
return String(def)
}
func antiboolFromEnv(list []string, def bool) *bool {
for _, s := range list {
if v := os.Getenv(s); v != "" {
b, err := strconv.ParseBool(v)
if err == nil {
return Bool(!b)
}
}
}
return Bool(def)
}
func boolFromEnv(list []string, def bool) *bool {
for _, s := range list {
if v := os.Getenv(s); v != "" {
b, err := strconv.ParseBool(v)
if err == nil {
return Bool(b)
}
}
}
return Bool(def)
}
// flattenKeys is a function that takes a map[string]interface{} and recursively
// flattens any keys that are a []map[string]interface{} where the key is in the
// given list of keys.
func flattenKeys(m map[string]interface{}, keys []string) {
keyMap := make(map[string]struct{})
for _, key := range keys {
keyMap[key] = struct{}{}
}
var flatten func(map[string]interface{}, string)
flatten = func(m map[string]interface{}, parent string) {
for k, v := range m {
// Calculate the map key, since it could include a parent.
mapKey := k
if parent != "" {
mapKey = parent + "." + k
}
if _, ok := keyMap[mapKey]; !ok {
continue
}
switch typed := v.(type) {
case []map[string]interface{}:
if len(typed) > 0 {
last := typed[len(typed)-1]
flatten(last, mapKey)
m[k] = last
} else {
m[k] = nil
}
case map[string]interface{}:
flatten(typed, mapKey)
m[k] = typed
default:
m[k] = v
}
}
}
flatten(m, "")
}

View File

@ -0,0 +1,172 @@
package config
import "fmt"
// ConsulConfig contains the configurations options for connecting to a
// Consul cluster.
type ConsulConfig struct {
// Address is the address of the Consul server. It may be an IP or FQDN.
Address *string
// Auth is the HTTP basic authentication for communicating with Consul.
Auth *AuthConfig `mapstructure:"auth"`
// Retry is the configuration for specifying how to behave on failure.
Retry *RetryConfig `mapstructure:"retry"`
// SSL indicates we should use a secure connection while talking to
// Consul. This requires Consul to be configured to serve HTTPS.
SSL *SSLConfig `mapstructure:"ssl"`
// Token is the token to communicate with Consul securely.
Token *string
// Transport configures the low-level network connection details.
Transport *TransportConfig `mapstructure:"transport"`
}
// DefaultConsulConfig returns a configuration that is populated with the
// default values.
func DefaultConsulConfig() *ConsulConfig {
return &ConsulConfig{
Auth: DefaultAuthConfig(),
Retry: DefaultRetryConfig(),
SSL: DefaultSSLConfig(),
Transport: DefaultTransportConfig(),
}
}
// Copy returns a deep copy of this configuration.
func (c *ConsulConfig) Copy() *ConsulConfig {
if c == nil {
return nil
}
var o ConsulConfig
o.Address = c.Address
if c.Auth != nil {
o.Auth = c.Auth.Copy()
}
if c.Retry != nil {
o.Retry = c.Retry.Copy()
}
if c.SSL != nil {
o.SSL = c.SSL.Copy()
}
o.Token = c.Token
if c.Transport != nil {
o.Transport = c.Transport.Copy()
}
return &o
}
// Merge combines all values in this configuration with the values in the other
// configuration, with values in the other configuration taking precedence.
// Maps and slices are merged, most other values are overwritten. Complex
// structs define their own merge functionality.
func (c *ConsulConfig) Merge(o *ConsulConfig) *ConsulConfig {
if c == nil {
if o == nil {
return nil
}
return o.Copy()
}
if o == nil {
return c.Copy()
}
r := c.Copy()
if o.Address != nil {
r.Address = o.Address
}
if o.Auth != nil {
r.Auth = r.Auth.Merge(o.Auth)
}
if o.Retry != nil {
r.Retry = r.Retry.Merge(o.Retry)
}
if o.SSL != nil {
r.SSL = r.SSL.Merge(o.SSL)
}
if o.Token != nil {
r.Token = o.Token
}
if o.Transport != nil {
r.Transport = r.Transport.Merge(o.Transport)
}
return r
}
// Finalize ensures there no nil pointers.
func (c *ConsulConfig) Finalize() {
if c.Address == nil {
c.Address = stringFromEnv([]string{
"CONSUL_HTTP_ADDR",
}, "")
}
if c.Auth == nil {
c.Auth = DefaultAuthConfig()
}
c.Auth.Finalize()
if c.Retry == nil {
c.Retry = DefaultRetryConfig()
}
c.Retry.Finalize()
if c.SSL == nil {
c.SSL = DefaultSSLConfig()
}
c.SSL.Finalize()
if c.Token == nil {
c.Token = stringFromEnv([]string{
"CONSUL_TOKEN",
"CONSUL_HTTP_TOKEN",
}, "")
}
if c.Transport == nil {
c.Transport = DefaultTransportConfig()
}
c.Transport.Finalize()
}
// GoString defines the printable version of this struct.
func (c *ConsulConfig) GoString() string {
if c == nil {
return "(*ConsulConfig)(nil)"
}
return fmt.Sprintf("&ConsulConfig{"+
"Address:%s, "+
"Auth:%#v, "+
"Retry:%#v, "+
"SSL:%#v, "+
"Token:%t, "+
"Transport:%#v"+
"}",
StringGoString(c.Address),
c.Auth,
c.Retry,
c.SSL,
StringPresent(c.Token),
c.Transport,
)
}

View File

@ -0,0 +1,197 @@
package config
import (
"fmt"
"os"
"time"
"github.com/hashicorp/consul-template/signals"
)
// Bool returns a pointer to the given bool.
func Bool(b bool) *bool {
return &b
}
// BoolVal returns the value of the boolean at the pointer, or false if the
// pointer is nil.
func BoolVal(b *bool) bool {
if b == nil {
return false
}
return *b
}
// BoolGoString returns the value of the boolean for printing in a string.
func BoolGoString(b *bool) string {
if b == nil {
return "(*bool)(nil)"
}
return fmt.Sprintf("%t", *b)
}
// BoolPresent returns a boolean indicating if the pointer is nil, or if the
// pointer is pointing to the zero value..
func BoolPresent(b *bool) bool {
if b == nil {
return false
}
return true
}
// FileMode returns a pointer to the given os.FileMode.
func FileMode(o os.FileMode) *os.FileMode {
return &o
}
// FileModeVal returns the value of the os.FileMode at the pointer, or 0 if the
// pointer is nil.
func FileModeVal(o *os.FileMode) os.FileMode {
if o == nil {
return 0
}
return *o
}
// FileModeGoString returns the value of the os.FileMode for printing in a
// string.
func FileModeGoString(o *os.FileMode) string {
if o == nil {
return "(*os.FileMode)(nil)"
}
return fmt.Sprintf("%q", *o)
}
// FileModePresent returns a boolean indicating if the pointer is nil, or if
// the pointer is pointing to the zero value.
func FileModePresent(o *os.FileMode) bool {
if o == nil {
return false
}
return *o != 0
}
// Int returns a pointer to the given int.
func Int(i int) *int {
return &i
}
// IntVal returns the value of the int at the pointer, or 0 if the pointer is
// nil.
func IntVal(i *int) int {
if i == nil {
return 0
}
return *i
}
// IntGoString returns the value of the int for printing in a string.
func IntGoString(i *int) string {
if i == nil {
return "(*int)(nil)"
}
return fmt.Sprintf("%d", *i)
}
// IntPresent returns a boolean indicating if the pointer is nil, or if the
// pointer is pointing to the zero value.
func IntPresent(i *int) bool {
if i == nil {
return false
}
return *i != 0
}
// Signal returns a pointer to the given os.Signal.
func Signal(s os.Signal) *os.Signal {
return &s
}
// SignalVal returns the value of the os.Signal at the pointer, or 0 if the
// pointer is nil.
func SignalVal(s *os.Signal) os.Signal {
if s == nil {
return (os.Signal)(nil)
}
return *s
}
// SignalGoString returns the value of the os.Signal for printing in a string.
func SignalGoString(s *os.Signal) string {
if s == nil {
return "(*os.Signal)(nil)"
}
if *s == nil {
return "<nil>"
}
return fmt.Sprintf("%q", *s)
}
// SignalPresent returns a boolean indicating if the pointer is nil, or if the pointer is pointing to the zero value..
func SignalPresent(s *os.Signal) bool {
if s == nil {
return false
}
return *s != signals.SIGNIL
}
// String returns a pointer to the given string.
func String(s string) *string {
return &s
}
// StringVal returns the value of the string at the pointer, or "" if the
// pointer is nil.
func StringVal(s *string) string {
if s == nil {
return ""
}
return *s
}
// StringGoString returns the value of the string for printing in a string.
func StringGoString(s *string) string {
if s == nil {
return "(*string)(nil)"
}
return fmt.Sprintf("%q", *s)
}
// StringPresent returns a boolean indicating if the pointer is nil, or if the pointer is pointing to the zero value..
func StringPresent(s *string) bool {
if s == nil {
return false
}
return *s != ""
}
// TimeDuration returns a pointer to the given time.Duration.
func TimeDuration(t time.Duration) *time.Duration {
return &t
}
// TimeDurationVal returns the value of the string at the pointer, or 0 if the
// pointer is nil.
func TimeDurationVal(t *time.Duration) time.Duration {
if t == nil {
return time.Duration(0)
}
return *t
}
// TimeDurationGoString returns the value of the time.Duration for printing in a
// string.
func TimeDurationGoString(t *time.Duration) string {
if t == nil {
return "(*time.Duration)(nil)"
}
return fmt.Sprintf("%s", t)
}
// TimeDurationPresent returns a boolean indicating if the pointer is nil, or if the pointer is pointing to the zero value..
func TimeDurationPresent(t *time.Duration) bool {
if t == nil {
return false
}
return *t != 0
}

View File

@ -0,0 +1,132 @@
package config
import (
"fmt"
"time"
)
const (
// DefaultDedupPrefix is the default prefix used for deduplication mode.
DefaultDedupPrefix = "consul-template/dedup/"
// DefaultDedupTTL is the default TTL for deduplicate mode.
DefaultDedupTTL = 15 * time.Second
// DefaultDedupMaxStale is the default max staleness for the deduplication
// manager.
DefaultDedupMaxStale = DefaultMaxStale
)
// DedupConfig is used to enable the de-duplication mode, which depends
// on electing a leader per-template and watching of a key. This is used
// to reduce the cost of many instances of CT running the same template.
type DedupConfig struct {
// Controls if deduplication mode is enabled
Enabled *bool `mapstructure:"enabled"`
// MaxStale is the maximum amount of time to allow for stale queries.
MaxStale *time.Duration `mapstructure:"max_stale"`
// Controls the KV prefix used. Defaults to defaultDedupPrefix
Prefix *string `mapstructure:"prefix"`
// TTL is the Session TTL used for lock acquisition, defaults to 15 seconds.
TTL *time.Duration `mapstructure:"ttl"`
}
// DefaultDedupConfig returns a configuration that is populated with the
// default values.
func DefaultDedupConfig() *DedupConfig {
return &DedupConfig{}
}
// Copy returns a deep copy of this configuration.
func (c *DedupConfig) Copy() *DedupConfig {
if c == nil {
return nil
}
var o DedupConfig
o.Enabled = c.Enabled
o.MaxStale = c.MaxStale
o.Prefix = c.Prefix
o.TTL = c.TTL
return &o
}
// Merge combines all values in this configuration with the values in the other
// configuration, with values in the other configuration taking precedence.
// Maps and slices are merged, most other values are overwritten. Complex
// structs define their own merge functionality.
func (c *DedupConfig) Merge(o *DedupConfig) *DedupConfig {
if c == nil {
if o == nil {
return nil
}
return o.Copy()
}
if o == nil {
return c.Copy()
}
r := c.Copy()
if o.Enabled != nil {
r.Enabled = o.Enabled
}
if o.MaxStale != nil {
r.MaxStale = o.MaxStale
}
if o.Prefix != nil {
r.Prefix = o.Prefix
}
if o.TTL != nil {
r.TTL = o.TTL
}
return r
}
// Finalize ensures there no nil pointers.
func (c *DedupConfig) Finalize() {
if c.Enabled == nil {
c.Enabled = Bool(false ||
TimeDurationPresent(c.MaxStale) ||
StringPresent(c.Prefix) ||
TimeDurationPresent(c.TTL))
}
if c.MaxStale == nil {
c.MaxStale = TimeDuration(DefaultDedupMaxStale)
}
if c.Prefix == nil {
c.Prefix = String(DefaultDedupPrefix)
}
if c.TTL == nil {
c.TTL = TimeDuration(DefaultDedupTTL)
}
}
// GoString defines the printable version of this struct.
func (c *DedupConfig) GoString() string {
if c == nil {
return "(*DedupConfig)(nil)"
}
return fmt.Sprintf("&DedupConfig{"+
"Enabled:%s, "+
"MaxStale:%s, "+
"Prefix:%s, "+
"TTL:%s"+
"}",
BoolGoString(c.Enabled),
TimeDurationGoString(c.MaxStale),
StringGoString(c.Prefix),
TimeDurationGoString(c.TTL),
)
}

View File

@ -0,0 +1,209 @@
package config
import (
"fmt"
"os"
"path/filepath"
"strings"
)
// EnvConfig is an embeddable struct for things that accept environment
// variable filtering. You should not use this directly and it is only public
// for mapstructure's decoding.
type EnvConfig struct {
// BlacklistEnv specifies a list of environment variables to explicitly
// exclude from the list of environment variables populated to the child.
// If both WhitelistEnv and BlacklistEnv are provided, BlacklistEnv takes
// precedence over the values in WhitelistEnv.
Blacklist []string `mapstructure:"blacklist"`
// CustomEnv specifies custom environment variables to pass to the child
// process. These are provided programmatically, override any environment
// variables of the same name, are ignored from whitelist/blacklist, and
// are still included even if PristineEnv is set to true.
Custom []string `mapstructure:"custom"`
// PristineEnv specifies if the child process should inherit the parent's
// environment.
Pristine *bool `mapstructure:"pristine"`
// WhitelistEnv specifies a list of environment variables to exclusively
// include in the list of environment variables populated to the child.
Whitelist []string `mapstructure:"whitelist"`
}
// DefaultEnvConfig returns a configuration that is populated with the
// default values.
func DefaultEnvConfig() *EnvConfig {
return &EnvConfig{}
}
// Copy returns a deep copy of this configuration.
func (c *EnvConfig) Copy() *EnvConfig {
if c == nil {
return nil
}
var o EnvConfig
if c.Blacklist != nil {
o.Blacklist = append([]string{}, c.Blacklist...)
}
if c.Custom != nil {
o.Custom = append([]string{}, c.Custom...)
}
o.Pristine = c.Pristine
if c.Whitelist != nil {
o.Whitelist = append([]string{}, c.Whitelist...)
}
return &o
}
// Merge combines all values in this configuration with the values in the other
// configuration, with values in the other configuration taking precedence.
// Maps and slices are merged, most other values are overwritten. Complex
// structs define their own merge functionality.
func (c *EnvConfig) Merge(o *EnvConfig) *EnvConfig {
if c == nil {
if o == nil {
return nil
}
return o.Copy()
}
if o == nil {
return c.Copy()
}
r := c.Copy()
if o.Blacklist != nil {
r.Blacklist = append(r.Blacklist, o.Blacklist...)
}
if o.Custom != nil {
r.Custom = append(r.Custom, o.Custom...)
}
if o.Pristine != nil {
r.Pristine = o.Pristine
}
if o.Whitelist != nil {
r.Whitelist = append(r.Whitelist, o.Whitelist...)
}
return r
}
// Env calculates and returns the finalized environment for this exec
// configuration. It takes into account pristine, custom environment, whitelist,
// and blacklist values.
func (c *EnvConfig) Env() []string {
// In pristine mode, just return the custom environment. If the user did not
// specify a custom environment, just return the empty slice to force an
// empty environment. We cannot return nil here because the later call to
// os/exec will think we want to inherit the parent.
if BoolVal(c.Pristine) {
if len(c.Custom) > 0 {
return c.Custom
}
return []string{}
}
// Pull all the key-value pairs out of the environment
environ := os.Environ()
keys := make([]string, len(environ))
env := make(map[string]string, len(environ))
for i, v := range environ {
list := strings.SplitN(v, "=", 2)
keys[i] = list[0]
env[list[0]] = list[1]
}
// anyGlobMatch is a helper function which checks if any of the given globs
// match the string.
anyGlobMatch := func(s string, patterns []string) bool {
for _, pattern := range patterns {
if matched, _ := filepath.Match(pattern, s); matched {
return true
}
}
return false
}
// Pull out any envvars that match the whitelist.
if len(c.Whitelist) > 0 {
newKeys := make([]string, 0, len(keys))
for _, k := range keys {
if anyGlobMatch(k, c.Whitelist) {
newKeys = append(newKeys, k)
}
}
keys = newKeys
}
// Remove any envvars that match the blacklist.
if len(c.Blacklist) > 0 {
newKeys := make([]string, 0, len(keys))
for _, k := range keys {
if !anyGlobMatch(k, c.Blacklist) {
newKeys = append(newKeys, k)
}
}
keys = newKeys
}
// Build the final list using only the filtered keys.
finalEnv := make([]string, 0, len(keys)+len(c.Custom))
for _, k := range keys {
finalEnv = append(finalEnv, k+"="+env[k])
}
// Append remaining custom environment.
finalEnv = append(finalEnv, c.Custom...)
return finalEnv
}
// Finalize ensures there no nil pointers.
func (c *EnvConfig) Finalize() {
if c.Blacklist == nil {
c.Blacklist = []string{}
}
if c.Custom == nil {
c.Custom = []string{}
}
if c.Pristine == nil {
c.Pristine = Bool(false)
}
if c.Whitelist == nil {
c.Whitelist = []string{}
}
}
// GoString defines the printable version of this struct.
func (c *EnvConfig) GoString() string {
if c == nil {
return "(*EnvConfig)(nil)"
}
return fmt.Sprintf("&EnvConfig{"+
"Blacklist:%v, "+
"Custom:%v, "+
"Pristine:%s, "+
"Whitelist:%v"+
"}",
c.Blacklist,
c.Custom,
BoolGoString(c.Pristine),
c.Whitelist,
)
}

View File

@ -0,0 +1,216 @@
package config
import (
"fmt"
"os"
"syscall"
"time"
)
const (
// DefaultExecKillSignal is the default signal to send to the process to
// tell it to gracefully terminate.
DefaultExecKillSignal = syscall.SIGINT
// DefaultExecKillTimeout is the maximum amount of time to wait for the
// process to gracefully terminate before force-killing it.
DefaultExecKillTimeout = 30 * time.Second
// DefaultExecTimeout is the default amount of time to wait for a
// command to exit. By default, this is disabled, which means the command
// is allowed to run for an infinite amount of time.
DefaultExecTimeout = 0 * time.Second
)
var (
// DefaultExecReloadSignal is the default signal to send to the process to
// tell it to reload its configuration.
DefaultExecReloadSignal = (os.Signal)(nil)
)
// ExecConfig is used to configure the application when it runs in
// exec/supervise mode.
type ExecConfig struct {
// Command is the command to execute and watch as a child process.
Command *string `mapstructure:"command"`
// Enabled controls if this exec is enabled.
Enabled *bool `mapstructure:"enabled"`
// EnvConfig is the environmental customizations.
Env *EnvConfig `mapstructure:"env"`
// KillSignal is the signal to send to the command to kill it gracefully. The
// default value is "SIGTERM".
KillSignal *os.Signal `mapstructure:"kill_signal"`
// KillTimeout is the amount of time to give the process to cleanup before
// hard-killing it.
KillTimeout *time.Duration `mapstructure:"kill_timeout"`
// ReloadSignal is the signal to send to the child process when a template
// changes. This tells the child process that templates have
ReloadSignal *os.Signal `mapstructure:"reload_signal"`
// Splay is the maximum amount of random time to wait to signal or kill the
// process. By default this is disabled, but it can be set to low values to
// reduce the "thundering herd" problem where all tasks are restarted at once.
Splay *time.Duration `mapstructure:"splay"`
// Timeout is the maximum amount of time to wait for a command to complete.
// By default, this is 0, which means "wait forever".
Timeout *time.Duration `mapstructure:"timeout"`
}
// DefaultExecConfig returns a configuration that is populated with the
// default values.
func DefaultExecConfig() *ExecConfig {
return &ExecConfig{
Env: DefaultEnvConfig(),
}
}
// Copy returns a deep copy of this configuration.
func (c *ExecConfig) Copy() *ExecConfig {
if c == nil {
return nil
}
var o ExecConfig
o.Command = c.Command
o.Enabled = c.Enabled
if c.Env != nil {
o.Env = c.Env.Copy()
}
o.KillSignal = c.KillSignal
o.KillTimeout = c.KillTimeout
o.ReloadSignal = c.ReloadSignal
o.Splay = c.Splay
o.Timeout = c.Timeout
return &o
}
// Merge combines all values in this configuration with the values in the other
// configuration, with values in the other configuration taking precedence.
// Maps and slices are merged, most other values are overwritten. Complex
// structs define their own merge functionality.
func (c *ExecConfig) Merge(o *ExecConfig) *ExecConfig {
if c == nil {
if o == nil {
return nil
}
return o.Copy()
}
if o == nil {
return c.Copy()
}
r := c.Copy()
if o.Command != nil {
r.Command = o.Command
}
if o.Enabled != nil {
r.Enabled = o.Enabled
}
if o.Env != nil {
r.Env = r.Env.Merge(o.Env)
}
if o.KillSignal != nil {
r.KillSignal = o.KillSignal
}
if o.KillTimeout != nil {
r.KillTimeout = o.KillTimeout
}
if o.ReloadSignal != nil {
r.ReloadSignal = o.ReloadSignal
}
if o.Splay != nil {
r.Splay = o.Splay
}
if o.Timeout != nil {
r.Timeout = o.Timeout
}
return r
}
// Finalize ensures there no nil pointers.
func (c *ExecConfig) Finalize() {
if c.Enabled == nil {
c.Enabled = Bool(StringPresent(c.Command))
}
if c.Command == nil {
c.Command = String("")
}
if c.Env == nil {
c.Env = DefaultEnvConfig()
}
c.Env.Finalize()
if c.KillSignal == nil {
c.KillSignal = Signal(DefaultExecKillSignal)
}
if c.KillTimeout == nil {
c.KillTimeout = TimeDuration(DefaultExecKillTimeout)
}
if c.ReloadSignal == nil {
c.ReloadSignal = Signal(DefaultExecReloadSignal)
}
if c.Splay == nil {
c.Splay = TimeDuration(0 * time.Second)
}
if c.Timeout == nil {
c.Timeout = TimeDuration(DefaultExecTimeout)
}
}
// GoString defines the printable version of this struct.
func (c *ExecConfig) GoString() string {
if c == nil {
return "(*ExecConfig)(nil)"
}
return fmt.Sprintf("&ExecConfig{"+
"Command:%s, "+
"Enabled:%s, "+
"Env:%#v, "+
"KillSignal:%s, "+
"KillTimeout:%s, "+
"ReloadSignal:%s, "+
"Splay:%s, "+
"Timeout:%s"+
"}",
StringGoString(c.Command),
BoolGoString(c.Enabled),
c.Env,
SignalGoString(c.KillSignal),
TimeDurationGoString(c.KillTimeout),
SignalGoString(c.ReloadSignal),
TimeDurationGoString(c.Splay),
TimeDurationGoString(c.Timeout),
)
}

View File

@ -0,0 +1,75 @@
package config
import (
"log"
"os"
"reflect"
"strconv"
"github.com/mitchellh/mapstructure"
)
// StringToFileModeFunc returns a function that converts strings to os.FileMode
// value. This is designed to be used with mapstructure for parsing out a
// filemode value.
func StringToFileModeFunc() mapstructure.DecodeHookFunc {
return func(
f reflect.Type,
t reflect.Type,
data interface{}) (interface{}, error) {
if f.Kind() != reflect.String {
return data, nil
}
if t != reflect.TypeOf(os.FileMode(0)) {
return data, nil
}
// Convert it by parsing
v, err := strconv.ParseUint(data.(string), 8, 12)
if err != nil {
return data, err
}
return os.FileMode(v), nil
}
}
// StringToWaitDurationHookFunc returns a function that converts strings to wait
// value. This is designed to be used with mapstructure for parsing out a wait
// value.
func StringToWaitDurationHookFunc() mapstructure.DecodeHookFunc {
return func(
f reflect.Type,
t reflect.Type,
data interface{}) (interface{}, error) {
if f.Kind() != reflect.String {
return data, nil
}
if t != reflect.TypeOf(WaitConfig{}) {
return data, nil
}
// Convert it by parsing
return ParseWaitConfig(data.(string))
}
}
// ConsulStringToStructFunc checks if the value set for the key should actually
// be a struct and sets the appropriate value in the struct. This is for
// backwards-compatability with older versions of Consul Template.
func ConsulStringToStructFunc() mapstructure.DecodeHookFunc {
return func(
f reflect.Type,
t reflect.Type,
data interface{}) (interface{}, error) {
if t == reflect.TypeOf(ConsulConfig{}) && f.Kind() == reflect.String {
log.Println("[WARN] consul now accepts a stanza instead of a string. " +
"Update your configuration files and change consul = \"\" to " +
"consul { } instead.")
return &ConsulConfig{
Address: String(data.(string)),
}, nil
}
return data, nil
}
}

View File

@ -0,0 +1,170 @@
package config
import (
"fmt"
"math"
"time"
)
const (
// DefaultRetryAttempts is the default number of maximum retry attempts.
DefaultRetryAttempts = 12
// DefaultRetryBackoff is the default base for the exponential backoff
// algorithm.
DefaultRetryBackoff = 250 * time.Millisecond
// DefaultRetryMaxBackoff is the default maximum of backoff time
DefaultRetryMaxBackoff = 1 * time.Minute
)
// RetryFunc is the signature of a function that supports retries.
type RetryFunc func(int) (bool, time.Duration)
// RetryConfig is a shared configuration for upstreams that support retires on
// failure.
type RetryConfig struct {
// Attempts is the total number of maximum attempts to retry before letting
// the error fall through.
// 0 means unlimited.
Attempts *int
// Backoff is the base of the exponentialbackoff. This number will be
// multiplied by the next power of 2 on each iteration.
Backoff *time.Duration
// MaxBackoff is an upper limit to the sleep time between retries
// A MaxBackoff of zero means there is no limit to the exponential growth of the backoff.
MaxBackoff *time.Duration `mapstructure:"max_backoff"`
// Enabled signals if this retry is enabled.
Enabled *bool
}
// DefaultRetryConfig returns a configuration that is populated with the
// default values.
func DefaultRetryConfig() *RetryConfig {
return &RetryConfig{}
}
// Copy returns a deep copy of this configuration.
func (c *RetryConfig) Copy() *RetryConfig {
if c == nil {
return nil
}
var o RetryConfig
o.Attempts = c.Attempts
o.Backoff = c.Backoff
o.MaxBackoff = c.MaxBackoff
o.Enabled = c.Enabled
return &o
}
// Merge combines all values in this configuration with the values in the other
// configuration, with values in the other configuration taking precedence.
// Maps and slices are merged, most other values are overwritten. Complex
// structs define their own merge functionality.
func (c *RetryConfig) Merge(o *RetryConfig) *RetryConfig {
if c == nil {
if o == nil {
return nil
}
return o.Copy()
}
if o == nil {
return c.Copy()
}
r := c.Copy()
if o.Attempts != nil {
r.Attempts = o.Attempts
}
if o.Backoff != nil {
r.Backoff = o.Backoff
}
if o.MaxBackoff != nil {
r.MaxBackoff = o.MaxBackoff
}
if o.Enabled != nil {
r.Enabled = o.Enabled
}
return r
}
// RetryFunc returns the retry function associated with this configuration.
func (c *RetryConfig) RetryFunc() RetryFunc {
return func(retry int) (bool, time.Duration) {
if !BoolVal(c.Enabled) {
return false, 0
}
if IntVal(c.Attempts) > 0 && retry > IntVal(c.Attempts)-1 {
return false, 0
}
baseSleep := TimeDurationVal(c.Backoff)
maxSleep := TimeDurationVal(c.MaxBackoff)
if maxSleep > 0 {
attemptsTillMaxBackoff := int(math.Log2(maxSleep.Seconds() / baseSleep.Seconds()))
if retry > attemptsTillMaxBackoff {
return true, maxSleep
}
}
base := math.Pow(2, float64(retry))
sleep := time.Duration(base) * baseSleep
return true, sleep
}
}
// Finalize ensures there no nil pointers.
func (c *RetryConfig) Finalize() {
if c.Attempts == nil {
c.Attempts = Int(DefaultRetryAttempts)
}
if c.Backoff == nil {
c.Backoff = TimeDuration(DefaultRetryBackoff)
}
if c.MaxBackoff == nil {
c.MaxBackoff = TimeDuration(DefaultRetryMaxBackoff)
}
if c.Enabled == nil {
c.Enabled = Bool(true)
}
}
// GoString defines the printable version of this struct.
func (c *RetryConfig) GoString() string {
if c == nil {
return "(*RetryConfig)(nil)"
}
return fmt.Sprintf("&RetryConfig{"+
"Attempts:%s, "+
"Backoff:%s, "+
"MaxBackoff:%s, "+
"Enabled:%s"+
"}",
IntGoString(c.Attempts),
TimeDurationGoString(c.Backoff),
TimeDurationGoString(c.MaxBackoff),
BoolGoString(c.Enabled),
)
}

View File

@ -0,0 +1,153 @@
package config
import "fmt"
const (
// DefaultSSLVerify is the default value for SSL verification.
DefaultSSLVerify = true
)
// SSLConfig is the configuration for SSL.
type SSLConfig struct {
CaCert *string `mapstructure:"ca_cert"`
CaPath *string `mapstructure:"ca_path"`
Cert *string `mapstructure:"cert"`
Enabled *bool `mapstructure:"enabled"`
Key *string `mapstructure:"key"`
ServerName *string `mapstructure:"server_name"`
Verify *bool `mapstructure:"verify"`
}
// DefaultSSLConfig returns a configuration that is populated with the
// default values.
func DefaultSSLConfig() *SSLConfig {
return &SSLConfig{}
}
// Copy returns a deep copy of this configuration.
func (c *SSLConfig) Copy() *SSLConfig {
if c == nil {
return nil
}
var o SSLConfig
o.CaCert = c.CaCert
o.CaPath = c.CaPath
o.Cert = c.Cert
o.Enabled = c.Enabled
o.Key = c.Key
o.ServerName = c.ServerName
o.Verify = c.Verify
return &o
}
// Merge combines all values in this configuration with the values in the other
// configuration, with values in the other configuration taking precedence.
// Maps and slices are merged, most other values are overwritten. Complex
// structs define their own merge functionality.
func (c *SSLConfig) Merge(o *SSLConfig) *SSLConfig {
if c == nil {
if o == nil {
return nil
}
return o.Copy()
}
if o == nil {
return c.Copy()
}
r := c.Copy()
if o.Cert != nil {
r.Cert = o.Cert
}
if o.CaCert != nil {
r.CaCert = o.CaCert
}
if o.CaPath != nil {
r.CaPath = o.CaPath
}
if o.Enabled != nil {
r.Enabled = o.Enabled
}
if o.Key != nil {
r.Key = o.Key
}
if o.ServerName != nil {
r.ServerName = o.ServerName
}
if o.Verify != nil {
r.Verify = o.Verify
}
return r
}
// Finalize ensures there no nil pointers.
func (c *SSLConfig) Finalize() {
if c.Enabled == nil {
c.Enabled = Bool(false ||
StringPresent(c.Cert) ||
StringPresent(c.CaCert) ||
StringPresent(c.CaPath) ||
StringPresent(c.Key) ||
StringPresent(c.ServerName) ||
BoolPresent(c.Verify))
}
if c.Cert == nil {
c.Cert = String("")
}
if c.CaCert == nil {
c.CaCert = String("")
}
if c.CaPath == nil {
c.CaPath = String("")
}
if c.Key == nil {
c.Key = String("")
}
if c.ServerName == nil {
c.ServerName = String("")
}
if c.Verify == nil {
c.Verify = Bool(DefaultSSLVerify)
}
}
// GoString defines the printable version of this struct.
func (c *SSLConfig) GoString() string {
if c == nil {
return "(*SSLConfig)(nil)"
}
return fmt.Sprintf("&SSLConfig{"+
"CaCert:%s, "+
"CaPath:%s, "+
"Cert:%s, "+
"Enabled:%s, "+
"Key:%s, "+
"ServerName:%s, "+
"Verify:%s"+
"}",
StringGoString(c.CaCert),
StringGoString(c.CaPath),
StringGoString(c.Cert),
BoolGoString(c.Enabled),
StringGoString(c.Key),
StringGoString(c.ServerName),
BoolGoString(c.Verify),
)
}

View File

@ -0,0 +1,87 @@
package config
import "fmt"
const (
// DefaultSyslogFacility is the default facility to log to.
DefaultSyslogFacility = "LOCAL0"
)
// SyslogConfig is the configuration for syslog.
type SyslogConfig struct {
Enabled *bool `mapstructure:"enabled"`
Facility *string `mapstructure:"facility"`
}
// DefaultSyslogConfig returns a configuration that is populated with the
// default values.
func DefaultSyslogConfig() *SyslogConfig {
return &SyslogConfig{}
}
// Copy returns a deep copy of this configuration.
func (c *SyslogConfig) Copy() *SyslogConfig {
if c == nil {
return nil
}
var o SyslogConfig
o.Enabled = c.Enabled
o.Facility = c.Facility
return &o
}
// Merge combines all values in this configuration with the values in the other
// configuration, with values in the other configuration taking precedence.
// Maps and slices are merged, most other values are overwritten. Complex
// structs define their own merge functionality.
func (c *SyslogConfig) Merge(o *SyslogConfig) *SyslogConfig {
if c == nil {
if o == nil {
return nil
}
return o.Copy()
}
if o == nil {
return c.Copy()
}
r := c.Copy()
if o.Enabled != nil {
r.Enabled = o.Enabled
}
if o.Facility != nil {
r.Facility = o.Facility
}
return r
}
// Finalize ensures there no nil pointers.
func (c *SyslogConfig) Finalize() {
if c.Enabled == nil {
c.Enabled = Bool(StringPresent(c.Facility))
}
if c.Facility == nil {
c.Facility = String(DefaultSyslogFacility)
}
}
// GoString defines the printable version of this struct.
func (c *SyslogConfig) GoString() string {
if c == nil {
return "(*SyslogConfig)(nil)"
}
return fmt.Sprintf("&SyslogConfig{"+
"Enabled:%s, "+
"Facility:%s"+
"}",
BoolGoString(c.Enabled),
StringGoString(c.Facility),
)
}

View File

@ -0,0 +1,458 @@
package config
import (
"errors"
"fmt"
"os"
"regexp"
"strings"
"time"
)
const (
// DefaultTemplateCommandTimeout is the amount of time to wait for a command
// to return.
DefaultTemplateCommandTimeout = 30 * time.Second
)
var (
// ErrTemplateStringEmpty is the error returned with the template contents
// are empty.
ErrTemplateStringEmpty = errors.New("template: cannot be empty")
// configTemplateRe is the pattern to split the config template syntax.
configTemplateRe = regexp.MustCompile("([a-zA-Z]:)?([^:]+)")
)
// TemplateConfig is a representation of a template on disk, as well as the
// associated commands and reload instructions.
type TemplateConfig struct {
// Backup determines if this template should retain a backup. The default
// value is false.
Backup *bool `mapstructure:"backup"`
// Command is the arbitrary command to execute after a template has
// successfully rendered. This is DEPRECATED. Use Exec instead.
Command *string `mapstructure:"command"`
// CommandTimeout is the amount of time to wait for the command to finish
// before force-killing it. This is DEPRECATED. Use Exec instead.
CommandTimeout *time.Duration `mapstructure:"command_timeout"`
// Contents are the raw template contents to evaluate. Either this or Source
// must be specified, but not both.
Contents *string `mapstructure:"contents"`
// CreateDestDirs tells Consul Template to create the parent directories of
// the destination path if they do not exist. The default value is true.
CreateDestDirs *bool `mapstructure:"create_dest_dirs"`
// Destination is the location on disk where the template should be rendered.
// This is required unless running in debug/dry mode.
Destination *string `mapstructure:"destination"`
// ErrMissingKey is used to control how the template behaves when attempting
// to index a struct or map key that does not exist.
ErrMissingKey *bool `mapstructure:"error_on_missing_key"`
// Exec is the configuration for the command to run when the template renders
// successfully.
Exec *ExecConfig `mapstructure:"exec"`
// Perms are the file system permissions to use when creating the file on
// disk. This is useful for when files contain sensitive information, such as
// secrets from Vault.
Perms *os.FileMode `mapstructure:"perms"`
// Source is the path on disk to the template contents to evaluate. Either
// this or Contents should be specified, but not both.
Source *string `mapstructure:"source"`
// Wait configures per-template quiescence timers.
Wait *WaitConfig `mapstructure:"wait"`
// LeftDelim and RightDelim are optional configurations to control what
// delimiter is utilized when parsing the template.
LeftDelim *string `mapstructure:"left_delimiter"`
RightDelim *string `mapstructure:"right_delimiter"`
// FunctionBlacklist is a list of functions that this template is not
// permitted to run.
FunctionBlacklist []string `mapstructure:"function_blacklist"`
// SandboxPath adds a prefix to any path provided to the `file` function
// and causes an error if a relative path tries to traverse outside that
// prefix.
SandboxPath *string `mapstructure:"sandbox_path"`
}
// DefaultTemplateConfig returns a configuration that is populated with the
// default values.
func DefaultTemplateConfig() *TemplateConfig {
return &TemplateConfig{
Exec: DefaultExecConfig(),
Wait: DefaultWaitConfig(),
}
}
// Copy returns a deep copy of this configuration.
func (c *TemplateConfig) Copy() *TemplateConfig {
if c == nil {
return nil
}
var o TemplateConfig
o.Backup = c.Backup
o.Command = c.Command
o.CommandTimeout = c.CommandTimeout
o.Contents = c.Contents
o.CreateDestDirs = c.CreateDestDirs
o.Destination = c.Destination
o.ErrMissingKey = c.ErrMissingKey
if c.Exec != nil {
o.Exec = c.Exec.Copy()
}
o.Perms = c.Perms
o.Source = c.Source
if c.Wait != nil {
o.Wait = c.Wait.Copy()
}
o.LeftDelim = c.LeftDelim
o.RightDelim = c.RightDelim
for _, fun := range c.FunctionBlacklist {
o.FunctionBlacklist = append(o.FunctionBlacklist, fun)
}
o.SandboxPath = c.SandboxPath
return &o
}
// Merge combines all values in this configuration with the values in the other
// configuration, with values in the other configuration taking precedence.
// Maps and slices are merged, most other values are overwritten. Complex
// structs define their own merge functionality.
func (c *TemplateConfig) Merge(o *TemplateConfig) *TemplateConfig {
if c == nil {
if o == nil {
return nil
}
return o.Copy()
}
if o == nil {
return c.Copy()
}
r := c.Copy()
if o.Backup != nil {
r.Backup = o.Backup
}
if o.Command != nil {
r.Command = o.Command
}
if o.CommandTimeout != nil {
r.CommandTimeout = o.CommandTimeout
}
if o.Contents != nil {
r.Contents = o.Contents
}
if o.CreateDestDirs != nil {
r.CreateDestDirs = o.CreateDestDirs
}
if o.Destination != nil {
r.Destination = o.Destination
}
if o.ErrMissingKey != nil {
r.ErrMissingKey = o.ErrMissingKey
}
if o.Exec != nil {
r.Exec = r.Exec.Merge(o.Exec)
}
if o.Perms != nil {
r.Perms = o.Perms
}
if o.Source != nil {
r.Source = o.Source
}
if o.Wait != nil {
r.Wait = r.Wait.Merge(o.Wait)
}
if o.LeftDelim != nil {
r.LeftDelim = o.LeftDelim
}
if o.RightDelim != nil {
r.RightDelim = o.RightDelim
}
for _, fun := range o.FunctionBlacklist {
r.FunctionBlacklist = append(r.FunctionBlacklist, fun)
}
if o.SandboxPath != nil {
r.SandboxPath = o.SandboxPath
}
return r
}
// Finalize ensures the configuration has no nil pointers and sets default
// values.
func (c *TemplateConfig) Finalize() {
if c.Backup == nil {
c.Backup = Bool(false)
}
if c.Command == nil {
c.Command = String("")
}
if c.CommandTimeout == nil {
c.CommandTimeout = TimeDuration(DefaultTemplateCommandTimeout)
}
if c.Contents == nil {
c.Contents = String("")
}
if c.CreateDestDirs == nil {
c.CreateDestDirs = Bool(true)
}
if c.Destination == nil {
c.Destination = String("")
}
if c.ErrMissingKey == nil {
c.ErrMissingKey = Bool(false)
}
if c.Exec == nil {
c.Exec = DefaultExecConfig()
}
// Backwards compat for specifying command directly
if c.Exec.Command == nil && c.Command != nil {
c.Exec.Command = c.Command
}
if c.Exec.Timeout == nil && c.CommandTimeout != nil {
c.Exec.Timeout = c.CommandTimeout
}
c.Exec.Finalize()
if c.Perms == nil {
c.Perms = FileMode(0)
}
if c.Source == nil {
c.Source = String("")
}
if c.Wait == nil {
c.Wait = DefaultWaitConfig()
}
c.Wait.Finalize()
if c.LeftDelim == nil {
c.LeftDelim = String("")
}
if c.RightDelim == nil {
c.RightDelim = String("")
}
if c.SandboxPath == nil {
c.SandboxPath = String("")
}
}
// GoString defines the printable version of this struct.
func (c *TemplateConfig) GoString() string {
if c == nil {
return "(*TemplateConfig)(nil)"
}
return fmt.Sprintf("&TemplateConfig{"+
"Backup:%s, "+
"Command:%s, "+
"CommandTimeout:%s, "+
"Contents:%s, "+
"CreateDestDirs:%s, "+
"Destination:%s, "+
"ErrMissingKey:%s, "+
"Exec:%#v, "+
"Perms:%s, "+
"Source:%s, "+
"Wait:%#v, "+
"LeftDelim:%s, "+
"RightDelim:%s"+
"FunctionBlacklist:%s"+
"SandboxPath:%s"+
"}",
BoolGoString(c.Backup),
StringGoString(c.Command),
TimeDurationGoString(c.CommandTimeout),
StringGoString(c.Contents),
BoolGoString(c.CreateDestDirs),
StringGoString(c.Destination),
BoolGoString(c.ErrMissingKey),
c.Exec,
FileModeGoString(c.Perms),
StringGoString(c.Source),
c.Wait,
StringGoString(c.LeftDelim),
StringGoString(c.RightDelim),
c.FunctionBlacklist,
StringGoString(c.SandboxPath),
)
}
// Display is the human-friendly form of this configuration. It tries to
// describe this template in as much detail as possible in a single line, so
// log consumers can uniquely identify it.
func (c *TemplateConfig) Display() string {
if c == nil {
return ""
}
source := c.Source
if StringPresent(c.Contents) {
source = String("(dynamic)")
}
return fmt.Sprintf("%q => %q",
StringVal(source),
StringVal(c.Destination),
)
}
// TemplateConfigs is a collection of TemplateConfigs
type TemplateConfigs []*TemplateConfig
// DefaultTemplateConfigs returns a configuration that is populated with the
// default values.
func DefaultTemplateConfigs() *TemplateConfigs {
return &TemplateConfigs{}
}
// Copy returns a deep copy of this configuration.
func (c *TemplateConfigs) Copy() *TemplateConfigs {
o := make(TemplateConfigs, len(*c))
for i, t := range *c {
o[i] = t.Copy()
}
return &o
}
// Merge combines all values in this configuration with the values in the other
// configuration, with values in the other configuration taking precedence.
// Maps and slices are merged, most other values are overwritten. Complex
// structs define their own merge functionality.
func (c *TemplateConfigs) Merge(o *TemplateConfigs) *TemplateConfigs {
if c == nil {
if o == nil {
return nil
}
return o.Copy()
}
if o == nil {
return c.Copy()
}
r := c.Copy()
*r = append(*r, *o...)
return r
}
// Finalize ensures the configuration has no nil pointers and sets default
// values.
func (c *TemplateConfigs) Finalize() {
if c == nil {
*c = *DefaultTemplateConfigs()
}
for _, t := range *c {
t.Finalize()
}
}
// GoString defines the printable version of this struct.
func (c *TemplateConfigs) GoString() string {
if c == nil {
return "(*TemplateConfigs)(nil)"
}
s := make([]string, len(*c))
for i, t := range *c {
s[i] = t.GoString()
}
return "{" + strings.Join(s, ", ") + "}"
}
// ParseTemplateConfig parses a string in the form source:destination:command
// into a TemplateConfig.
func ParseTemplateConfig(s string) (*TemplateConfig, error) {
if len(strings.TrimSpace(s)) < 1 {
return nil, ErrTemplateStringEmpty
}
var source, destination, command string
parts := configTemplateRe.FindAllString(s, -1)
switch len(parts) {
case 1:
source = parts[0]
case 2:
source, destination = parts[0], parts[1]
case 3:
source, destination, command = parts[0], parts[1], parts[2]
default:
source, destination = parts[0], parts[1]
command = strings.Join(parts[2:], ":")
}
var sourcePtr, destinationPtr, commandPtr *string
if source != "" {
sourcePtr = String(source)
}
if destination != "" {
destinationPtr = String(destination)
}
if command != "" {
commandPtr = String(command)
}
return &TemplateConfig{
Source: sourcePtr,
Destination: destinationPtr,
Command: commandPtr,
}, nil
}

View File

@ -0,0 +1,188 @@
package config
import (
"fmt"
"runtime"
"time"
)
const (
// DefaultDialKeepAlive is the default amount of time to keep alive
// connections.
DefaultDialKeepAlive = 30 * time.Second
// DefaultDialTimeout is the amount of time to attempt to dial before timing
// out.
DefaultDialTimeout = 30 * time.Second
// DefaultIdleConnTimeout is the default connection timeout for idle
// connections.
DefaultIdleConnTimeout = 90 * time.Second
// DefaultMaxIdleConns is the default number of maximum idle connections.
DefaultMaxIdleConns = 100
// DefaultTLSHandshakeTimeout is the amount of time to negotiate the TLS
// handshake.
DefaultTLSHandshakeTimeout = 10 * time.Second
)
var (
// DefaultMaxIdleConnsPerHost is the default number of idle connections to use
// per host.
DefaultMaxIdleConnsPerHost = runtime.GOMAXPROCS(0) + 1
)
// TransportConfig is the configuration to tune low-level APIs for the
// interactions on the wire.
type TransportConfig struct {
// DialKeepAlive is the amount of time for keep-alives.
DialKeepAlive *time.Duration `mapstructure:"dial_keep_alive"`
// DialTimeout is the amount of time to wait to establish a connection.
DialTimeout *time.Duration `mapstructure:"dial_timeout"`
// DisableKeepAlives determines if keep-alives should be used. Disabling this
// significantly decreases performance.
DisableKeepAlives *bool `mapstructure:"disable_keep_alives"`
// IdleConnTimeout is the timeout for idle connections.
IdleConnTimeout *time.Duration `mapstructure:"idle_conn_timeout"`
// MaxIdleConns is the maximum number of total idle connections.
MaxIdleConns *int `mapstructure:"max_idle_conns"`
// MaxIdleConnsPerHost is the maximum number of idle connections per remote
// host.
MaxIdleConnsPerHost *int `mapstructure:"max_idle_conns_per_host"`
// TLSHandshakeTimeout is the amount of time to wait to complete the TLS
// handshake.
TLSHandshakeTimeout *time.Duration `mapstructure:"tls_handshake_timeout"`
}
// DefaultTransportConfig returns a configuration that is populated with the
// default values.
func DefaultTransportConfig() *TransportConfig {
return &TransportConfig{}
}
// Copy returns a deep copy of this configuration.
func (c *TransportConfig) Copy() *TransportConfig {
if c == nil {
return nil
}
var o TransportConfig
o.DialKeepAlive = c.DialKeepAlive
o.DialTimeout = c.DialTimeout
o.DisableKeepAlives = c.DisableKeepAlives
o.IdleConnTimeout = c.IdleConnTimeout
o.MaxIdleConns = c.MaxIdleConns
o.MaxIdleConnsPerHost = c.MaxIdleConnsPerHost
o.TLSHandshakeTimeout = c.TLSHandshakeTimeout
return &o
}
// Merge combines all values in this configuration with the values in the other
// configuration, with values in the other configuration taking precedence.
// Maps and slices are merged, most other values are overwritten. Complex
// structs define their own merge functionality.
func (c *TransportConfig) Merge(o *TransportConfig) *TransportConfig {
if c == nil {
if o == nil {
return nil
}
return o.Copy()
}
if o == nil {
return c.Copy()
}
r := c.Copy()
if o.DialKeepAlive != nil {
r.DialKeepAlive = o.DialKeepAlive
}
if o.DialTimeout != nil {
r.DialTimeout = o.DialTimeout
}
if o.DisableKeepAlives != nil {
r.DisableKeepAlives = o.DisableKeepAlives
}
if o.IdleConnTimeout != nil {
r.IdleConnTimeout = o.IdleConnTimeout
}
if o.MaxIdleConns != nil {
r.MaxIdleConns = o.MaxIdleConns
}
if o.MaxIdleConnsPerHost != nil {
r.MaxIdleConnsPerHost = o.MaxIdleConnsPerHost
}
if o.TLSHandshakeTimeout != nil {
r.TLSHandshakeTimeout = o.TLSHandshakeTimeout
}
return r
}
// Finalize ensures there no nil pointers.
func (c *TransportConfig) Finalize() {
if c.DialKeepAlive == nil {
c.DialKeepAlive = TimeDuration(DefaultDialKeepAlive)
}
if c.DialTimeout == nil {
c.DialTimeout = TimeDuration(DefaultDialTimeout)
}
if c.DisableKeepAlives == nil {
c.DisableKeepAlives = Bool(false)
}
if c.IdleConnTimeout == nil {
c.IdleConnTimeout = TimeDuration(DefaultIdleConnTimeout)
}
if c.MaxIdleConns == nil {
c.MaxIdleConns = Int(DefaultMaxIdleConns)
}
if c.MaxIdleConnsPerHost == nil {
c.MaxIdleConnsPerHost = Int(DefaultMaxIdleConnsPerHost)
}
if c.TLSHandshakeTimeout == nil {
c.TLSHandshakeTimeout = TimeDuration(DefaultTLSHandshakeTimeout)
}
}
// GoString defines the printable version of this struct.
func (c *TransportConfig) GoString() string {
if c == nil {
return "(*TransportConfig)(nil)"
}
return fmt.Sprintf("&TransportConfig{"+
"DialKeepAlive:%s, "+
"DialTimeout:%s, "+
"DisableKeepAlives:%t, "+
"MaxIdleConnsPerHost:%d, "+
"TLSHandshakeTimeout:%s"+
"}",
TimeDurationVal(c.DialKeepAlive),
TimeDurationVal(c.DialTimeout),
BoolVal(c.DisableKeepAlives),
IntVal(c.MaxIdleConnsPerHost),
TimeDurationVal(c.TLSHandshakeTimeout),
)
}

View File

@ -0,0 +1,327 @@
package config
import (
"fmt"
"time"
"github.com/hashicorp/vault/api"
)
const (
// XXX Change use to api.EnvVaultSkipVerify once we've updated vendored
// vault to version 1.1.0 or newer.
EnvVaultSkipVerify = "VAULT_SKIP_VERIFY"
// DefaultVaultGrace is the default grace period before which to read a new
// secret from Vault. If a lease is due to expire in 15 seconds, Consul
// Template will read a new secret at that time minus this value.
DefaultVaultGrace = 15 * time.Second
// DefaultVaultRenewToken is the default value for if the Vault token should
// be renewed.
DefaultVaultRenewToken = true
// DefaultVaultUnwrapToken is the default value for if the Vault token should
// be unwrapped.
DefaultVaultUnwrapToken = false
// DefaultVaultRetryBase is the default value for the base time to use for
// exponential backoff.
DefaultVaultRetryBase = 250 * time.Millisecond
// DefaultVaultRetryMaxAttempts is the default maximum number of attempts to
// retry before quitting.
DefaultVaultRetryMaxAttempts = 5
)
// VaultConfig is the configuration for connecting to a vault server.
type VaultConfig struct {
// Address is the URI to the Vault server.
Address *string `mapstructure:"address"`
// Enabled controls whether the Vault integration is active.
Enabled *bool `mapstructure:"enabled"`
// Grace is the amount of time before a lease is about to expire to force a
// new secret to be read.
Grace *time.Duration `mapstructure:"grace"`
// Namespace is the Vault namespace to use for reading/writing secrets. This can
// also be set via the VAULT_NAMESPACE environment variable.
Namespace *string `mapstructure:"namespace"`
// RenewToken renews the Vault token.
RenewToken *bool `mapstructure:"renew_token"`
// Retry is the configuration for specifying how to behave on failure.
Retry *RetryConfig `mapstructure:"retry"`
// SSL indicates we should use a secure connection while talking to Vault.
SSL *SSLConfig `mapstructure:"ssl"`
// Token is the Vault token to communicate with for requests. It may be
// a wrapped token or a real token. This can also be set via the VAULT_TOKEN
// environment variable, or via the VaultAgentTokenFile.
Token *string `mapstructure:"token" json:"-"`
// VaultAgentTokenFile is the path of file that contains a Vault Agent token.
// If vault_agent_token_file is specified:
// - Consul Template will not try to renew the Vault token.
// - Consul Template will periodically stat the file and update the token if it has
// changed.
VaultAgentTokenFile *string `mapstructure:"vault_agent_token_file" json:"-"`
// Transport configures the low-level network connection details.
Transport *TransportConfig `mapstructure:"transport"`
// UnwrapToken unwraps the provided Vault token as a wrapped token.
UnwrapToken *bool `mapstructure:"unwrap_token"`
}
// DefaultVaultConfig returns a configuration that is populated with the
// default values.
func DefaultVaultConfig() *VaultConfig {
v := &VaultConfig{
Retry: DefaultRetryConfig(),
SSL: DefaultSSLConfig(),
Transport: DefaultTransportConfig(),
}
// Force SSL when communicating with Vault.
v.SSL.Enabled = Bool(true)
return v
}
// Copy returns a deep copy of this configuration.
func (c *VaultConfig) Copy() *VaultConfig {
if c == nil {
return nil
}
var o VaultConfig
o.Address = c.Address
o.Enabled = c.Enabled
o.Grace = c.Grace
o.Namespace = c.Namespace
o.RenewToken = c.RenewToken
if c.Retry != nil {
o.Retry = c.Retry.Copy()
}
if c.SSL != nil {
o.SSL = c.SSL.Copy()
}
o.Token = c.Token
o.VaultAgentTokenFile = c.VaultAgentTokenFile
if c.Transport != nil {
o.Transport = c.Transport.Copy()
}
o.UnwrapToken = c.UnwrapToken
return &o
}
// Merge combines all values in this configuration with the values in the other
// configuration, with values in the other configuration taking precedence.
// Maps and slices are merged, most other values are overwritten. Complex
// structs define their own merge functionality.
func (c *VaultConfig) Merge(o *VaultConfig) *VaultConfig {
if c == nil {
if o == nil {
return nil
}
return o.Copy()
}
if o == nil {
return c.Copy()
}
r := c.Copy()
if o.Address != nil {
r.Address = o.Address
}
if o.Enabled != nil {
r.Enabled = o.Enabled
}
if o.Grace != nil {
r.Grace = o.Grace
}
if o.Namespace != nil {
r.Namespace = o.Namespace
}
if o.RenewToken != nil {
r.RenewToken = o.RenewToken
}
if o.Retry != nil {
r.Retry = r.Retry.Merge(o.Retry)
}
if o.SSL != nil {
r.SSL = r.SSL.Merge(o.SSL)
}
if o.Token != nil {
r.Token = o.Token
}
if o.VaultAgentTokenFile != nil {
r.VaultAgentTokenFile = o.VaultAgentTokenFile
}
if o.Transport != nil {
r.Transport = r.Transport.Merge(o.Transport)
}
if o.UnwrapToken != nil {
r.UnwrapToken = o.UnwrapToken
}
return r
}
// Finalize ensures there no nil pointers.
func (c *VaultConfig) Finalize() {
if c.Address == nil {
c.Address = stringFromEnv([]string{
api.EnvVaultAddress,
}, "")
}
if c.Grace == nil {
c.Grace = TimeDuration(DefaultVaultGrace)
}
if c.Namespace == nil {
c.Namespace = stringFromEnv([]string{"VAULT_NAMESPACE"}, "")
}
if c.RenewToken == nil {
default_renew := DefaultVaultRenewToken
if c.VaultAgentTokenFile != nil {
default_renew = false
}
c.RenewToken = boolFromEnv([]string{
"VAULT_RENEW_TOKEN",
}, default_renew)
}
if c.Retry == nil {
c.Retry = DefaultRetryConfig()
}
c.Retry.Finalize()
// Vault has custom SSL settings
if c.SSL == nil {
c.SSL = DefaultSSLConfig()
}
if c.SSL.Enabled == nil {
c.SSL.Enabled = Bool(true)
}
if c.SSL.CaCert == nil {
c.SSL.CaCert = stringFromEnv([]string{api.EnvVaultCACert}, "")
}
if c.SSL.CaPath == nil {
c.SSL.CaPath = stringFromEnv([]string{api.EnvVaultCAPath}, "")
}
if c.SSL.Cert == nil {
c.SSL.Cert = stringFromEnv([]string{api.EnvVaultClientCert}, "")
}
if c.SSL.Key == nil {
c.SSL.Key = stringFromEnv([]string{api.EnvVaultClientKey}, "")
}
if c.SSL.ServerName == nil {
c.SSL.ServerName = stringFromEnv([]string{api.EnvVaultTLSServerName}, "")
}
if c.SSL.Verify == nil {
c.SSL.Verify = antiboolFromEnv([]string{
EnvVaultSkipVerify, api.EnvVaultInsecure}, true)
}
c.SSL.Finalize()
// Order of precedence
// 1. `vault_agent_token_file` configuration value
// 2. `token` configuration value`
// 3. `VAULT_TOKEN` environment variable
if c.Token == nil {
c.Token = stringFromEnv([]string{
"VAULT_TOKEN",
}, "")
}
if c.VaultAgentTokenFile == nil {
if StringVal(c.Token) == "" {
if homePath != "" {
c.Token = stringFromFile([]string{
homePath + "/.vault-token",
}, "")
}
}
} else {
c.Token = stringFromFile([]string{*c.VaultAgentTokenFile}, "")
}
if c.Transport == nil {
c.Transport = DefaultTransportConfig()
}
c.Transport.Finalize()
if c.UnwrapToken == nil {
c.UnwrapToken = boolFromEnv([]string{
"VAULT_UNWRAP_TOKEN",
}, DefaultVaultUnwrapToken)
}
if c.Enabled == nil {
c.Enabled = Bool(StringPresent(c.Address))
}
}
// GoString defines the printable version of this struct.
func (c *VaultConfig) GoString() string {
if c == nil {
return "(*VaultConfig)(nil)"
}
return fmt.Sprintf("&VaultConfig{"+
"Address:%s, "+
"Enabled:%s, "+
"Grace:%s, "+
"Namespace:%s,"+
"RenewToken:%s, "+
"Retry:%#v, "+
"SSL:%#v, "+
"Token:%t, "+
"VaultAgentTokenFile:%t, "+
"Transport:%#v, "+
"UnwrapToken:%s"+
"}",
StringGoString(c.Address),
BoolGoString(c.Enabled),
TimeDurationGoString(c.Grace),
StringGoString(c.Namespace),
BoolGoString(c.RenewToken),
c.Retry,
c.SSL,
StringPresent(c.Token),
StringPresent(c.VaultAgentTokenFile),
c.Transport,
BoolGoString(c.UnwrapToken),
)
}

View File

@ -0,0 +1,191 @@
package config
import (
"errors"
"fmt"
"strings"
"time"
)
var (
// ErrWaitStringEmpty is the error returned when wait is specified as an empty
// string.
ErrWaitStringEmpty = errors.New("wait: cannot be empty")
// ErrWaitInvalidFormat is the error returned when the wait is specified
// incorrectly.
ErrWaitInvalidFormat = errors.New("wait: invalid format")
// ErrWaitNegative is the error returned with the wait is negative.
ErrWaitNegative = errors.New("wait: cannot be negative")
// ErrWaitMinLTMax is the error returned with the minimum wait time is not
// less than the maximum wait time.
ErrWaitMinLTMax = errors.New("wait: min must be less than max")
)
// WaitConfig is the Min/Max duration used by the Watcher
type WaitConfig struct {
// Enabled determines if this wait is enabled.
Enabled *bool `mapstructure:"bool"`
// Min and Max are the minimum and maximum time, respectively, to wait for
// data changes before rendering a new template to disk.
Min *time.Duration `mapstructure:"min"`
Max *time.Duration `mapstructure:"max"`
}
// DefaultWaitConfig is the default configuration.
func DefaultWaitConfig() *WaitConfig {
return &WaitConfig{}
}
// Copy returns a deep copy of this configuration.
func (c *WaitConfig) Copy() *WaitConfig {
if c == nil {
return nil
}
var o WaitConfig
o.Enabled = c.Enabled
o.Min = c.Min
o.Max = c.Max
return &o
}
// Merge combines all values in this configuration with the values in the other
// configuration, with values in the other configuration taking precedence.
// Maps and slices are merged, most other values are overwritten. Complex
// structs define their own merge functionality.
func (c *WaitConfig) Merge(o *WaitConfig) *WaitConfig {
if c == nil {
if o == nil {
return nil
}
return o.Copy()
}
if o == nil {
return c.Copy()
}
r := c.Copy()
if o.Enabled != nil {
r.Enabled = o.Enabled
}
if o.Min != nil {
r.Min = o.Min
}
if o.Max != nil {
r.Max = o.Max
}
return r
}
// Finalize ensures there no nil pointers.
func (c *WaitConfig) Finalize() {
if c.Enabled == nil {
c.Enabled = Bool(TimeDurationPresent(c.Min))
}
if c.Min == nil {
c.Min = TimeDuration(0 * time.Second)
}
if c.Max == nil {
c.Max = TimeDuration(4 * *c.Min)
}
}
// GoString defines the printable version of this struct.
func (c *WaitConfig) GoString() string {
if c == nil {
return "(*WaitConfig)(nil)"
}
return fmt.Sprintf("&WaitConfig{"+
"Enabled:%s, "+
"Min:%s, "+
"Max:%s"+
"}",
BoolGoString(c.Enabled),
TimeDurationGoString(c.Min),
TimeDurationGoString(c.Max),
)
}
// ParseWaitConfig parses a string of the format `minimum(:maximum)` into a
// WaitConfig.
func ParseWaitConfig(s string) (*WaitConfig, error) {
s = strings.TrimSpace(s)
if len(s) < 1 {
return nil, ErrWaitStringEmpty
}
parts := strings.Split(s, ":")
var min, max time.Duration
var err error
switch len(parts) {
case 1:
min, err = time.ParseDuration(strings.TrimSpace(parts[0]))
if err != nil {
return nil, err
}
max = 4 * min
case 2:
min, err = time.ParseDuration(strings.TrimSpace(parts[0]))
if err != nil {
return nil, err
}
max, err = time.ParseDuration(strings.TrimSpace(parts[1]))
if err != nil {
return nil, err
}
default:
return nil, ErrWaitInvalidFormat
}
if min < 0 || max < 0 {
return nil, ErrWaitNegative
}
if max < min {
return nil, ErrWaitMinLTMax
}
var c WaitConfig
c.Min = TimeDuration(min)
c.Max = TimeDuration(max)
return &c, nil
}
// WaitVar implements the Flag.Value interface and allows the user to specify
// a watch interval using Go's flag parsing library.
type WaitVar WaitConfig
// Set sets the value in the format min[:max] for a wait timer.
func (w *WaitVar) Set(value string) error {
wait, err := ParseWaitConfig(value)
if err != nil {
return err
}
w.Min = wait.Min
w.Max = wait.Max
return nil
}
// String returns the string format for this wait variable
func (w *WaitVar) String() string {
return fmt.Sprintf("%s:%s", w.Min, w.Max)
}

View File

@ -0,0 +1,112 @@
package dependency
import (
"log"
"net/url"
"sort"
"time"
"github.com/hashicorp/consul/api"
"github.com/pkg/errors"
)
var (
// Ensure implements
_ Dependency = (*CatalogDatacentersQuery)(nil)
// CatalogDatacentersQuerySleepTime is the amount of time to sleep between
// queries, since the endpoint does not support blocking queries.
CatalogDatacentersQuerySleepTime = 15 * time.Second
)
// CatalogDatacentersQuery is the dependency to query all datacenters
type CatalogDatacentersQuery struct {
ignoreFailing bool
stopCh chan struct{}
}
// NewCatalogDatacentersQuery creates a new datacenter dependency.
func NewCatalogDatacentersQuery(ignoreFailing bool) (*CatalogDatacentersQuery, error) {
return &CatalogDatacentersQuery{
ignoreFailing: ignoreFailing,
stopCh: make(chan struct{}, 1),
}, nil
}
// Fetch queries the Consul API defined by the given client and returns a slice
// of strings representing the datacenters
func (d *CatalogDatacentersQuery) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) {
opts = opts.Merge(&QueryOptions{})
log.Printf("[TRACE] %s: GET %s", d, &url.URL{
Path: "/v1/catalog/datacenters",
RawQuery: opts.String(),
})
// This is pretty ghetto, but the datacenters endpoint does not support
// blocking queries, so we are going to "fake it until we make it". When we
// first query, the LastIndex will be "0", meaning we should immediately
// return data, but future calls will include a LastIndex. If we have a
// LastIndex in the query metadata, sleep for 15 seconds before asking Consul
// again.
//
// This is probably okay given the frequency in which datacenters actually
// change, but is technically not edge-triggering.
if opts.WaitIndex != 0 {
log.Printf("[TRACE] %s: long polling for %s", d, CatalogDatacentersQuerySleepTime)
select {
case <-d.stopCh:
return nil, nil, ErrStopped
case <-time.After(CatalogDatacentersQuerySleepTime):
}
}
result, err := clients.Consul().Catalog().Datacenters()
if err != nil {
return nil, nil, errors.Wrapf(err, d.String())
}
// If the user opted in for skipping "down" datacenters, figure out which
// datacenters are down.
if d.ignoreFailing {
dcs := make([]string, 0, len(result))
for _, dc := range result {
if _, _, err := clients.Consul().Catalog().Services(&api.QueryOptions{
Datacenter: dc,
AllowStale: false,
RequireConsistent: true,
}); err == nil {
dcs = append(dcs, dc)
}
}
result = dcs
}
log.Printf("[TRACE] %s: returned %d results", d, len(result))
sort.Strings(result)
return respWithMetadata(result)
}
// CanShare returns if this dependency is shareable.
func (d *CatalogDatacentersQuery) CanShare() bool {
return true
}
// String returns the human-friendly version of this dependency.
func (d *CatalogDatacentersQuery) String() string {
return "catalog.datacenters"
}
// Stop terminates this dependency's fetch.
func (d *CatalogDatacentersQuery) Stop() {
close(d.stopCh)
}
// Type returns the type of this dependency.
func (d *CatalogDatacentersQuery) Type() Type {
return TypeConsul
}

View File

@ -0,0 +1,181 @@
package dependency
import (
"encoding/gob"
"fmt"
"log"
"net/url"
"regexp"
"sort"
"github.com/pkg/errors"
)
var (
// Ensure implements
_ Dependency = (*CatalogNodeQuery)(nil)
// CatalogNodeQueryRe is the regular expression to use.
CatalogNodeQueryRe = regexp.MustCompile(`\A` + nodeNameRe + dcRe + `\z`)
)
func init() {
gob.Register([]*CatalogNode{})
gob.Register([]*CatalogNodeService{})
}
// CatalogNodeQuery represents a single node from the Consul catalog.
type CatalogNodeQuery struct {
stopCh chan struct{}
dc string
name string
}
// CatalogNode is a wrapper around the node and its services.
type CatalogNode struct {
Node *Node
Services []*CatalogNodeService
}
// CatalogNodeService is a service on a single node.
type CatalogNodeService struct {
ID string
Service string
Tags ServiceTags
Meta map[string]string
Port int
Address string
EnableTagOverride bool
}
// NewCatalogNodeQuery parses the given string into a dependency. If the name is
// empty then the name of the local agent is used.
func NewCatalogNodeQuery(s string) (*CatalogNodeQuery, error) {
if s != "" && !CatalogNodeQueryRe.MatchString(s) {
return nil, fmt.Errorf("catalog.node: invalid format: %q", s)
}
m := regexpMatch(CatalogNodeQueryRe, s)
return &CatalogNodeQuery{
dc: m["dc"],
name: m["name"],
stopCh: make(chan struct{}, 1),
}, nil
}
// Fetch queries the Consul API defined by the given client and returns a
// of CatalogNode object.
func (d *CatalogNodeQuery) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) {
select {
case <-d.stopCh:
return nil, nil, ErrStopped
default:
}
opts = opts.Merge(&QueryOptions{
Datacenter: d.dc,
})
// Grab the name
name := d.name
if name == "" {
log.Printf("[TRACE] %s: getting local agent name", d)
var err error
name, err = clients.Consul().Agent().NodeName()
if err != nil {
return nil, nil, errors.Wrapf(err, d.String())
}
}
log.Printf("[TRACE] %s: GET %s", d, &url.URL{
Path: "/v1/catalog/node/" + name,
RawQuery: opts.String(),
})
node, qm, err := clients.Consul().Catalog().Node(name, opts.ToConsulOpts())
if err != nil {
return nil, nil, errors.Wrap(err, d.String())
}
log.Printf("[TRACE] %s: returned response", d)
rm := &ResponseMetadata{
LastIndex: qm.LastIndex,
LastContact: qm.LastContact,
}
if node == nil {
log.Printf("[WARN] %s: no node exists with the name %q", d, name)
var node CatalogNode
return &node, rm, nil
}
services := make([]*CatalogNodeService, 0, len(node.Services))
for _, v := range node.Services {
services = append(services, &CatalogNodeService{
ID: v.ID,
Service: v.Service,
Tags: ServiceTags(deepCopyAndSortTags(v.Tags)),
Meta: v.Meta,
Port: v.Port,
Address: v.Address,
EnableTagOverride: v.EnableTagOverride,
})
}
sort.Stable(ByService(services))
detail := &CatalogNode{
Node: &Node{
ID: node.Node.ID,
Node: node.Node.Node,
Address: node.Node.Address,
Datacenter: node.Node.Datacenter,
TaggedAddresses: node.Node.TaggedAddresses,
Meta: node.Node.Meta,
},
Services: services,
}
return detail, rm, nil
}
// CanShare returns a boolean if this dependency is shareable.
func (d *CatalogNodeQuery) CanShare() bool {
return false
}
// String returns the human-friendly version of this dependency.
func (d *CatalogNodeQuery) String() string {
name := d.name
if d.dc != "" {
name = name + "@" + d.dc
}
if name == "" {
return "catalog.node"
}
return fmt.Sprintf("catalog.node(%s)", name)
}
// Stop halts the dependency's fetch function.
func (d *CatalogNodeQuery) Stop() {
close(d.stopCh)
}
// Type returns the type of this dependency.
func (d *CatalogNodeQuery) Type() Type {
return TypeConsul
}
// ByService is a sorter of node services by their service name and then ID.
type ByService []*CatalogNodeService
func (s ByService) Len() int { return len(s) }
func (s ByService) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s ByService) Less(i, j int) bool {
if s[i].Service == s[j].Service {
return s[i].ID <= s[j].ID
}
return s[i].Service <= s[j].Service
}

View File

@ -0,0 +1,150 @@
package dependency
import (
"encoding/gob"
"fmt"
"log"
"net/url"
"regexp"
"sort"
"github.com/pkg/errors"
)
var (
// Ensure implements
_ Dependency = (*CatalogNodesQuery)(nil)
// CatalogNodesQueryRe is the regular expression to use.
CatalogNodesQueryRe = regexp.MustCompile(`\A` + dcRe + nearRe + `\z`)
)
func init() {
gob.Register([]*Node{})
}
// Node is a node entry in Consul
type Node struct {
ID string
Node string
Address string
Datacenter string
TaggedAddresses map[string]string
Meta map[string]string
}
// CatalogNodesQuery is the representation of all registered nodes in Consul.
type CatalogNodesQuery struct {
stopCh chan struct{}
dc string
near string
}
// NewCatalogNodesQuery parses the given string into a dependency. If the name is
// empty then the name of the local agent is used.
func NewCatalogNodesQuery(s string) (*CatalogNodesQuery, error) {
if !CatalogNodesQueryRe.MatchString(s) {
return nil, fmt.Errorf("catalog.nodes: invalid format: %q", s)
}
m := regexpMatch(CatalogNodesQueryRe, s)
return &CatalogNodesQuery{
dc: m["dc"],
near: m["near"],
stopCh: make(chan struct{}, 1),
}, nil
}
// Fetch queries the Consul API defined by the given client and returns a slice
// of Node objects
func (d *CatalogNodesQuery) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) {
select {
case <-d.stopCh:
return nil, nil, ErrStopped
default:
}
opts = opts.Merge(&QueryOptions{
Datacenter: d.dc,
Near: d.near,
})
log.Printf("[TRACE] %s: GET %s", d, &url.URL{
Path: "/v1/catalog/nodes",
RawQuery: opts.String(),
})
n, qm, err := clients.Consul().Catalog().Nodes(opts.ToConsulOpts())
if err != nil {
return nil, nil, errors.Wrap(err, d.String())
}
log.Printf("[TRACE] %s: returned %d results", d, len(n))
nodes := make([]*Node, 0, len(n))
for _, node := range n {
nodes = append(nodes, &Node{
ID: node.ID,
Node: node.Node,
Address: node.Address,
Datacenter: node.Datacenter,
TaggedAddresses: node.TaggedAddresses,
Meta: node.Meta,
})
}
// Sort unless the user explicitly asked for nearness
if d.near == "" {
sort.Stable(ByNode(nodes))
}
rm := &ResponseMetadata{
LastIndex: qm.LastIndex,
LastContact: qm.LastContact,
}
return nodes, rm, nil
}
// CanShare returns a boolean if this dependency is shareable.
func (d *CatalogNodesQuery) CanShare() bool {
return true
}
// String returns the human-friendly version of this dependency.
func (d *CatalogNodesQuery) String() string {
name := ""
if d.dc != "" {
name = name + "@" + d.dc
}
if d.near != "" {
name = name + "~" + d.near
}
if name == "" {
return "catalog.nodes"
}
return fmt.Sprintf("catalog.nodes(%s)", name)
}
// Stop halts the dependency's fetch function.
func (d *CatalogNodesQuery) Stop() {
close(d.stopCh)
}
// Type returns the type of this dependency.
func (d *CatalogNodesQuery) Type() Type {
return TypeConsul
}
// ByNode is a sortable list of nodes by name and then IP address.
type ByNode []*Node
func (s ByNode) Len() int { return len(s) }
func (s ByNode) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s ByNode) Less(i, j int) bool {
if s[i].Node == s[j].Node {
return s[i].Address <= s[j].Address
}
return s[i].Node <= s[j].Node
}

View File

@ -0,0 +1,154 @@
package dependency
import (
"encoding/gob"
"fmt"
"log"
"net/url"
"regexp"
"github.com/pkg/errors"
)
var (
// Ensure implements
_ Dependency = (*CatalogServiceQuery)(nil)
// CatalogServiceQueryRe is the regular expression to use.
CatalogServiceQueryRe = regexp.MustCompile(`\A` + tagRe + serviceNameRe + dcRe + nearRe + `\z`)
)
func init() {
gob.Register([]*CatalogSnippet{})
}
// CatalogService is a catalog entry in Consul.
type CatalogService struct {
ID string
Node string
Address string
Datacenter string
TaggedAddresses map[string]string
NodeMeta map[string]string
ServiceID string
ServiceName string
ServiceAddress string
ServiceTags ServiceTags
ServiceMeta map[string]string
ServicePort int
}
// CatalogServiceQuery is the representation of a requested catalog services
// dependency from inside a template.
type CatalogServiceQuery struct {
stopCh chan struct{}
dc string
name string
near string
tag string
}
// NewCatalogServiceQuery parses a string into a CatalogServiceQuery.
func NewCatalogServiceQuery(s string) (*CatalogServiceQuery, error) {
if !CatalogServiceQueryRe.MatchString(s) {
return nil, fmt.Errorf("catalog.service: invalid format: %q", s)
}
m := regexpMatch(CatalogServiceQueryRe, s)
return &CatalogServiceQuery{
stopCh: make(chan struct{}, 1),
dc: m["dc"],
name: m["name"],
near: m["near"],
tag: m["tag"],
}, nil
}
// Fetch queries the Consul API defined by the given client and returns a slice
// of CatalogService objects.
func (d *CatalogServiceQuery) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) {
select {
case <-d.stopCh:
return nil, nil, ErrStopped
default:
}
opts = opts.Merge(&QueryOptions{
Datacenter: d.dc,
Near: d.near,
})
u := &url.URL{
Path: "/v1/catalog/service/" + d.name,
RawQuery: opts.String(),
}
if d.tag != "" {
q := u.Query()
q.Set("tag", d.tag)
u.RawQuery = q.Encode()
}
log.Printf("[TRACE] %s: GET %s", d, u)
entries, qm, err := clients.Consul().Catalog().Service(d.name, d.tag, opts.ToConsulOpts())
if err != nil {
return nil, nil, errors.Wrap(err, d.String())
}
log.Printf("[TRACE] %s: returned %d results", d, len(entries))
var list []*CatalogService
for _, s := range entries {
list = append(list, &CatalogService{
ID: s.ID,
Node: s.Node,
Address: s.Address,
Datacenter: s.Datacenter,
TaggedAddresses: s.TaggedAddresses,
NodeMeta: s.NodeMeta,
ServiceID: s.ServiceID,
ServiceName: s.ServiceName,
ServiceAddress: s.ServiceAddress,
ServiceTags: ServiceTags(deepCopyAndSortTags(s.ServiceTags)),
ServiceMeta: s.ServiceMeta,
ServicePort: s.ServicePort,
})
}
rm := &ResponseMetadata{
LastIndex: qm.LastIndex,
LastContact: qm.LastContact,
}
return list, rm, nil
}
// CanShare returns a boolean if this dependency is shareable.
func (d *CatalogServiceQuery) CanShare() bool {
return true
}
// String returns the human-friendly version of this dependency.
func (d *CatalogServiceQuery) String() string {
name := d.name
if d.tag != "" {
name = d.tag + "." + name
}
if d.dc != "" {
name = name + "@" + d.dc
}
if d.near != "" {
name = name + "~" + d.near
}
return fmt.Sprintf("catalog.service(%s)", name)
}
// Stop halts the dependency's fetch function.
func (d *CatalogServiceQuery) Stop() {
close(d.stopCh)
}
// Type returns the type of this dependency.
func (d *CatalogServiceQuery) Type() Type {
return TypeConsul
}

View File

@ -0,0 +1,129 @@
package dependency
import (
"encoding/gob"
"fmt"
"log"
"net/url"
"regexp"
"sort"
"github.com/pkg/errors"
)
var (
// Ensure implements
_ Dependency = (*CatalogServicesQuery)(nil)
// CatalogServicesQueryRe is the regular expression to use for CatalogNodesQuery.
CatalogServicesQueryRe = regexp.MustCompile(`\A` + dcRe + `\z`)
)
func init() {
gob.Register([]*CatalogSnippet{})
}
// CatalogSnippet is a catalog entry in Consul.
type CatalogSnippet struct {
Name string
Tags ServiceTags
}
// CatalogServicesQuery is the representation of a requested catalog service
// dependency from inside a template.
type CatalogServicesQuery struct {
stopCh chan struct{}
dc string
}
// NewCatalogServicesQuery parses a string of the format @dc.
func NewCatalogServicesQuery(s string) (*CatalogServicesQuery, error) {
if !CatalogServicesQueryRe.MatchString(s) {
return nil, fmt.Errorf("catalog.services: invalid format: %q", s)
}
m := regexpMatch(CatalogServicesQueryRe, s)
return &CatalogServicesQuery{
stopCh: make(chan struct{}, 1),
dc: m["dc"],
}, nil
}
// Fetch queries the Consul API defined by the given client and returns a slice
// of CatalogService objects.
func (d *CatalogServicesQuery) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) {
select {
case <-d.stopCh:
return nil, nil, ErrStopped
default:
}
opts = opts.Merge(&QueryOptions{
Datacenter: d.dc,
})
log.Printf("[TRACE] %s: GET %s", d, &url.URL{
Path: "/v1/catalog/services",
RawQuery: opts.String(),
})
entries, qm, err := clients.Consul().Catalog().Services(opts.ToConsulOpts())
if err != nil {
return nil, nil, errors.Wrap(err, d.String())
}
log.Printf("[TRACE] %s: returned %d results", d, len(entries))
var catalogServices []*CatalogSnippet
for name, tags := range entries {
catalogServices = append(catalogServices, &CatalogSnippet{
Name: name,
Tags: ServiceTags(deepCopyAndSortTags(tags)),
})
}
sort.Stable(ByName(catalogServices))
rm := &ResponseMetadata{
LastIndex: qm.LastIndex,
LastContact: qm.LastContact,
}
return catalogServices, rm, nil
}
// CanShare returns a boolean if this dependency is shareable.
func (d *CatalogServicesQuery) CanShare() bool {
return true
}
// String returns the human-friendly version of this dependency.
func (d *CatalogServicesQuery) String() string {
if d.dc != "" {
return fmt.Sprintf("catalog.services(@%s)", d.dc)
}
return "catalog.services"
}
// Stop halts the dependency's fetch function.
func (d *CatalogServicesQuery) Stop() {
close(d.stopCh)
}
// Type returns the type of this dependency.
func (d *CatalogServicesQuery) Type() Type {
return TypeConsul
}
// ByName is a sortable slice of CatalogService structs.
type ByName []*CatalogSnippet
func (s ByName) Len() int { return len(s) }
func (s ByName) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s ByName) Less(i, j int) bool {
if s[i].Name <= s[j].Name {
return true
}
return false
}

View File

@ -0,0 +1,338 @@
package dependency
import (
"crypto/tls"
"fmt"
"log"
"net"
"net/http"
"sync"
"time"
consulapi "github.com/hashicorp/consul/api"
rootcerts "github.com/hashicorp/go-rootcerts"
vaultapi "github.com/hashicorp/vault/api"
)
// ClientSet is a collection of clients that dependencies use to communicate
// with remote services like Consul or Vault.
type ClientSet struct {
sync.RWMutex
vault *vaultClient
consul *consulClient
}
// consulClient is a wrapper around a real Consul API client.
type consulClient struct {
client *consulapi.Client
transport *http.Transport
}
// vaultClient is a wrapper around a real Vault API client.
type vaultClient struct {
client *vaultapi.Client
httpClient *http.Client
}
// CreateConsulClientInput is used as input to the CreateConsulClient function.
type CreateConsulClientInput struct {
Address string
Token string
AuthEnabled bool
AuthUsername string
AuthPassword string
SSLEnabled bool
SSLVerify bool
SSLCert string
SSLKey string
SSLCACert string
SSLCAPath string
ServerName string
TransportDialKeepAlive time.Duration
TransportDialTimeout time.Duration
TransportDisableKeepAlives bool
TransportIdleConnTimeout time.Duration
TransportMaxIdleConns int
TransportMaxIdleConnsPerHost int
TransportTLSHandshakeTimeout time.Duration
}
// CreateVaultClientInput is used as input to the CreateVaultClient function.
type CreateVaultClientInput struct {
Address string
Namespace string
Token string
UnwrapToken bool
SSLEnabled bool
SSLVerify bool
SSLCert string
SSLKey string
SSLCACert string
SSLCAPath string
ServerName string
TransportDialKeepAlive time.Duration
TransportDialTimeout time.Duration
TransportDisableKeepAlives bool
TransportIdleConnTimeout time.Duration
TransportMaxIdleConns int
TransportMaxIdleConnsPerHost int
TransportTLSHandshakeTimeout time.Duration
}
// NewClientSet creates a new client set that is ready to accept clients.
func NewClientSet() *ClientSet {
return &ClientSet{}
}
// CreateConsulClient creates a new Consul API client from the given input.
func (c *ClientSet) CreateConsulClient(i *CreateConsulClientInput) error {
consulConfig := consulapi.DefaultConfig()
if i.Address != "" {
consulConfig.Address = i.Address
}
if i.Token != "" {
consulConfig.Token = i.Token
}
if i.AuthEnabled {
consulConfig.HttpAuth = &consulapi.HttpBasicAuth{
Username: i.AuthUsername,
Password: i.AuthPassword,
}
}
// This transport will attempt to keep connections open to the Consul server.
transport := &http.Transport{
Proxy: http.ProxyFromEnvironment,
Dial: (&net.Dialer{
Timeout: i.TransportDialTimeout,
KeepAlive: i.TransportDialKeepAlive,
}).Dial,
DisableKeepAlives: i.TransportDisableKeepAlives,
MaxIdleConns: i.TransportMaxIdleConns,
IdleConnTimeout: i.TransportIdleConnTimeout,
MaxIdleConnsPerHost: i.TransportMaxIdleConnsPerHost,
TLSHandshakeTimeout: i.TransportTLSHandshakeTimeout,
}
// Configure SSL
if i.SSLEnabled {
consulConfig.Scheme = "https"
var tlsConfig tls.Config
// Custom certificate or certificate and key
if i.SSLCert != "" && i.SSLKey != "" {
cert, err := tls.LoadX509KeyPair(i.SSLCert, i.SSLKey)
if err != nil {
return fmt.Errorf("client set: consul: %s", err)
}
tlsConfig.Certificates = []tls.Certificate{cert}
} else if i.SSLCert != "" {
cert, err := tls.LoadX509KeyPair(i.SSLCert, i.SSLCert)
if err != nil {
return fmt.Errorf("client set: consul: %s", err)
}
tlsConfig.Certificates = []tls.Certificate{cert}
}
// Custom CA certificate
if i.SSLCACert != "" || i.SSLCAPath != "" {
rootConfig := &rootcerts.Config{
CAFile: i.SSLCACert,
CAPath: i.SSLCAPath,
}
if err := rootcerts.ConfigureTLS(&tlsConfig, rootConfig); err != nil {
return fmt.Errorf("client set: consul configuring TLS failed: %s", err)
}
}
// Construct all the certificates now
tlsConfig.BuildNameToCertificate()
// SSL verification
if i.ServerName != "" {
tlsConfig.ServerName = i.ServerName
tlsConfig.InsecureSkipVerify = false
}
if !i.SSLVerify {
log.Printf("[WARN] (clients) disabling consul SSL verification")
tlsConfig.InsecureSkipVerify = true
}
// Save the TLS config on our transport
transport.TLSClientConfig = &tlsConfig
}
// Setup the new transport
consulConfig.Transport = transport
// Create the API client
client, err := consulapi.NewClient(consulConfig)
if err != nil {
return fmt.Errorf("client set: consul: %s", err)
}
// Save the data on ourselves
c.Lock()
c.consul = &consulClient{
client: client,
transport: transport,
}
c.Unlock()
return nil
}
func (c *ClientSet) CreateVaultClient(i *CreateVaultClientInput) error {
vaultConfig := vaultapi.DefaultConfig()
if i.Address != "" {
vaultConfig.Address = i.Address
}
// This transport will attempt to keep connections open to the Vault server.
transport := &http.Transport{
Proxy: http.ProxyFromEnvironment,
Dial: (&net.Dialer{
Timeout: i.TransportDialTimeout,
KeepAlive: i.TransportDialKeepAlive,
}).Dial,
DisableKeepAlives: i.TransportDisableKeepAlives,
MaxIdleConns: i.TransportMaxIdleConns,
IdleConnTimeout: i.TransportIdleConnTimeout,
MaxIdleConnsPerHost: i.TransportMaxIdleConnsPerHost,
TLSHandshakeTimeout: i.TransportTLSHandshakeTimeout,
}
// Configure SSL
if i.SSLEnabled {
var tlsConfig tls.Config
// Custom certificate or certificate and key
if i.SSLCert != "" && i.SSLKey != "" {
cert, err := tls.LoadX509KeyPair(i.SSLCert, i.SSLKey)
if err != nil {
return fmt.Errorf("client set: vault: %s", err)
}
tlsConfig.Certificates = []tls.Certificate{cert}
} else if i.SSLCert != "" {
cert, err := tls.LoadX509KeyPair(i.SSLCert, i.SSLCert)
if err != nil {
return fmt.Errorf("client set: vault: %s", err)
}
tlsConfig.Certificates = []tls.Certificate{cert}
}
// Custom CA certificate
if i.SSLCACert != "" || i.SSLCAPath != "" {
rootConfig := &rootcerts.Config{
CAFile: i.SSLCACert,
CAPath: i.SSLCAPath,
}
if err := rootcerts.ConfigureTLS(&tlsConfig, rootConfig); err != nil {
return fmt.Errorf("client set: vault configuring TLS failed: %s", err)
}
}
// Construct all the certificates now
tlsConfig.BuildNameToCertificate()
// SSL verification
if i.ServerName != "" {
tlsConfig.ServerName = i.ServerName
tlsConfig.InsecureSkipVerify = false
}
if !i.SSLVerify {
log.Printf("[WARN] (clients) disabling vault SSL verification")
tlsConfig.InsecureSkipVerify = true
}
// Save the TLS config on our transport
transport.TLSClientConfig = &tlsConfig
}
// Setup the new transport
vaultConfig.HttpClient.Transport = transport
// Create the client
client, err := vaultapi.NewClient(vaultConfig)
if err != nil {
return fmt.Errorf("client set: vault: %s", err)
}
// Set the namespace if given.
if i.Namespace != "" {
client.SetNamespace(i.Namespace)
}
// Set the token if given
if i.Token != "" {
client.SetToken(i.Token)
}
// Check if we are unwrapping
if i.UnwrapToken {
secret, err := client.Logical().Unwrap(i.Token)
if err != nil {
return fmt.Errorf("client set: vault unwrap: %s", err)
}
if secret == nil {
return fmt.Errorf("client set: vault unwrap: no secret")
}
if secret.Auth == nil {
return fmt.Errorf("client set: vault unwrap: no secret auth")
}
if secret.Auth.ClientToken == "" {
return fmt.Errorf("client set: vault unwrap: no token returned")
}
client.SetToken(secret.Auth.ClientToken)
}
// Save the data on ourselves
c.Lock()
c.vault = &vaultClient{
client: client,
httpClient: vaultConfig.HttpClient,
}
c.Unlock()
return nil
}
// Consul returns the Consul client for this set.
func (c *ClientSet) Consul() *consulapi.Client {
c.RLock()
defer c.RUnlock()
return c.consul.client
}
// Vault returns the Vault client for this set.
func (c *ClientSet) Vault() *vaultapi.Client {
c.RLock()
defer c.RUnlock()
return c.vault.client
}
// Stop closes all idle connections for any attached clients.
func (c *ClientSet) Stop() {
c.Lock()
defer c.Unlock()
if c.consul != nil {
c.consul.transport.CloseIdleConnections()
}
if c.vault != nil {
c.vault.httpClient.Transport.(*http.Transport).CloseIdleConnections()
}
}

View File

@ -0,0 +1,189 @@
package dependency
import (
"net/url"
"regexp"
"sort"
"strconv"
"time"
consulapi "github.com/hashicorp/consul/api"
)
const (
dcRe = `(@(?P<dc>[[:word:]\.\-\_]+))?`
keyRe = `/?(?P<key>[^@]+)`
filterRe = `(\|(?P<filter>[[:word:]\,]+))?`
serviceNameRe = `(?P<name>[[:word:]\-\_]+)`
nodeNameRe = `(?P<name>[[:word:]\.\-\_]+)`
nearRe = `(~(?P<near>[[:word:]\.\-\_]+))?`
prefixRe = `/?(?P<prefix>[^@]+)`
tagRe = `((?P<tag>[[:word:]=:\.\-\_]+)\.)?`
)
type Type int
const (
TypeConsul Type = iota
TypeVault
TypeLocal
)
// Dependency is an interface for a dependency that Consul Template is capable
// of watching.
type Dependency interface {
Fetch(*ClientSet, *QueryOptions) (interface{}, *ResponseMetadata, error)
CanShare() bool
String() string
Stop()
Type() Type
}
// ServiceTags is a slice of tags assigned to a Service
type ServiceTags []string
// QueryOptions is a list of options to send with the query. These options are
// client-agnostic, and the dependency determines which, if any, of the options
// to use.
type QueryOptions struct {
AllowStale bool
Datacenter string
Near string
RequireConsistent bool
VaultGrace time.Duration
WaitIndex uint64
WaitTime time.Duration
}
func (q *QueryOptions) Merge(o *QueryOptions) *QueryOptions {
var r QueryOptions
if q == nil {
if o == nil {
return &QueryOptions{}
}
r = *o
return &r
}
r = *q
if o == nil {
return &r
}
if o.AllowStale != false {
r.AllowStale = o.AllowStale
}
if o.Datacenter != "" {
r.Datacenter = o.Datacenter
}
if o.Near != "" {
r.Near = o.Near
}
if o.RequireConsistent != false {
r.RequireConsistent = o.RequireConsistent
}
if o.WaitIndex != 0 {
r.WaitIndex = o.WaitIndex
}
if o.WaitTime != 0 {
r.WaitTime = o.WaitTime
}
return &r
}
func (q *QueryOptions) ToConsulOpts() *consulapi.QueryOptions {
return &consulapi.QueryOptions{
AllowStale: q.AllowStale,
Datacenter: q.Datacenter,
Near: q.Near,
RequireConsistent: q.RequireConsistent,
WaitIndex: q.WaitIndex,
WaitTime: q.WaitTime,
}
}
func (q *QueryOptions) String() string {
u := &url.Values{}
if q.AllowStale {
u.Add("stale", strconv.FormatBool(q.AllowStale))
}
if q.Datacenter != "" {
u.Add("dc", q.Datacenter)
}
if q.Near != "" {
u.Add("near", q.Near)
}
if q.RequireConsistent {
u.Add("consistent", strconv.FormatBool(q.RequireConsistent))
}
if q.WaitIndex != 0 {
u.Add("index", strconv.FormatUint(q.WaitIndex, 10))
}
if q.WaitTime != 0 {
u.Add("wait", q.WaitTime.String())
}
return u.Encode()
}
// ResponseMetadata is a struct that contains metadata about the response. This
// is returned from a Fetch function call.
type ResponseMetadata struct {
LastIndex uint64
LastContact time.Duration
Block bool
}
// deepCopyAndSortTags deep copies the tags in the given string slice and then
// sorts and returns the copied result.
func deepCopyAndSortTags(tags []string) []string {
newTags := make([]string, 0, len(tags))
for _, tag := range tags {
newTags = append(newTags, tag)
}
sort.Strings(newTags)
return newTags
}
// respWithMetadata is a short wrapper to return the given interface with fake
// response metadata for non-Consul dependencies.
func respWithMetadata(i interface{}) (interface{}, *ResponseMetadata, error) {
return i, &ResponseMetadata{
LastContact: 0,
LastIndex: uint64(time.Now().Unix()),
}, nil
}
// regexpMatch matches the given regexp and extracts the match groups into a
// named map.
func regexpMatch(re *regexp.Regexp, q string) map[string]string {
names := re.SubexpNames()
match := re.FindAllStringSubmatch(q, -1)
if len(match) == 0 {
return map[string]string{}
}
m := map[string]string{}
for i, n := range match[0] {
if names[i] != "" {
m[names[i]] = n
}
}
return m
}

View File

@ -0,0 +1,13 @@
package dependency
import "errors"
// ErrStopped is a special error that is returned when a dependency is
// prematurely stopped, usually due to a configuration reload or a process
// interrupt.
var ErrStopped = errors.New("dependency stopped")
// ErrContinue is a special error which says to continue (retry) on error.
var ErrContinue = errors.New("dependency continue")
var ErrLeaseExpired = errors.New("lease expired or is not renewable")

View File

@ -0,0 +1,129 @@
package dependency
import (
"fmt"
"io/ioutil"
"log"
"os"
"strings"
"time"
"github.com/pkg/errors"
)
var (
// Ensure implements
_ Dependency = (*FileQuery)(nil)
// FileQuerySleepTime is the amount of time to sleep between queries, since
// the fsnotify library is not compatible with solaris and other OSes yet.
FileQuerySleepTime = 2 * time.Second
)
// FileQuery represents a local file dependency.
type FileQuery struct {
stopCh chan struct{}
path string
stat os.FileInfo
}
// NewFileQuery creates a file dependency from the given path.
func NewFileQuery(s string) (*FileQuery, error) {
s = strings.TrimSpace(s)
if s == "" {
return nil, fmt.Errorf("file: invalid format: %q", s)
}
return &FileQuery{
stopCh: make(chan struct{}, 1),
path: s,
}, nil
}
// Fetch retrieves this dependency and returns the result or any errors that
// occur in the process.
func (d *FileQuery) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) {
log.Printf("[TRACE] %s: READ %s", d, d.path)
select {
case <-d.stopCh:
log.Printf("[TRACE] %s: stopped", d)
return "", nil, ErrStopped
case r := <-d.watch(d.stat):
if r.err != nil {
return "", nil, errors.Wrap(r.err, d.String())
}
log.Printf("[TRACE] %s: reported change", d)
data, err := ioutil.ReadFile(d.path)
if err != nil {
return "", nil, errors.Wrap(err, d.String())
}
d.stat = r.stat
return respWithMetadata(string(data))
}
}
// CanShare returns a boolean if this dependency is shareable.
func (d *FileQuery) CanShare() bool {
return false
}
// Stop halts the dependency's fetch function.
func (d *FileQuery) Stop() {
close(d.stopCh)
}
// String returns the human-friendly version of this dependency.
func (d *FileQuery) String() string {
return fmt.Sprintf("file(%s)", d.path)
}
// Type returns the type of this dependency.
func (d *FileQuery) Type() Type {
return TypeLocal
}
type watchResult struct {
stat os.FileInfo
err error
}
// watch watchers the file for changes
func (d *FileQuery) watch(lastStat os.FileInfo) <-chan *watchResult {
ch := make(chan *watchResult, 1)
go func(lastStat os.FileInfo) {
for {
stat, err := os.Stat(d.path)
if err != nil {
select {
case <-d.stopCh:
return
case ch <- &watchResult{err: err}:
return
}
}
changed := lastStat == nil ||
lastStat.Size() != stat.Size() ||
lastStat.ModTime() != stat.ModTime()
if changed {
select {
case <-d.stopCh:
return
case ch <- &watchResult{stat: stat}:
return
}
}
time.Sleep(FileQuerySleepTime)
}
}(lastStat)
return ch
}

View File

@ -0,0 +1,248 @@
package dependency
import (
"encoding/gob"
"fmt"
"log"
"net/url"
"regexp"
"sort"
"strings"
"github.com/hashicorp/consul/api"
"github.com/pkg/errors"
)
const (
HealthAny = "any"
HealthPassing = "passing"
HealthWarning = "warning"
HealthCritical = "critical"
HealthMaint = "maintenance"
NodeMaint = "_node_maintenance"
ServiceMaint = "_service_maintenance:"
)
var (
// Ensure implements
_ Dependency = (*HealthServiceQuery)(nil)
// HealthServiceQueryRe is the regular expression to use.
HealthServiceQueryRe = regexp.MustCompile(`\A` + tagRe + serviceNameRe + dcRe + nearRe + filterRe + `\z`)
)
func init() {
gob.Register([]*HealthService{})
}
// HealthService is a service entry in Consul.
type HealthService struct {
Node string
NodeID string
NodeAddress string
NodeTaggedAddresses map[string]string
NodeMeta map[string]string
ServiceMeta map[string]string
Address string
ID string
Name string
Tags ServiceTags
Checks api.HealthChecks
Status string
Port int
}
// HealthServiceQuery is the representation of all a service query in Consul.
type HealthServiceQuery struct {
stopCh chan struct{}
dc string
filters []string
name string
near string
tag string
}
// NewHealthServiceQuery processes the strings to build a service dependency.
func NewHealthServiceQuery(s string) (*HealthServiceQuery, error) {
if !HealthServiceQueryRe.MatchString(s) {
return nil, fmt.Errorf("health.service: invalid format: %q", s)
}
m := regexpMatch(HealthServiceQueryRe, s)
var filters []string
if filter := m["filter"]; filter != "" {
split := strings.Split(filter, ",")
for _, f := range split {
f = strings.TrimSpace(f)
switch f {
case HealthAny,
HealthPassing,
HealthWarning,
HealthCritical,
HealthMaint:
filters = append(filters, f)
case "":
default:
return nil, fmt.Errorf("health.service: invalid filter: %q in %q", f, s)
}
}
sort.Strings(filters)
} else {
filters = []string{HealthPassing}
}
return &HealthServiceQuery{
stopCh: make(chan struct{}, 1),
dc: m["dc"],
filters: filters,
name: m["name"],
near: m["near"],
tag: m["tag"],
}, nil
}
// Fetch queries the Consul API defined by the given client and returns a slice
// of HealthService objects.
func (d *HealthServiceQuery) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) {
select {
case <-d.stopCh:
return nil, nil, ErrStopped
default:
}
opts = opts.Merge(&QueryOptions{
Datacenter: d.dc,
Near: d.near,
})
u := &url.URL{
Path: "/v1/health/service/" + d.name,
RawQuery: opts.String(),
}
if d.tag != "" {
q := u.Query()
q.Set("tag", d.tag)
u.RawQuery = q.Encode()
}
log.Printf("[TRACE] %s: GET %s", d, u)
// Check if a user-supplied filter was given. If so, we may be querying for
// more than healthy services, so we need to implement client-side filtering.
passingOnly := len(d.filters) == 1 && d.filters[0] == HealthPassing
entries, qm, err := clients.Consul().Health().Service(d.name, d.tag, passingOnly, opts.ToConsulOpts())
if err != nil {
return nil, nil, errors.Wrap(err, d.String())
}
log.Printf("[TRACE] %s: returned %d results", d, len(entries))
list := make([]*HealthService, 0, len(entries))
for _, entry := range entries {
// Get the status of this service from its checks.
status := entry.Checks.AggregatedStatus()
// If we are not checking only healthy services, filter out services that do
// not match the given filter.
if !acceptStatus(d.filters, status) {
continue
}
// Get the address of the service, falling back to the address of the node.
address := entry.Service.Address
if address == "" {
address = entry.Node.Address
}
list = append(list, &HealthService{
Node: entry.Node.Node,
NodeID: entry.Node.ID,
NodeAddress: entry.Node.Address,
NodeTaggedAddresses: entry.Node.TaggedAddresses,
NodeMeta: entry.Node.Meta,
ServiceMeta: entry.Service.Meta,
Address: address,
ID: entry.Service.ID,
Name: entry.Service.Service,
Tags: ServiceTags(deepCopyAndSortTags(entry.Service.Tags)),
Status: status,
Checks: entry.Checks,
Port: entry.Service.Port,
})
}
log.Printf("[TRACE] %s: returned %d results after filtering", d, len(list))
// Sort unless the user explicitly asked for nearness
if d.near == "" {
sort.Stable(ByNodeThenID(list))
}
rm := &ResponseMetadata{
LastIndex: qm.LastIndex,
LastContact: qm.LastContact,
}
return list, rm, nil
}
// CanShare returns a boolean if this dependency is shareable.
func (d *HealthServiceQuery) CanShare() bool {
return true
}
// Stop halts the dependency's fetch function.
func (d *HealthServiceQuery) Stop() {
close(d.stopCh)
}
// String returns the human-friendly version of this dependency.
func (d *HealthServiceQuery) String() string {
name := d.name
if d.tag != "" {
name = d.tag + "." + name
}
if d.dc != "" {
name = name + "@" + d.dc
}
if d.near != "" {
name = name + "~" + d.near
}
if len(d.filters) > 0 {
name = name + "|" + strings.Join(d.filters, ",")
}
return fmt.Sprintf("health.service(%s)", name)
}
// Type returns the type of this dependency.
func (d *HealthServiceQuery) Type() Type {
return TypeConsul
}
// acceptStatus allows us to check if a slice of health checks pass this filter.
func acceptStatus(list []string, s string) bool {
for _, status := range list {
if status == s || status == HealthAny {
return true
}
}
return false
}
// ByNodeThenID is a sortable slice of Service
type ByNodeThenID []*HealthService
// Len, Swap, and Less are used to implement the sort.Sort interface.
func (s ByNodeThenID) Len() int { return len(s) }
func (s ByNodeThenID) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s ByNodeThenID) Less(i, j int) bool {
if s[i].Node < s[j].Node {
return true
} else if s[i].Node == s[j].Node {
return s[i].ID <= s[j].ID
}
return false
}

View File

@ -0,0 +1,112 @@
package dependency
import (
"fmt"
"log"
"net/url"
"regexp"
"github.com/pkg/errors"
)
var (
// Ensure implements
_ Dependency = (*KVGetQuery)(nil)
// KVGetQueryRe is the regular expression to use.
KVGetQueryRe = regexp.MustCompile(`\A` + keyRe + dcRe + `\z`)
)
// KVGetQuery queries the KV store for a single key.
type KVGetQuery struct {
stopCh chan struct{}
dc string
key string
block bool
}
// NewKVGetQuery parses a string into a dependency.
func NewKVGetQuery(s string) (*KVGetQuery, error) {
if s != "" && !KVGetQueryRe.MatchString(s) {
return nil, fmt.Errorf("kv.get: invalid format: %q", s)
}
m := regexpMatch(KVGetQueryRe, s)
return &KVGetQuery{
stopCh: make(chan struct{}, 1),
dc: m["dc"],
key: m["key"],
}, nil
}
// Fetch queries the Consul API defined by the given client.
func (d *KVGetQuery) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) {
select {
case <-d.stopCh:
return nil, nil, ErrStopped
default:
}
opts = opts.Merge(&QueryOptions{
Datacenter: d.dc,
})
log.Printf("[TRACE] %s: GET %s", d, &url.URL{
Path: "/v1/kv/" + d.key,
RawQuery: opts.String(),
})
pair, qm, err := clients.Consul().KV().Get(d.key, opts.ToConsulOpts())
if err != nil {
return nil, nil, errors.Wrap(err, d.String())
}
rm := &ResponseMetadata{
LastIndex: qm.LastIndex,
LastContact: qm.LastContact,
Block: d.block,
}
if pair == nil {
log.Printf("[TRACE] %s: returned nil", d)
return nil, rm, nil
}
value := string(pair.Value)
log.Printf("[TRACE] %s: returned %q", d, value)
return value, rm, nil
}
// EnableBlocking turns this into a blocking KV query.
func (d *KVGetQuery) EnableBlocking() {
d.block = true
}
// CanShare returns a boolean if this dependency is shareable.
func (d *KVGetQuery) CanShare() bool {
return true
}
// String returns the human-friendly version of this dependency.
func (d *KVGetQuery) String() string {
key := d.key
if d.dc != "" {
key = key + "@" + d.dc
}
if d.block {
return fmt.Sprintf("kv.block(%s)", key)
}
return fmt.Sprintf("kv.get(%s)", key)
}
// Stop halts the dependency's fetch function.
func (d *KVGetQuery) Stop() {
close(d.stopCh)
}
// Type returns the type of this dependency.
func (d *KVGetQuery) Type() Type {
return TypeConsul
}

View File

@ -0,0 +1,104 @@
package dependency
import (
"fmt"
"log"
"net/url"
"regexp"
"strings"
"github.com/pkg/errors"
)
var (
// Ensure implements
_ Dependency = (*KVKeysQuery)(nil)
// KVKeysQueryRe is the regular expression to use.
KVKeysQueryRe = regexp.MustCompile(`\A` + prefixRe + dcRe + `\z`)
)
// KVKeysQuery queries the KV store for a single key.
type KVKeysQuery struct {
stopCh chan struct{}
dc string
prefix string
}
// NewKVKeysQuery parses a string into a dependency.
func NewKVKeysQuery(s string) (*KVKeysQuery, error) {
if s != "" && !KVKeysQueryRe.MatchString(s) {
return nil, fmt.Errorf("kv.keys: invalid format: %q", s)
}
m := regexpMatch(KVKeysQueryRe, s)
return &KVKeysQuery{
stopCh: make(chan struct{}, 1),
dc: m["dc"],
prefix: m["prefix"],
}, nil
}
// Fetch queries the Consul API defined by the given client.
func (d *KVKeysQuery) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) {
select {
case <-d.stopCh:
return nil, nil, ErrStopped
default:
}
opts = opts.Merge(&QueryOptions{
Datacenter: d.dc,
})
log.Printf("[TRACE] %s: GET %s", d, &url.URL{
Path: "/v1/kv/" + d.prefix,
RawQuery: opts.String(),
})
list, qm, err := clients.Consul().KV().Keys(d.prefix, "", opts.ToConsulOpts())
if err != nil {
return nil, nil, errors.Wrap(err, d.String())
}
keys := make([]string, len(list))
for i, v := range list {
v = strings.TrimPrefix(v, d.prefix)
v = strings.TrimLeft(v, "/")
keys[i] = v
}
log.Printf("[TRACE] %s: returned %d results", d, len(list))
rm := &ResponseMetadata{
LastIndex: qm.LastIndex,
LastContact: qm.LastContact,
}
return keys, rm, nil
}
// CanShare returns a boolean if this dependency is shareable.
func (d *KVKeysQuery) CanShare() bool {
return true
}
// String returns the human-friendly version of this dependency.
func (d *KVKeysQuery) String() string {
prefix := d.prefix
if d.dc != "" {
prefix = prefix + "@" + d.dc
}
return fmt.Sprintf("kv.keys(%s)", prefix)
}
// Stop halts the dependency's fetch function.
func (d *KVKeysQuery) Stop() {
close(d.stopCh)
}
// Type returns the type of this dependency.
func (d *KVKeysQuery) Type() Type {
return TypeConsul
}

View File

@ -0,0 +1,133 @@
package dependency
import (
"encoding/gob"
"fmt"
"log"
"net/url"
"regexp"
"strings"
"github.com/pkg/errors"
)
var (
// Ensure implements
_ Dependency = (*KVListQuery)(nil)
// KVListQueryRe is the regular expression to use.
KVListQueryRe = regexp.MustCompile(`\A` + prefixRe + dcRe + `\z`)
)
func init() {
gob.Register([]*KeyPair{})
}
// KeyPair is a simple Key-Value pair
type KeyPair struct {
Path string
Key string
Value string
// Lesser-used, but still valuable keys from api.KV
CreateIndex uint64
ModifyIndex uint64
LockIndex uint64
Flags uint64
Session string
}
// KVListQuery queries the KV store for a single key.
type KVListQuery struct {
stopCh chan struct{}
dc string
prefix string
}
// NewKVListQuery parses a string into a dependency.
func NewKVListQuery(s string) (*KVListQuery, error) {
if s != "" && !KVListQueryRe.MatchString(s) {
return nil, fmt.Errorf("kv.list: invalid format: %q", s)
}
m := regexpMatch(KVListQueryRe, s)
return &KVListQuery{
stopCh: make(chan struct{}, 1),
dc: m["dc"],
prefix: m["prefix"],
}, nil
}
// Fetch queries the Consul API defined by the given client.
func (d *KVListQuery) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) {
select {
case <-d.stopCh:
return nil, nil, ErrStopped
default:
}
opts = opts.Merge(&QueryOptions{
Datacenter: d.dc,
})
log.Printf("[TRACE] %s: GET %s", d, &url.URL{
Path: "/v1/kv/" + d.prefix,
RawQuery: opts.String(),
})
list, qm, err := clients.Consul().KV().List(d.prefix, opts.ToConsulOpts())
if err != nil {
return nil, nil, errors.Wrap(err, d.String())
}
log.Printf("[TRACE] %s: returned %d pairs", d, len(list))
pairs := make([]*KeyPair, 0, len(list))
for _, pair := range list {
key := strings.TrimPrefix(pair.Key, d.prefix)
key = strings.TrimLeft(key, "/")
pairs = append(pairs, &KeyPair{
Path: pair.Key,
Key: key,
Value: string(pair.Value),
CreateIndex: pair.CreateIndex,
ModifyIndex: pair.ModifyIndex,
LockIndex: pair.LockIndex,
Flags: pair.Flags,
Session: pair.Session,
})
}
rm := &ResponseMetadata{
LastIndex: qm.LastIndex,
LastContact: qm.LastContact,
}
return pairs, rm, nil
}
// CanShare returns a boolean if this dependency is shareable.
func (d *KVListQuery) CanShare() bool {
return true
}
// String returns the human-friendly version of this dependency.
func (d *KVListQuery) String() string {
prefix := d.prefix
if d.dc != "" {
prefix = prefix + "@" + d.dc
}
return fmt.Sprintf("kv.list(%s)", prefix)
}
// Stop halts the dependency's fetch function.
func (d *KVListQuery) Stop() {
close(d.stopCh)
}
// Type returns the type of this dependency.
func (d *KVListQuery) Type() Type {
return TypeConsul
}

View File

@ -0,0 +1,72 @@
package dependency
import (
"strings"
"sync"
)
// Set is a dependency-specific set implementation. Relative ordering is
// preserved.
type Set struct {
once sync.Once
sync.RWMutex
list []string
set map[string]Dependency
}
// Add adds a new element to the set if it does not already exist.
func (s *Set) Add(d Dependency) bool {
s.init()
s.Lock()
defer s.Unlock()
if _, ok := s.set[d.String()]; !ok {
s.list = append(s.list, d.String())
s.set[d.String()] = d
return true
}
return false
}
// Get retrieves a single element from the set by name.
func (s *Set) Get(v string) Dependency {
s.RLock()
defer s.RUnlock()
return s.set[v]
}
// List returns the insertion-ordered list of dependencies.
func (s *Set) List() []Dependency {
s.RLock()
defer s.RUnlock()
r := make([]Dependency, len(s.list))
for i, k := range s.list {
r[i] = s.set[k]
}
return r
}
// Len is the size of the set.
func (s *Set) Len() int {
s.RLock()
defer s.RUnlock()
return len(s.list)
}
// String is a string representation of the set.
func (s *Set) String() string {
s.RLock()
defer s.RUnlock()
return strings.Join(s.list, ", ")
}
func (s *Set) init() {
s.once.Do(func() {
if s.list == nil {
s.list = make([]string, 0, 8)
}
if s.set == nil {
s.set = make(map[string]Dependency)
}
})
}

View File

@ -0,0 +1,121 @@
package dependency
import (
"io/ioutil"
"log"
"os"
"strings"
"time"
"github.com/pkg/errors"
)
var (
// Ensure implements
_ Dependency = (*VaultAgentTokenQuery)(nil)
)
const (
// VaultAgentTokenSleepTime is the amount of time to sleep between queries, since
// the fsnotify library is not compatible with solaris and other OSes yet.
VaultAgentTokenSleepTime = 15 * time.Second
)
// VaultAgentTokenQuery is the dependency to Vault Agent token
type VaultAgentTokenQuery struct {
stopCh chan struct{}
path string
stat os.FileInfo
}
// NewVaultAgentTokenQuery creates a new dependency.
func NewVaultAgentTokenQuery(path string) (*VaultAgentTokenQuery, error) {
return &VaultAgentTokenQuery{
stopCh: make(chan struct{}, 1),
path: path,
}, nil
}
// Fetch retrieves this dependency and returns the result or any errors that
// occur in the process.
func (d *VaultAgentTokenQuery) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) {
log.Printf("[TRACE] %s: READ %s", d, d.path)
select {
case <-d.stopCh:
log.Printf("[TRACE] %s: stopped", d)
return "", nil, ErrStopped
case r := <-d.watch(d.stat):
if r.err != nil {
return "", nil, errors.Wrap(r.err, d.String())
}
log.Printf("[TRACE] %s: reported change", d)
token, err := ioutil.ReadFile(d.path)
if err != nil {
return "", nil, errors.Wrap(err, d.String())
}
d.stat = r.stat
clients.Vault().SetToken(strings.TrimSpace(string(token)))
}
return respWithMetadata("")
}
// CanShare returns if this dependency is sharable.
func (d *VaultAgentTokenQuery) CanShare() bool {
return false
}
// Stop halts the dependency's fetch function.
func (d *VaultAgentTokenQuery) Stop() {
close(d.stopCh)
}
// String returns the human-friendly version of this dependency.
func (d *VaultAgentTokenQuery) String() string {
return "vault-agent.token"
}
// Type returns the type of this dependency.
func (d *VaultAgentTokenQuery) Type() Type {
return TypeVault
}
// watch watches the file for changes
func (d *VaultAgentTokenQuery) watch(lastStat os.FileInfo) <-chan *watchResult {
ch := make(chan *watchResult, 1)
go func(lastStat os.FileInfo) {
for {
stat, err := os.Stat(d.path)
if err != nil {
select {
case <-d.stopCh:
return
case ch <- &watchResult{err: err}:
return
}
}
changed := lastStat == nil ||
lastStat.Size() != stat.Size() ||
lastStat.ModTime() != stat.ModTime()
if changed {
select {
case <-d.stopCh:
return
case ch <- &watchResult{stat: stat}:
return
}
}
time.Sleep(VaultAgentTokenSleepTime)
}
}(lastStat)
return ch
}

View File

@ -0,0 +1,348 @@
package dependency
import (
"log"
"math/rand"
"path"
"strings"
"time"
"crypto/x509"
"encoding/pem"
"github.com/hashicorp/vault/api"
)
var (
// VaultDefaultLeaseDuration is the default lease duration in seconds.
VaultDefaultLeaseDuration = 5 * time.Minute
)
// Secret is the structure returned for every secret within Vault.
type Secret struct {
// The request ID that generated this response
RequestID string
LeaseID string
LeaseDuration int
Renewable bool
// Data is the actual contents of the secret. The format of the data
// is arbitrary and up to the secret backend.
Data map[string]interface{}
// Warnings contains any warnings related to the operation. These
// are not issues that caused the command to fail, but that the
// client should be aware of.
Warnings []string
// Auth, if non-nil, means that there was authentication information
// attached to this response.
Auth *SecretAuth
// WrapInfo, if non-nil, means that the initial response was wrapped in the
// cubbyhole of the given token (which has a TTL of the given number of
// seconds)
WrapInfo *SecretWrapInfo
}
// SecretAuth is the structure containing auth information if we have it.
type SecretAuth struct {
ClientToken string
Accessor string
Policies []string
Metadata map[string]string
LeaseDuration int
Renewable bool
}
// SecretWrapInfo contains wrapping information if we have it. If what is
// contained is an authentication token, the accessor for the token will be
// available in WrappedAccessor.
type SecretWrapInfo struct {
Token string
TTL int
CreationTime time.Time
WrappedAccessor string
}
//
type renewer interface {
Dependency
stopChan() chan struct{}
secrets() (*Secret, *api.Secret)
}
func renewSecret(clients *ClientSet, d renewer) error {
log.Printf("[TRACE] %s: starting renewer", d)
secret, vaultSecret := d.secrets()
renewer, err := clients.Vault().NewRenewer(&api.RenewerInput{
Secret: vaultSecret,
})
if err != nil {
return err
}
go renewer.Renew()
defer renewer.Stop()
for {
select {
case err := <-renewer.DoneCh():
if err != nil {
log.Printf("[WARN] %s: failed to renew: %s", d, err)
}
log.Printf("[WARN] %s: renewer done (maybe the lease expired)", d)
return nil
case renewal := <-renewer.RenewCh():
log.Printf("[TRACE] %s: successfully renewed", d)
printVaultWarnings(d, renewal.Secret.Warnings)
updateSecret(secret, renewal.Secret)
case <-d.stopChan():
return ErrStopped
}
}
}
// durationFrom cert gets the duration of validity from cert data and
// returns that value as an integer number of seconds
func durationFromCert(certData string) int {
block, _ := pem.Decode([]byte(certData))
if block == nil {
return -1
}
cert, err := x509.ParseCertificate(block.Bytes)
if err != nil {
log.Printf("[WARN] Unable to parse certificate data: %s", err)
return -1
}
return int(cert.NotAfter.Sub(cert.NotBefore).Seconds())
}
// leaseCheckWait accepts a secret and returns the recommended amount of
// time to sleep.
func leaseCheckWait(s *Secret) time.Duration {
// Handle whether this is an auth or a regular secret.
base := s.LeaseDuration
if s.Auth != nil && s.Auth.LeaseDuration > 0 {
base = s.Auth.LeaseDuration
}
// Handle if this is a certificate with no lease
if certInterface, ok := s.Data["certificate"]; ok && s.LeaseID == "" {
if certData, ok := certInterface.(string); ok {
newDuration := durationFromCert(certData)
if newDuration > 0 {
log.Printf("[DEBUG] Found certificate and set lease duration to %d seconds", newDuration)
base = newDuration
}
}
}
// Ensure we have a lease duration, since sometimes this can be zero.
if base <= 0 {
base = int(VaultDefaultLeaseDuration.Seconds())
}
// Convert to float seconds.
sleep := float64(time.Duration(base) * time.Second)
if vaultSecretRenewable(s) {
// Renew at 1/3 the remaining lease. This will give us an opportunity to retry
// at least one more time should the first renewal fail.
sleep = sleep / 3.0
// Use some randomness so many clients do not hit Vault simultaneously.
sleep = sleep * (rand.Float64() + 1) / 2.0
} else {
// For non-renewable leases set the renew duration to use much of the secret
// lease as possible. Use a stagger over 85%-95% of the lease duration so that
// many clients do not hit Vault simultaneously.
sleep = sleep * (.85 + rand.Float64()*0.1)
}
return time.Duration(sleep)
}
// printVaultWarnings prints warnings for a given dependency.
func printVaultWarnings(d Dependency, warnings []string) {
for _, w := range warnings {
log.Printf("[WARN] %s: %s", d, w)
}
}
// vaultSecretRenewable determines if the given secret is renewable.
func vaultSecretRenewable(s *Secret) bool {
if s.Auth != nil {
return s.Auth.Renewable
}
return s.Renewable
}
// transformSecret transforms an api secret into our secret. This does not deep
// copy underlying deep data structures, so it's not safe to modify the vault
// secret as that may modify the data in the transformed secret.
func transformSecret(theirs *api.Secret) *Secret {
var ours Secret
updateSecret(&ours, theirs)
return &ours
}
// updateSecret updates our secret with the new data from the api, careful to
// not overwrite missing data. Renewals don't include the original secret, and
// we don't want to delete that data accidentally.
func updateSecret(ours *Secret, theirs *api.Secret) {
if theirs.RequestID != "" {
ours.RequestID = theirs.RequestID
}
if theirs.LeaseID != "" {
ours.LeaseID = theirs.LeaseID
}
if theirs.LeaseDuration != 0 {
ours.LeaseDuration = theirs.LeaseDuration
}
if theirs.Renewable {
ours.Renewable = theirs.Renewable
}
if len(theirs.Data) != 0 {
ours.Data = theirs.Data
}
if len(theirs.Warnings) != 0 {
ours.Warnings = theirs.Warnings
}
if theirs.Auth != nil {
if ours.Auth == nil {
ours.Auth = &SecretAuth{}
}
if theirs.Auth.ClientToken != "" {
ours.Auth.ClientToken = theirs.Auth.ClientToken
}
if theirs.Auth.Accessor != "" {
ours.Auth.Accessor = theirs.Auth.Accessor
}
if len(theirs.Auth.Policies) != 0 {
ours.Auth.Policies = theirs.Auth.Policies
}
if len(theirs.Auth.Metadata) != 0 {
ours.Auth.Metadata = theirs.Auth.Metadata
}
if theirs.Auth.LeaseDuration != 0 {
ours.Auth.LeaseDuration = theirs.Auth.LeaseDuration
}
if theirs.Auth.Renewable {
ours.Auth.Renewable = theirs.Auth.Renewable
}
}
if theirs.WrapInfo != nil {
if ours.WrapInfo == nil {
ours.WrapInfo = &SecretWrapInfo{}
}
if theirs.WrapInfo.Token != "" {
ours.WrapInfo.Token = theirs.WrapInfo.Token
}
if theirs.WrapInfo.TTL != 0 {
ours.WrapInfo.TTL = theirs.WrapInfo.TTL
}
if !theirs.WrapInfo.CreationTime.IsZero() {
ours.WrapInfo.CreationTime = theirs.WrapInfo.CreationTime
}
if theirs.WrapInfo.WrappedAccessor != "" {
ours.WrapInfo.WrappedAccessor = theirs.WrapInfo.WrappedAccessor
}
}
}
func isKVv2(client *api.Client, path string) (string, bool, error) {
// We don't want to use a wrapping call here so save any custom value and
// restore after
currentWrappingLookupFunc := client.CurrentWrappingLookupFunc()
client.SetWrappingLookupFunc(nil)
defer client.SetWrappingLookupFunc(currentWrappingLookupFunc)
currentOutputCurlString := client.OutputCurlString()
client.SetOutputCurlString(false)
defer client.SetOutputCurlString(currentOutputCurlString)
r := client.NewRequest("GET", "/v1/sys/internal/ui/mounts/"+path)
resp, err := client.RawRequest(r)
if resp != nil {
defer resp.Body.Close()
}
if err != nil {
// If we get a 404 we are using an older version of vault, default to
// version 1
if resp != nil && resp.StatusCode == 404 {
return "", false, nil
}
// anonymous requests may fail to access /sys/internal/ui path
// Vault v1.1.3 returns 500 status code but may return 4XX in future
if client.Token() == "" {
return "", false, nil
}
return "", false, err
}
secret, err := api.ParseSecret(resp.Body)
if err != nil {
return "", false, err
}
var mountPath string
if mountPathRaw, ok := secret.Data["path"]; ok {
mountPath = mountPathRaw.(string)
}
var mountType string
if mountTypeRaw, ok := secret.Data["type"]; ok {
mountType = mountTypeRaw.(string)
}
options := secret.Data["options"]
if options == nil {
return mountPath, false, nil
}
versionRaw := options.(map[string]interface{})["version"]
if versionRaw == nil {
return mountPath, false, nil
}
version := versionRaw.(string)
switch version {
case "", "1":
return mountPath, false, nil
case "2":
return mountPath, mountType == "kv", nil
}
return mountPath, false, nil
}
func addPrefixToVKVPath(p, mountPath, apiPrefix string) string {
switch {
case p == mountPath, p == strings.TrimSuffix(mountPath, "/"):
return path.Join(mountPath, apiPrefix)
default:
p = strings.TrimPrefix(p, mountPath)
// Don't add /data to the path if it's been added manually.
if strings.HasPrefix(p, apiPrefix) {
return path.Join(mountPath, p)
}
return path.Join(mountPath, apiPrefix, p)
}
}

View File

@ -0,0 +1,126 @@
package dependency
import (
"fmt"
"log"
"net/url"
"sort"
"strings"
"time"
"github.com/pkg/errors"
)
var (
// Ensure implements
_ Dependency = (*VaultListQuery)(nil)
)
// VaultListQuery is the dependency to Vault for a secret
type VaultListQuery struct {
stopCh chan struct{}
path string
}
// NewVaultListQuery creates a new datacenter dependency.
func NewVaultListQuery(s string) (*VaultListQuery, error) {
s = strings.TrimSpace(s)
s = strings.Trim(s, "/")
if s == "" {
return nil, fmt.Errorf("vault.list: invalid format: %q", s)
}
return &VaultListQuery{
stopCh: make(chan struct{}, 1),
path: s,
}, nil
}
// Fetch queries the Vault API
func (d *VaultListQuery) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) {
select {
case <-d.stopCh:
return nil, nil, ErrStopped
default:
}
opts = opts.Merge(&QueryOptions{})
// If this is not the first query, poll to simulate blocking-queries.
if opts.WaitIndex != 0 {
dur := VaultDefaultLeaseDuration
log.Printf("[TRACE] %s: long polling for %s", d, dur)
select {
case <-d.stopCh:
return nil, nil, ErrStopped
case <-time.After(dur):
}
}
// If we got this far, we either didn't have a secret to renew, the secret was
// not renewable, or the renewal failed, so attempt a fresh list.
log.Printf("[TRACE] %s: LIST %s", d, &url.URL{
Path: "/v1/" + d.path,
RawQuery: opts.String(),
})
secret, err := clients.Vault().Logical().List(d.path)
if err != nil {
return nil, nil, errors.Wrap(err, d.String())
}
var result []string
// The secret could be nil if it does not exist.
if secret == nil || secret.Data == nil {
log.Printf("[TRACE] %s: no data", d)
return respWithMetadata(result)
}
// This is a weird thing that happened once...
keys, ok := secret.Data["keys"]
if !ok {
log.Printf("[TRACE] %s: no keys", d)
return respWithMetadata(result)
}
list, ok := keys.([]interface{})
if !ok {
log.Printf("[TRACE] %s: not list", d)
return nil, nil, fmt.Errorf("%s: unexpected response", d)
}
for _, v := range list {
typed, ok := v.(string)
if !ok {
return nil, nil, fmt.Errorf("%s: non-string in list", d)
}
result = append(result, typed)
}
sort.Strings(result)
log.Printf("[TRACE] %s: returned %d results", d, len(result))
return respWithMetadata(result)
}
// CanShare returns if this dependency is shareable.
func (d *VaultListQuery) CanShare() bool {
return false
}
// Stop halts the given dependency's fetch.
func (d *VaultListQuery) Stop() {
close(d.stopCh)
}
// String returns the human-friendly version of this dependency.
func (d *VaultListQuery) String() string {
return fmt.Sprintf("vault.list(%s)", d.path)
}
// Type returns the type of this dependency.
func (d *VaultListQuery) Type() Type {
return TypeVault
}

View File

@ -0,0 +1,175 @@
package dependency
import (
"fmt"
"log"
"net/url"
"strings"
"time"
"github.com/hashicorp/vault/api"
"github.com/pkg/errors"
)
var (
// Ensure implements
_ Dependency = (*VaultReadQuery)(nil)
)
// VaultReadQuery is the dependency to Vault for a secret
type VaultReadQuery struct {
stopCh chan struct{}
sleepCh chan time.Duration
rawPath string
queryValues url.Values
secret *Secret
isKVv2 *bool
secretPath string
// vaultSecret is the actual Vault secret which we are renewing
vaultSecret *api.Secret
}
// NewVaultReadQuery creates a new datacenter dependency.
func NewVaultReadQuery(s string) (*VaultReadQuery, error) {
s = strings.TrimSpace(s)
s = strings.Trim(s, "/")
if s == "" {
return nil, fmt.Errorf("vault.read: invalid format: %q", s)
}
secretURL, err := url.Parse(s)
if err != nil {
return nil, err
}
return &VaultReadQuery{
stopCh: make(chan struct{}, 1),
sleepCh: make(chan time.Duration, 1),
rawPath: secretURL.Path,
queryValues: secretURL.Query(),
}, nil
}
// Fetch queries the Vault API
func (d *VaultReadQuery) Fetch(clients *ClientSet, opts *QueryOptions,
) (interface{}, *ResponseMetadata, error) {
select {
case <-d.stopCh:
return nil, nil, ErrStopped
default:
}
select {
case dur := <-d.sleepCh:
time.Sleep(dur)
default:
}
firstRun := d.secret == nil
if !firstRun && vaultSecretRenewable(d.secret) {
err := renewSecret(clients, d)
if err != nil {
return nil, nil, errors.Wrap(err, d.String())
}
}
err := d.fetchSecret(clients, opts)
if err != nil {
return nil, nil, errors.Wrap(err, d.String())
}
if !vaultSecretRenewable(d.secret) {
dur := leaseCheckWait(d.secret)
log.Printf("[TRACE] %s: non-renewable secret, set sleep for %s", d, dur)
d.sleepCh <- dur
}
return respWithMetadata(d.secret)
}
func (d *VaultReadQuery) fetchSecret(clients *ClientSet, opts *QueryOptions,
) error {
opts = opts.Merge(&QueryOptions{})
vaultSecret, err := d.readSecret(clients, opts)
if err == nil {
printVaultWarnings(d, vaultSecret.Warnings)
d.vaultSecret = vaultSecret
// the cloned secret which will be exposed to the template
d.secret = transformSecret(vaultSecret)
}
return err
}
func (d *VaultReadQuery) stopChan() chan struct{} {
return d.stopCh
}
func (d *VaultReadQuery) secrets() (*Secret, *api.Secret) {
return d.secret, d.vaultSecret
}
// CanShare returns if this dependency is shareable.
func (d *VaultReadQuery) CanShare() bool {
return false
}
// Stop halts the given dependency's fetch.
func (d *VaultReadQuery) Stop() {
close(d.stopCh)
}
// String returns the human-friendly version of this dependency.
func (d *VaultReadQuery) String() string {
return fmt.Sprintf("vault.read(%s)", d.rawPath)
}
// Type returns the type of this dependency.
func (d *VaultReadQuery) Type() Type {
return TypeVault
}
func (d *VaultReadQuery) readSecret(clients *ClientSet, opts *QueryOptions) (*api.Secret, error) {
vaultClient := clients.Vault()
// Check whether this secret refers to a KV v2 entry if we haven't yet.
if d.isKVv2 == nil {
mountPath, isKVv2, err := isKVv2(vaultClient, d.rawPath)
if err != nil {
log.Printf("[WARN] %s: failed to check if %s is KVv2, "+
"assume not: %s", d, d.rawPath, err)
isKVv2 = false
d.secretPath = d.rawPath
} else if isKVv2 {
d.secretPath = addPrefixToVKVPath(d.rawPath, mountPath, "data")
} else {
d.secretPath = d.rawPath
}
d.isKVv2 = &isKVv2
}
queryString := d.queryValues.Encode()
log.Printf("[TRACE] %s: GET %s", d, &url.URL{
Path: "/v1/" + d.secretPath,
RawQuery: queryString,
})
vaultSecret, err := vaultClient.Logical().ReadWithData(d.secretPath,
d.queryValues)
if err != nil {
return nil, errors.Wrap(err, d.String())
}
if vaultSecret == nil || deletedKVv2(vaultSecret) {
return nil, fmt.Errorf("no secret exists at %s", d.secretPath)
}
return vaultSecret, nil
}
func deletedKVv2(s *api.Secret) bool {
switch md := s.Data["metadata"].(type) {
case map[string]interface{}:
return md["deletion_time"] != ""
}
return false
}

View File

@ -0,0 +1,95 @@
package dependency
import (
"log"
"time"
"github.com/hashicorp/vault/api"
)
var (
// Ensure implements
_ Dependency = (*VaultTokenQuery)(nil)
)
// VaultTokenQuery is the dependency to Vault for a secret
type VaultTokenQuery struct {
stopCh chan struct{}
secret *Secret
vaultSecret *api.Secret
}
// NewVaultTokenQuery creates a new dependency.
func NewVaultTokenQuery(token string) (*VaultTokenQuery, error) {
vaultSecret := &api.Secret{
Auth: &api.SecretAuth{
ClientToken: token,
Renewable: true,
LeaseDuration: 1,
},
}
return &VaultTokenQuery{
stopCh: make(chan struct{}, 1),
vaultSecret: vaultSecret,
secret: transformSecret(vaultSecret),
}, nil
}
// Fetch queries the Vault API
func (d *VaultTokenQuery) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) {
select {
case <-d.stopCh:
return nil, nil, ErrStopped
default:
}
if vaultSecretRenewable(d.secret) {
renewSecret(clients, d)
}
// The secret isn't renewable, probably the generic secret backend.
// TODO This is incorrect when given a non-renewable template. We should
// instead to a lookup self to determine the lease duration.
opts = opts.Merge(&QueryOptions{})
dur := leaseCheckWait(d.secret)
if dur < opts.VaultGrace {
dur = opts.VaultGrace
}
log.Printf("[TRACE] %s: token is not renewable, sleeping for %s", d, dur)
select {
case <-time.After(dur):
case <-d.stopCh:
return nil, nil, ErrStopped
}
return nil, nil, ErrLeaseExpired
}
func (d *VaultTokenQuery) stopChan() chan struct{} {
return d.stopCh
}
func (d *VaultTokenQuery) secrets() (*Secret, *api.Secret) {
return d.secret, d.vaultSecret
}
// CanShare returns if this dependency is shareable.
func (d *VaultTokenQuery) CanShare() bool {
return false
}
// Stop halts the dependency's fetch function.
func (d *VaultTokenQuery) Stop() {
close(d.stopCh)
}
// String returns the human-friendly version of this dependency.
func (d *VaultTokenQuery) String() string {
return "vault.token"
}
// Type returns the type of this dependency.
func (d *VaultTokenQuery) Type() Type {
return TypeVault
}

View File

@ -0,0 +1,177 @@
package dependency
import (
"crypto/sha1"
"fmt"
"io"
"log"
"net/url"
"sort"
"strings"
"time"
"github.com/hashicorp/vault/api"
"github.com/pkg/errors"
)
var (
// Ensure implements
_ Dependency = (*VaultWriteQuery)(nil)
)
// VaultWriteQuery is the dependency to Vault for a secret
type VaultWriteQuery struct {
stopCh chan struct{}
sleepCh chan time.Duration
path string
data map[string]interface{}
dataHash string
secret *Secret
// vaultSecret is the actual Vault secret which we are renewing
vaultSecret *api.Secret
}
// NewVaultWriteQuery creates a new datacenter dependency.
func NewVaultWriteQuery(s string, d map[string]interface{}) (*VaultWriteQuery, error) {
s = strings.TrimSpace(s)
s = strings.Trim(s, "/")
if s == "" {
return nil, fmt.Errorf("vault.write: invalid format: %q", s)
}
return &VaultWriteQuery{
stopCh: make(chan struct{}, 1),
sleepCh: make(chan time.Duration, 1),
path: s,
data: d,
dataHash: sha1Map(d),
}, nil
}
// Fetch queries the Vault API
func (d *VaultWriteQuery) Fetch(clients *ClientSet, opts *QueryOptions,
) (interface{}, *ResponseMetadata, error) {
select {
case <-d.stopCh:
return nil, nil, ErrStopped
default:
}
select {
case dur := <-d.sleepCh:
time.Sleep(dur)
default:
}
firstRun := d.secret == nil
if !firstRun && vaultSecretRenewable(d.secret) {
err := renewSecret(clients, d)
if err != nil {
return nil, nil, errors.Wrap(err, d.String())
}
}
opts = opts.Merge(&QueryOptions{})
vaultSecret, err := d.writeSecret(clients, opts)
if err != nil {
return nil, nil, errors.Wrap(err, d.String())
}
// vaultSecret == nil when writing to KVv1 engines
if vaultSecret == nil {
return respWithMetadata(d.secret)
}
printVaultWarnings(d, vaultSecret.Warnings)
d.vaultSecret = vaultSecret
// cloned secret which will be exposed to the template
d.secret = transformSecret(vaultSecret)
if !vaultSecretRenewable(d.secret) {
dur := leaseCheckWait(d.secret)
log.Printf("[TRACE] %s: non-renewable secret, set sleep for %s", d, dur)
d.sleepCh <- dur
}
return respWithMetadata(d.secret)
}
// meet renewer interface
func (d *VaultWriteQuery) stopChan() chan struct{} {
return d.stopCh
}
func (d *VaultWriteQuery) secrets() (*Secret, *api.Secret) {
return d.secret, d.vaultSecret
}
// CanShare returns if this dependency is shareable.
func (d *VaultWriteQuery) CanShare() bool {
return false
}
// Stop halts the given dependency's fetch.
func (d *VaultWriteQuery) Stop() {
close(d.stopCh)
}
// String returns the human-friendly version of this dependency.
func (d *VaultWriteQuery) String() string {
return fmt.Sprintf("vault.write(%s -> %s)", d.path, d.dataHash)
}
// Type returns the type of this dependency.
func (d *VaultWriteQuery) Type() Type {
return TypeVault
}
// sha1Map returns the sha1 hash of the data in the map. The reason this data is
// hashed is because it appears in the output and could contain sensitive
// information.
func sha1Map(m map[string]interface{}) string {
keys := make([]string, 0, len(m))
for k, _ := range m {
keys = append(keys, k)
}
sort.Strings(keys)
h := sha1.New()
for _, k := range keys {
io.WriteString(h, fmt.Sprintf("%s=%q", k, m[k]))
}
return fmt.Sprintf("%.4x", h.Sum(nil))
}
func (d *VaultWriteQuery) printWarnings(warnings []string) {
for _, w := range warnings {
log.Printf("[WARN] %s: %s", d, w)
}
}
func (d *VaultWriteQuery) writeSecret(clients *ClientSet, opts *QueryOptions) (*api.Secret, error) {
log.Printf("[TRACE] %s: PUT %s", d, &url.URL{
Path: "/v1/" + d.path,
RawQuery: opts.String(),
})
data := d.data
_, isv2, _ := isKVv2(clients.Vault(), d.path)
if isv2 {
data = map[string]interface{}{"data": d.data}
}
vaultSecret, err := clients.Vault().Logical().Write(d.path, data)
if err != nil {
return nil, errors.Wrap(err, d.String())
}
// vaultSecret is always nil when KVv1 engine (isv2==false)
if isv2 && vaultSecret == nil {
return nil, fmt.Errorf("no secret exists at %s", d.path)
}
return vaultSecret, nil
}

View File

@ -0,0 +1,512 @@
package manager
import (
"bytes"
"compress/lzw"
"encoding/gob"
"fmt"
"log"
"path"
"sync"
"time"
"github.com/mitchellh/hashstructure"
"github.com/hashicorp/consul-template/config"
dep "github.com/hashicorp/consul-template/dependency"
"github.com/hashicorp/consul-template/template"
"github.com/hashicorp/consul-template/version"
consulapi "github.com/hashicorp/consul/api"
)
var (
// sessionCreateRetry is the amount of time we wait
// to recreate a session when lost.
sessionCreateRetry = 15 * time.Second
// lockRetry is the interval on which we try to re-acquire locks
lockRetry = 10 * time.Second
// listRetry is the interval on which we retry listing a data path
listRetry = 10 * time.Second
// timeout passed through to consul api client Lock
// here to override in testing (see ./dedup_test.go)
lockWaitTime = 15 * time.Second
)
const (
templateNoDataStr = "__NO_DATA__"
)
// templateData is GOB encoded share the dependency values
type templateData struct {
// Version is the version of Consul Template which created this template data.
// This is important because users may be running multiple versions of CT
// with the same templates. This provides a nicer upgrade path.
Version string
// Data is the actual template data.
Data map[string]interface{}
}
func templateNoData() []byte {
return []byte(templateNoDataStr)
}
// DedupManager is used to de-duplicate which instance of Consul-Template
// is handling each template. For each template, a lock path is determined
// using the MD5 of the template. This path is used to elect a "leader"
// instance.
//
// The leader instance operations like usual, but any time a template is
// rendered, any of the data required for rendering is stored in the
// Consul KV store under the lock path.
//
// The follower instances depend on the leader to do the primary watching
// and rendering, and instead only watch the aggregated data in the KV.
// Followers wait for updates and re-render the template.
//
// If a template depends on 50 views, and is running on 50 machines, that
// would normally require 2500 blocking queries. Using deduplication, one
// instance has 50 view queries, plus 50 additional queries on the lock
// path for a total of 100.
//
type DedupManager struct {
// config is the deduplicate configuration
config *config.DedupConfig
// clients is used to access the underlying clients
clients *dep.ClientSet
// Brain is where we inject updates
brain *template.Brain
// templates is the set of templates we are trying to dedup
templates []*template.Template
// leader tracks if we are currently the leader
leader map[*template.Template]<-chan struct{}
leaderLock sync.RWMutex
// lastWrite tracks the hash of the data paths
lastWrite map[*template.Template]uint64
lastWriteLock sync.RWMutex
// updateCh is used to indicate an update watched data
updateCh chan struct{}
// wg is used to wait for a clean shutdown
wg sync.WaitGroup
stop bool
stopCh chan struct{}
stopLock sync.Mutex
}
// NewDedupManager creates a new Dedup manager
func NewDedupManager(config *config.DedupConfig, clients *dep.ClientSet, brain *template.Brain, templates []*template.Template) (*DedupManager, error) {
d := &DedupManager{
config: config,
clients: clients,
brain: brain,
templates: templates,
leader: make(map[*template.Template]<-chan struct{}),
lastWrite: make(map[*template.Template]uint64),
updateCh: make(chan struct{}, 1),
stopCh: make(chan struct{}),
}
return d, nil
}
// Start is used to start the de-duplication manager
func (d *DedupManager) Start() error {
log.Printf("[INFO] (dedup) starting de-duplication manager")
client := d.clients.Consul()
go d.createSession(client)
// Start to watch each template
for _, t := range d.templates {
go d.watchTemplate(client, t)
}
return nil
}
// Stop is used to stop the de-duplication manager
func (d *DedupManager) Stop() error {
d.stopLock.Lock()
defer d.stopLock.Unlock()
if d.stop {
return nil
}
log.Printf("[INFO] (dedup) stopping de-duplication manager")
d.stop = true
close(d.stopCh)
d.wg.Wait()
return nil
}
// createSession is used to create and maintain a session to Consul
func (d *DedupManager) createSession(client *consulapi.Client) {
START:
log.Printf("[INFO] (dedup) attempting to create session")
session := client.Session()
sessionCh := make(chan struct{})
ttl := fmt.Sprintf("%.6fs", float64(*d.config.TTL)/float64(time.Second))
se := &consulapi.SessionEntry{
Name: "Consul-Template de-duplication",
Behavior: "delete",
TTL: ttl,
LockDelay: 1 * time.Millisecond,
}
id, _, err := session.Create(se, nil)
if err != nil {
log.Printf("[ERR] (dedup) failed to create session: %v", err)
goto WAIT
}
log.Printf("[INFO] (dedup) created session %s", id)
// Attempt to lock each template
for _, t := range d.templates {
d.wg.Add(1)
go d.attemptLock(client, id, sessionCh, t)
}
// Renew our session periodically
if err := session.RenewPeriodic("15s", id, nil, d.stopCh); err != nil {
log.Printf("[ERR] (dedup) failed to renew session: %v", err)
}
close(sessionCh)
d.wg.Wait()
WAIT:
select {
case <-time.After(sessionCreateRetry):
goto START
case <-d.stopCh:
return
}
}
// IsLeader checks if we are currently the leader instance
func (d *DedupManager) IsLeader(tmpl *template.Template) bool {
d.leaderLock.RLock()
defer d.leaderLock.RUnlock()
lockCh, ok := d.leader[tmpl]
if !ok {
return false
}
select {
case <-lockCh:
return false
default:
return true
}
}
// UpdateDeps is used to update the values of the dependencies for a template
func (d *DedupManager) UpdateDeps(t *template.Template, deps []dep.Dependency) error {
// Calculate the path to write updates to
dataPath := path.Join(*d.config.Prefix, t.ID(), "data")
// Package up the dependency data
td := templateData{
Version: version.Version,
Data: make(map[string]interface{}),
}
for _, dp := range deps {
// Skip any dependencies that can't be shared
if !dp.CanShare() {
continue
}
// Pull the current value from the brain
val, ok := d.brain.Recall(dp)
if ok {
td.Data[dp.String()] = val
}
}
// Compute stable hash of the data. Note we don't compute this over the actual
// encoded value since gob encoding does not guarantee stable ordering for
// maps so spuriously returns a different hash most times. See
// https://github.com/hashicorp/consul-template/issues/1099.
hash, err := hashstructure.Hash(td, nil)
if err != nil {
return fmt.Errorf("calculating hash failed: %v", err)
}
d.lastWriteLock.RLock()
existing, ok := d.lastWrite[t]
d.lastWriteLock.RUnlock()
if ok && existing == hash {
log.Printf("[INFO] (dedup) de-duplicate data '%s' already current",
dataPath)
return nil
}
// Encode via GOB and LZW compress
var buf bytes.Buffer
compress := lzw.NewWriter(&buf, lzw.LSB, 8)
enc := gob.NewEncoder(compress)
if err := enc.Encode(&td); err != nil {
return fmt.Errorf("encode failed: %v", err)
}
compress.Close()
// Write the KV update
kvPair := consulapi.KVPair{
Key: dataPath,
Value: buf.Bytes(),
Flags: consulapi.LockFlagValue,
}
client := d.clients.Consul()
if _, err := client.KV().Put(&kvPair, nil); err != nil {
return fmt.Errorf("failed to write '%s': %v", dataPath, err)
}
log.Printf("[INFO] (dedup) updated de-duplicate data '%s'", dataPath)
d.lastWriteLock.Lock()
d.lastWrite[t] = hash
d.lastWriteLock.Unlock()
return nil
}
// UpdateCh returns a channel to watch for dependency updates
func (d *DedupManager) UpdateCh() <-chan struct{} {
return d.updateCh
}
// setLeader sets if we are currently the leader instance
func (d *DedupManager) setLeader(tmpl *template.Template, lockCh <-chan struct{}) {
// Update the lock state
d.leaderLock.Lock()
if lockCh != nil {
d.leader[tmpl] = lockCh
} else {
delete(d.leader, tmpl)
}
d.leaderLock.Unlock()
// Clear the lastWrite hash if we've lost leadership
if lockCh == nil {
d.lastWriteLock.Lock()
delete(d.lastWrite, tmpl)
d.lastWriteLock.Unlock()
}
// Do an async notify of an update
select {
case d.updateCh <- struct{}{}:
default:
}
}
func (d *DedupManager) watchTemplate(client *consulapi.Client, t *template.Template) {
log.Printf("[INFO] (dedup) starting watch for template hash %s", t.ID())
path := path.Join(*d.config.Prefix, t.ID(), "data")
// Determine if stale queries are allowed
var allowStale bool
if *d.config.MaxStale != 0 {
allowStale = true
}
// Setup our query options
opts := &consulapi.QueryOptions{
AllowStale: allowStale,
WaitTime: 60 * time.Second,
}
var lastData []byte
var lastIndex uint64
START:
// Stop listening if we're stopped
select {
case <-d.stopCh:
return
default:
}
// If we are current the leader, wait for leadership lost
d.leaderLock.RLock()
lockCh, ok := d.leader[t]
d.leaderLock.RUnlock()
if ok {
select {
case <-lockCh:
goto START
case <-d.stopCh:
return
}
}
// Block for updates on the data key
log.Printf("[INFO] (dedup) listing data for template hash %s", t.ID())
pair, meta, err := client.KV().Get(path, opts)
if err != nil {
log.Printf("[ERR] (dedup) failed to get '%s': %v", path, err)
select {
case <-time.After(listRetry):
goto START
case <-d.stopCh:
return
}
}
opts.WaitIndex = meta.LastIndex
// Stop listening if we're stopped
select {
case <-d.stopCh:
return
default:
}
// If we've exceeded the maximum staleness, retry without stale
if allowStale && meta.LastContact > *d.config.MaxStale {
allowStale = false
log.Printf("[DEBUG] (dedup) %s stale data (last contact exceeded max_stale)", path)
goto START
}
// Re-enable stale queries if allowed
if *d.config.MaxStale > 0 {
allowStale = true
}
if meta.LastIndex == lastIndex {
log.Printf("[TRACE] (dedup) %s no new data (index was the same)", path)
goto START
}
if meta.LastIndex < lastIndex {
log.Printf("[TRACE] (dedup) %s had a lower index, resetting", path)
lastIndex = 0
goto START
}
lastIndex = meta.LastIndex
var data []byte
if pair != nil {
data = pair.Value
}
if bytes.Equal(lastData, data) {
log.Printf("[TRACE] (dedup) %s no new data (contents were the same)", path)
goto START
}
lastData = data
// If we are current the leader, wait for leadership lost
d.leaderLock.RLock()
lockCh, ok = d.leader[t]
d.leaderLock.RUnlock()
if ok {
select {
case <-lockCh:
goto START
case <-d.stopCh:
return
}
}
// Parse the data file
if pair != nil && pair.Flags == consulapi.LockFlagValue && !bytes.Equal(pair.Value, templateNoData()) {
d.parseData(pair.Key, pair.Value)
}
goto START
}
// parseData is used to update brain from a KV data pair
func (d *DedupManager) parseData(path string, raw []byte) {
// Setup the decompression and decoders
r := bytes.NewReader(raw)
decompress := lzw.NewReader(r, lzw.LSB, 8)
defer decompress.Close()
dec := gob.NewDecoder(decompress)
// Decode the data
var td templateData
if err := dec.Decode(&td); err != nil {
log.Printf("[ERR] (dedup) failed to decode '%s': %v",
path, err)
return
}
if td.Version != version.Version {
log.Printf("[WARN] (dedup) created with different version (%s vs %s)",
td.Version, version.Version)
return
}
log.Printf("[INFO] (dedup) loading %d dependencies from '%s'",
len(td.Data), path)
// Update the data in the brain
for hashCode, value := range td.Data {
d.brain.ForceSet(hashCode, value)
}
// Trigger the updateCh
select {
case d.updateCh <- struct{}{}:
default:
}
}
func (d *DedupManager) attemptLock(client *consulapi.Client, session string, sessionCh chan struct{}, t *template.Template) {
defer d.wg.Done()
for {
log.Printf("[INFO] (dedup) attempting lock for template hash %s", t.ID())
basePath := path.Join(*d.config.Prefix, t.ID())
lopts := &consulapi.LockOptions{
Key: path.Join(basePath, "data"),
Value: templateNoData(),
Session: session,
MonitorRetries: 3,
MonitorRetryTime: 3 * time.Second,
LockWaitTime: lockWaitTime,
}
lock, err := client.LockOpts(lopts)
if err != nil {
log.Printf("[ERR] (dedup) failed to create lock '%s': %v",
lopts.Key, err)
return
}
var retryCh <-chan time.Time
leaderCh, err := lock.Lock(sessionCh)
if err != nil {
log.Printf("[ERR] (dedup) failed to acquire lock '%s': %v",
lopts.Key, err)
retryCh = time.After(lockRetry)
} else {
log.Printf("[INFO] (dedup) acquired lock '%s'", lopts.Key)
d.setLeader(t, leaderCh)
}
select {
case <-retryCh:
retryCh = nil
continue
case <-leaderCh:
log.Printf("[WARN] (dedup) lost lock ownership '%s'", lopts.Key)
d.setLeader(t, nil)
continue
case <-sessionCh:
log.Printf("[INFO] (dedup) releasing session '%s'", lopts.Key)
d.setLeader(t, nil)
_, err = client.Session().Destroy(session, nil)
if err != nil {
log.Printf("[ERROR] (dedup) failed destroying session '%s', %s", session, err)
}
return
case <-d.stopCh:
log.Printf("[INFO] (dedup) releasing lock '%s'", lopts.Key)
_, err = client.Session().Destroy(session, nil)
if err != nil {
log.Printf("[ERROR] (dedup) failed destroying session '%s', %s", session, err)
}
return
}
}
}

View File

@ -0,0 +1,31 @@
package manager
import "fmt"
// ErrExitable is an interface that defines an integer ExitStatus() function.
type ErrExitable interface {
ExitStatus() int
}
var _ error = new(ErrChildDied)
var _ ErrExitable = new(ErrChildDied)
// ErrChildDied is the error returned when the child process prematurely dies.
type ErrChildDied struct {
code int
}
// NewErrChildDied creates a new error with the given exit code.
func NewErrChildDied(c int) *ErrChildDied {
return &ErrChildDied{code: c}
}
// Error implements the error interface.
func (e *ErrChildDied) Error() string {
return fmt.Sprintf("child process died with exit code %d", e.code)
}
// ExitStatus implements the ErrExitable interface.
func (e *ErrChildDied) ExitStatus() int {
return e.code
}

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,22 @@
//+build !windows
package renderer
import (
"os"
"syscall"
)
func preserveFilePermissions(path string, fileInfo os.FileInfo) error {
sysInfo := fileInfo.Sys()
if sysInfo != nil {
stat, ok := sysInfo.(*syscall.Stat_t)
if ok {
if err := os.Chown(path, int(stat.Uid), int(stat.Gid)); err != nil {
return err
}
}
}
return nil
}

View File

@ -0,0 +1,9 @@
//+build windows
package renderer
import "os"
func preserveFilePermissions(path string, fileInfo os.FileInfo) error {
return nil
}

View File

@ -0,0 +1,182 @@
package renderer
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"path/filepath"
"github.com/pkg/errors"
)
const (
// DefaultFilePerms are the default file permissions for files rendered onto
// disk when a specific file permission has not already been specified.
DefaultFilePerms = 0644
)
var (
// ErrNoParentDir is the error returned with the parent directory is missing
// and the user disabled it.
ErrNoParentDir = errors.New("parent directory is missing")
// ErrMissingDest is the error returned with the destination is empty.
ErrMissingDest = errors.New("missing destination")
)
// RenderInput is used as input to the render function.
type RenderInput struct {
Backup bool
Contents []byte
CreateDestDirs bool
Dry bool
DryStream io.Writer
Path string
Perms os.FileMode
}
// RenderResult is returned and stored. It contains the status of the render
// operation.
type RenderResult struct {
// DidRender indicates if the template rendered to disk. This will be false in
// the event of an error, but it will also be false in dry mode or when the
// template on disk matches the new result.
DidRender bool
// WouldRender indicates if the template would have rendered to disk. This
// will return false in the event of an error, but will return true in dry
// mode or when the template on disk matches the new result.
WouldRender bool
// Contents are the actual contents of the resulting template from the render
// operation.
Contents []byte
}
// Render atomically renders a file contents to disk, returning a result of
// whether it would have rendered and actually did render.
func Render(i *RenderInput) (*RenderResult, error) {
existing, err := ioutil.ReadFile(i.Path)
if err != nil && !os.IsNotExist(err) {
return nil, errors.Wrap(err, "failed reading file")
}
if bytes.Equal(existing, i.Contents) {
return &RenderResult{
DidRender: false,
WouldRender: true,
Contents: existing,
}, nil
}
if i.Dry {
fmt.Fprintf(i.DryStream, "> %s\n%s", i.Path, i.Contents)
} else {
if err := AtomicWrite(i.Path, i.CreateDestDirs, i.Contents, i.Perms, i.Backup); err != nil {
return nil, errors.Wrap(err, "failed writing file")
}
}
return &RenderResult{
DidRender: true,
WouldRender: true,
Contents: i.Contents,
}, nil
}
// AtomicWrite accepts a destination path and the template contents. It writes
// the template contents to a TempFile on disk, returning if any errors occur.
//
// If the parent destination directory does not exist, it will be created
// automatically with permissions 0755. To use a different permission, create
// the directory first or use `chmod` in a Command.
//
// If the destination path exists, all attempts will be made to preserve the
// existing file permissions. If those permissions cannot be read, an error is
// returned. If the file does not exist, it will be created automatically with
// permissions 0644. To use a different permission, create the destination file
// first or use `chmod` in a Command.
//
// If no errors occur, the Tempfile is "renamed" (moved) to the destination
// path.
func AtomicWrite(path string, createDestDirs bool, contents []byte, perms os.FileMode, backup bool) error {
if path == "" {
return ErrMissingDest
}
parent := filepath.Dir(path)
if _, err := os.Stat(parent); os.IsNotExist(err) {
if createDestDirs {
if err := os.MkdirAll(parent, 0755); err != nil {
return err
}
} else {
return ErrNoParentDir
}
}
f, err := ioutil.TempFile(parent, "")
if err != nil {
return err
}
defer os.Remove(f.Name())
if _, err := f.Write(contents); err != nil {
return err
}
if err := f.Sync(); err != nil {
return err
}
if err := f.Close(); err != nil {
return err
}
// If the user did not explicitly set permissions, attempt to lookup the
// current permissions on the file. If the file does not exist, fall back to
// the default. Otherwise, inherit the current permissions.
if perms == 0 {
currentInfo, err := os.Stat(path)
if err != nil {
if os.IsNotExist(err) {
perms = DefaultFilePerms
} else {
return err
}
} else {
perms = currentInfo.Mode()
// The file exists, so try to preserve the ownership as well.
if err := preserveFilePermissions(f.Name(), currentInfo); err != nil {
log.Printf("[WARN] (runner) could not preserve file permissions for %q: %v",
f.Name(), err)
}
}
}
if err := os.Chmod(f.Name(), perms); err != nil {
return err
}
// If we got this far, it means we are about to save the file. Copy the
// current file so we have a backup. Note that os.Link preserves the Mode.
if backup {
bak, old := path+".bak", path+".old.bak"
os.Rename(bak, old) // ignore error
if err := os.Link(path, bak); err != nil {
log.Printf("[WARN] (runner) could not backup %q: %v", path, err)
} else {
os.Remove(old) // ignore error
}
}
if err := os.Rename(f.Name(), path); err != nil {
return err
}
return nil
}

View File

@ -0,0 +1,32 @@
package signals
import (
"reflect"
"github.com/mitchellh/mapstructure"
)
// StringToSignalFunc parses a string as a signal based on the signal lookup
// table. If the user supplied an empty string or nil, a special "nil signal"
// is returned. Clients should check for this value and set the response back
// nil after mapstructure finishes parsing.
func StringToSignalFunc() mapstructure.DecodeHookFunc {
return func(
f reflect.Type,
t reflect.Type,
data interface{}) (interface{}, error) {
if f.Kind() != reflect.String {
return data, nil
}
if t.String() != "os.Signal" {
return data, nil
}
if data == nil || data.(string) == "" {
return SIGNIL, nil
}
return Parse(data.(string))
}
}

View File

@ -0,0 +1,7 @@
package signals
// NilSignal is a special signal that is blank or "nil"
type NilSignal int
func (s *NilSignal) String() string { return "SIGNIL" }
func (s *NilSignal) Signal() {}

View File

@ -0,0 +1,35 @@
package signals
import (
"fmt"
"os"
"sort"
"strings"
)
// SIGNIL is the nil signal.
var SIGNIL os.Signal = new(NilSignal)
// ValidSignals is the list of all valid signals. This is built at runtime
// because it is OS-dependent.
var ValidSignals []string
func init() {
valid := make([]string, 0, len(SignalLookup))
for k := range SignalLookup {
valid = append(valid, k)
}
sort.Strings(valid)
ValidSignals = valid
}
// Parse parses the given string as a signal. If the signal is not found,
// an error is returned.
func Parse(s string) (os.Signal, error) {
sig, ok := SignalLookup[strings.ToUpper(s)]
if !ok {
return nil, fmt.Errorf("invalid signal %q - valid signals are %q",
s, ValidSignals)
}
return sig, nil
}

View File

@ -0,0 +1,40 @@
// +build linux darwin freebsd openbsd solaris netbsd
package signals
import (
"os"
"syscall"
)
var SignalLookup = map[string]os.Signal{
"SIGABRT": syscall.SIGABRT,
"SIGALRM": syscall.SIGALRM,
"SIGBUS": syscall.SIGBUS,
"SIGCHLD": syscall.SIGCHLD,
"SIGCONT": syscall.SIGCONT,
"SIGFPE": syscall.SIGFPE,
"SIGHUP": syscall.SIGHUP,
"SIGILL": syscall.SIGILL,
"SIGINT": syscall.SIGINT,
"SIGIO": syscall.SIGIO,
"SIGIOT": syscall.SIGIOT,
"SIGKILL": syscall.SIGKILL,
"SIGPIPE": syscall.SIGPIPE,
"SIGPROF": syscall.SIGPROF,
"SIGQUIT": syscall.SIGQUIT,
"SIGSEGV": syscall.SIGSEGV,
"SIGSTOP": syscall.SIGSTOP,
"SIGSYS": syscall.SIGSYS,
"SIGTERM": syscall.SIGTERM,
"SIGTRAP": syscall.SIGTRAP,
"SIGTSTP": syscall.SIGTSTP,
"SIGTTIN": syscall.SIGTTIN,
"SIGTTOU": syscall.SIGTTOU,
"SIGURG": syscall.SIGURG,
"SIGUSR1": syscall.SIGUSR1,
"SIGUSR2": syscall.SIGUSR2,
"SIGWINCH": syscall.SIGWINCH,
"SIGXCPU": syscall.SIGXCPU,
"SIGXFSZ": syscall.SIGXFSZ,
}

View File

@ -0,0 +1,24 @@
// +build windows
package signals
import (
"os"
"syscall"
)
var SignalLookup = map[string]os.Signal{
"SIGABRT": syscall.SIGABRT,
"SIGALRM": syscall.SIGALRM,
"SIGBUS": syscall.SIGBUS,
"SIGFPE": syscall.SIGFPE,
"SIGHUP": syscall.SIGHUP,
"SIGILL": syscall.SIGILL,
"SIGINT": syscall.SIGINT,
"SIGKILL": syscall.SIGKILL,
"SIGPIPE": syscall.SIGPIPE,
"SIGQUIT": syscall.SIGQUIT,
"SIGSEGV": syscall.SIGSEGV,
"SIGTERM": syscall.SIGTERM,
"SIGTRAP": syscall.SIGTRAP,
}

View File

@ -0,0 +1,74 @@
package template
import (
"sync"
dep "github.com/hashicorp/consul-template/dependency"
)
// Brain is what Template uses to determine the values that are
// available for template parsing.
type Brain struct {
sync.RWMutex
// data is the map of individual dependencies and the most recent data for
// that dependency.
data map[string]interface{}
// receivedData is an internal tracker of which dependencies have stored data
// in the brain.
receivedData map[string]struct{}
}
// NewBrain creates a new Brain with empty values for each
// of the key structs.
func NewBrain() *Brain {
return &Brain{
data: make(map[string]interface{}),
receivedData: make(map[string]struct{}),
}
}
// Remember accepts a dependency and the data to store associated with that
// dep. This function converts the given data to a proper type and stores
// it interally.
func (b *Brain) Remember(d dep.Dependency, data interface{}) {
b.Lock()
defer b.Unlock()
b.data[d.String()] = data
b.receivedData[d.String()] = struct{}{}
}
// Recall gets the current value for the given dependency in the Brain.
func (b *Brain) Recall(d dep.Dependency) (interface{}, bool) {
b.RLock()
defer b.RUnlock()
// If we have not received data for this dependency, return now.
if _, ok := b.receivedData[d.String()]; !ok {
return nil, false
}
return b.data[d.String()], true
}
// ForceSet is used to force set the value of a dependency
// for a given hash code
func (b *Brain) ForceSet(hashCode string, data interface{}) {
b.Lock()
defer b.Unlock()
b.data[hashCode] = data
b.receivedData[hashCode] = struct{}{}
}
// Forget accepts a dependency and removes all associated data with this
// dependency. It also resets the "receivedData" internal map.
func (b *Brain) Forget(d dep.Dependency) {
b.Lock()
defer b.Unlock()
delete(b.data, d.String())
delete(b.receivedData, d.String())
}

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,125 @@
package template
import (
"fmt"
"sort"
"sync"
)
// Scratch is a wrapper around a map which is used by the template.
type Scratch struct {
once sync.Once
sync.RWMutex
values map[string]interface{}
}
// Key returns a boolean indicating whether the given key exists in the map.
func (s *Scratch) Key(k string) bool {
s.RLock()
defer s.RUnlock()
_, ok := s.values[k]
return ok
}
// Get returns a value previously set by Add or Set
func (s *Scratch) Get(k string) interface{} {
s.RLock()
defer s.RUnlock()
return s.values[k]
}
// Set stores the value v at the key k. It will overwrite an existing value
// if present.
func (s *Scratch) Set(k string, v interface{}) string {
s.init()
s.Lock()
defer s.Unlock()
s.values[k] = v
return ""
}
// SetX behaves the same as Set, except it will not overwrite existing keys if
// already present.
func (s *Scratch) SetX(k string, v interface{}) string {
s.init()
s.Lock()
defer s.Unlock()
if _, ok := s.values[k]; !ok {
s.values[k] = v
}
return ""
}
// MapSet stores the value v into a key mk in the map named k.
func (s *Scratch) MapSet(k, mk string, v interface{}) (string, error) {
s.init()
s.Lock()
defer s.Unlock()
return s.mapSet(k, mk, v, true)
}
// MapSetX behaves the same as MapSet, except it will not overwrite the map
// key if it already exists.
func (s *Scratch) MapSetX(k, mk string, v interface{}) (string, error) {
s.init()
s.Lock()
defer s.Unlock()
return s.mapSet(k, mk, v, false)
}
// mapSet is sets the value in the map, overwriting if o is true. This function
// does not perform locking; callers should lock before invoking.
func (s *Scratch) mapSet(k, mk string, v interface{}, o bool) (string, error) {
if _, ok := s.values[k]; !ok {
s.values[k] = make(map[string]interface{})
}
typed, ok := s.values[k].(map[string]interface{})
if !ok {
return "", fmt.Errorf("%q is not a map", k)
}
if _, ok := typed[mk]; o || !ok {
typed[mk] = v
}
return "", nil
}
// MapValues returns the list of values in the map sorted by key.
func (s *Scratch) MapValues(k string) ([]interface{}, error) {
s.init()
s.Lock()
defer s.Unlock()
if s.values == nil {
return nil, nil
}
typed, ok := s.values[k].(map[string]interface{})
if !ok {
return nil, nil
}
keys := make([]string, 0, len(typed))
for k := range typed {
keys = append(keys, k)
}
sort.Strings(keys)
sorted := make([]interface{}, len(keys))
for i, k := range keys {
sorted[i] = typed[k]
}
return sorted, nil
}
// init initializes the scratch.
func (s *Scratch) init() {
if s.values == nil {
s.values = make(map[string]interface{})
}
}

View File

@ -0,0 +1,303 @@
package template
import (
"bytes"
"crypto/md5"
"encoding/hex"
"io/ioutil"
"text/template"
"github.com/pkg/errors"
dep "github.com/hashicorp/consul-template/dependency"
)
var (
// ErrTemplateContentsAndSource is the error returned when a template
// specifies both a "source" and "content" argument, which is not valid.
ErrTemplateContentsAndSource = errors.New("template: cannot specify both 'source' and 'content'")
// ErrTemplateMissingContentsAndSource is the error returned when a template
// does not specify either a "source" or "content" argument, which is not
// valid.
ErrTemplateMissingContentsAndSource = errors.New("template: must specify exactly one of 'source' or 'content'")
)
// Template is the internal representation of an individual template to process.
// The template retains the relationship between it's contents and is
// responsible for it's own execution.
type Template struct {
// contents is the string contents for the template. It is either given
// during template creation or read from disk when initialized.
contents string
// source is the original location of the template. This may be undefined if
// the template was dynamically defined.
source string
// leftDelim and rightDelim are the template delimiters.
leftDelim string
rightDelim string
// hexMD5 stores the hex version of the MD5
hexMD5 string
// errMissingKey causes the template processing to exit immediately if a map
// is indexed with a key that does not exist.
errMissingKey bool
// functionBlacklist are functions not permitted to be executed
// when we render this template
functionBlacklist []string
// sandboxPath adds a prefix to any path provided to the `file` function
// and causes an error if a relative path tries to traverse outside that
// prefix.
sandboxPath string
}
// NewTemplateInput is used as input when creating the template.
type NewTemplateInput struct {
// Source is the location on disk to the file.
Source string
// Contents are the raw template contents.
Contents string
// ErrMissingKey causes the template parser to exit immediately with an error
// when a map is indexed with a key that does not exist.
ErrMissingKey bool
// LeftDelim and RightDelim are the template delimiters.
LeftDelim string
RightDelim string
// FunctionBlacklist are functions not permitted to be executed
// when we render this template
FunctionBlacklist []string
// SandboxPath adds a prefix to any path provided to the `file` function
// and causes an error if a relative path tries to traverse outside that
// prefix.
SandboxPath string
}
// NewTemplate creates and parses a new Consul Template template at the given
// path. If the template does not exist, an error is returned. During
// initialization, the template is read and is parsed for dependencies. Any
// errors that occur are returned.
func NewTemplate(i *NewTemplateInput) (*Template, error) {
if i == nil {
i = &NewTemplateInput{}
}
// Validate that we are either given the path or the explicit contents
if i.Source != "" && i.Contents != "" {
return nil, ErrTemplateContentsAndSource
} else if i.Source == "" && i.Contents == "" {
return nil, ErrTemplateMissingContentsAndSource
}
var t Template
t.source = i.Source
t.contents = i.Contents
t.leftDelim = i.LeftDelim
t.rightDelim = i.RightDelim
t.errMissingKey = i.ErrMissingKey
t.functionBlacklist = i.FunctionBlacklist
t.sandboxPath = i.SandboxPath
if i.Source != "" {
contents, err := ioutil.ReadFile(i.Source)
if err != nil {
return nil, errors.Wrap(err, "failed to read template")
}
t.contents = string(contents)
}
// Compute the MD5, encode as hex
hash := md5.Sum([]byte(t.contents))
t.hexMD5 = hex.EncodeToString(hash[:])
return &t, nil
}
// ID returns the identifier for this template.
func (t *Template) ID() string {
return t.hexMD5
}
// Contents returns the raw contents of the template.
func (t *Template) Contents() string {
return t.contents
}
// Source returns the filepath source of this template.
func (t *Template) Source() string {
if t.source == "" {
return "(dynamic)"
}
return t.source
}
// ExecuteInput is used as input to the template's execute function.
type ExecuteInput struct {
// Brain is the brain where data for the template is stored.
Brain *Brain
// Env is a custom environment provided to the template for envvar resolution.
// Values specified here will take precedence over any values in the
// environment when using the `env` function.
Env []string
}
// ExecuteResult is the result of the template execution.
type ExecuteResult struct {
// Used is the set of dependencies that were used.
Used *dep.Set
// Missing is the set of dependencies that were missing.
Missing *dep.Set
// Output is the rendered result.
Output []byte
}
// Execute evaluates this template in the provided context.
func (t *Template) Execute(i *ExecuteInput) (*ExecuteResult, error) {
if i == nil {
i = &ExecuteInput{}
}
var used, missing dep.Set
tmpl := template.New("")
tmpl.Delims(t.leftDelim, t.rightDelim)
tmpl.Funcs(funcMap(&funcMapInput{
t: tmpl,
brain: i.Brain,
env: i.Env,
used: &used,
missing: &missing,
functionBlacklist: t.functionBlacklist,
sandboxPath: t.sandboxPath,
}))
if t.errMissingKey {
tmpl.Option("missingkey=error")
} else {
tmpl.Option("missingkey=zero")
}
tmpl, err := tmpl.Parse(t.contents)
if err != nil {
return nil, errors.Wrap(err, "parse")
}
// Execute the template into the writer
var b bytes.Buffer
if err := tmpl.Execute(&b, nil); err != nil {
return nil, errors.Wrap(err, "execute")
}
return &ExecuteResult{
Used: &used,
Missing: &missing,
Output: b.Bytes(),
}, nil
}
// funcMapInput is input to the funcMap, which builds the template functions.
type funcMapInput struct {
t *template.Template
brain *Brain
env []string
functionBlacklist []string
sandboxPath string
used *dep.Set
missing *dep.Set
}
// funcMap is the map of template functions to their respective functions.
func funcMap(i *funcMapInput) template.FuncMap {
var scratch Scratch
r := template.FuncMap{
// API functions
"datacenters": datacentersFunc(i.brain, i.used, i.missing),
"file": fileFunc(i.brain, i.used, i.missing, i.sandboxPath),
"key": keyFunc(i.brain, i.used, i.missing),
"keyExists": keyExistsFunc(i.brain, i.used, i.missing),
"keyOrDefault": keyWithDefaultFunc(i.brain, i.used, i.missing),
"ls": lsFunc(i.brain, i.used, i.missing, true),
"safeLs": safeLsFunc(i.brain, i.used, i.missing),
"node": nodeFunc(i.brain, i.used, i.missing),
"nodes": nodesFunc(i.brain, i.used, i.missing),
"secret": secretFunc(i.brain, i.used, i.missing),
"secrets": secretsFunc(i.brain, i.used, i.missing),
"service": serviceFunc(i.brain, i.used, i.missing),
"services": servicesFunc(i.brain, i.used, i.missing),
"tree": treeFunc(i.brain, i.used, i.missing, true),
"safeTree": safeTreeFunc(i.brain, i.used, i.missing),
// Scratch
"scratch": func() *Scratch { return &scratch },
// Helper functions
"base64Decode": base64Decode,
"base64Encode": base64Encode,
"base64URLDecode": base64URLDecode,
"base64URLEncode": base64URLEncode,
"byKey": byKey,
"byTag": byTag,
"contains": contains,
"containsAll": containsSomeFunc(true, true),
"containsAny": containsSomeFunc(false, false),
"containsNone": containsSomeFunc(true, false),
"containsNotAll": containsSomeFunc(false, true),
"env": envFunc(i.env),
"executeTemplate": executeTemplateFunc(i.t),
"explode": explode,
"explodeMap": explodeMap,
"in": in,
"indent": indent,
"loop": loop,
"join": join,
"trimSpace": trimSpace,
"parseBool": parseBool,
"parseFloat": parseFloat,
"parseInt": parseInt,
"parseJSON": parseJSON,
"parseUint": parseUint,
"plugin": plugin,
"regexReplaceAll": regexReplaceAll,
"regexMatch": regexMatch,
"replaceAll": replaceAll,
"timestamp": timestamp,
"toLower": toLower,
"toJSON": toJSON,
"toJSONPretty": toJSONPretty,
"toTitle": toTitle,
"toTOML": toTOML,
"toUpper": toUpper,
"toYAML": toYAML,
"split": split,
"byMeta": byMeta,
"sockaddr": sockaddr,
// Math functions
"add": add,
"subtract": subtract,
"multiply": multiply,
"divide": divide,
"modulo": modulo,
}
for _, bf := range i.functionBlacklist {
if _, ok := r[bf]; ok {
r[bf] = blacklisted
}
}
return r
}

View File

@ -0,0 +1,12 @@
package version
import "fmt"
const Version = "0.22.0"
var (
Name string
GitCommit string
HumanVersion = fmt.Sprintf("%s v%s (%s)", Name, Version, GitCommit)
)

View File

@ -0,0 +1,308 @@
package watch
import (
"fmt"
"log"
"math/rand"
"reflect"
"sync"
"time"
dep "github.com/hashicorp/consul-template/dependency"
)
const (
// The amount of time to do a blocking query for
defaultWaitTime = 60 * time.Second
)
// View is a representation of a Dependency and the most recent data it has
// received from Consul.
type View struct {
// dependency is the dependency that is associated with this View
dependency dep.Dependency
// clients is the list of clients to communicate upstream. This is passed
// directly to the dependency.
clients *dep.ClientSet
// data is the most-recently-received data from Consul for this View. It is
// accompanied by a series of locks and booleans to ensure consistency.
dataLock sync.RWMutex
data interface{}
receivedData bool
lastIndex uint64
// maxStale is the maximum amount of time to allow a query to be stale.
maxStale time.Duration
// once determines if this view should receive data exactly once.
once bool
// retryFunc is the function to invoke on failure to determine if a retry
// should be attempted.
retryFunc RetryFunc
// stopCh is used to stop polling on this View
stopCh chan struct{}
// vaultGrace is the grace period between a lease and the max TTL for which
// Consul Template will generate a new secret instead of renewing an existing
// one.
vaultGrace time.Duration
}
// NewViewInput is used as input to the NewView function.
type NewViewInput struct {
// Dependency is the dependency to associate with the new view.
Dependency dep.Dependency
// Clients is the list of clients to communicate upstream. This is passed
// directly to the dependency.
Clients *dep.ClientSet
// MaxStale is the maximum amount a time a query response is allowed to be
// stale before forcing a read from the leader.
MaxStale time.Duration
// Once indicates this view should poll for data exactly one time.
Once bool
// RetryFunc is a function which dictates how this view should retry on
// upstream errors.
RetryFunc RetryFunc
// VaultGrace is the grace period between a lease and the max TTL for which
// Consul Template will generate a new secret instead of renewing an existing
// one.
VaultGrace time.Duration
}
// NewView constructs a new view with the given inputs.
func NewView(i *NewViewInput) (*View, error) {
return &View{
dependency: i.Dependency,
clients: i.Clients,
maxStale: i.MaxStale,
once: i.Once,
retryFunc: i.RetryFunc,
stopCh: make(chan struct{}, 1),
vaultGrace: i.VaultGrace,
}, nil
}
// Dependency returns the dependency attached to this View.
func (v *View) Dependency() dep.Dependency {
return v.dependency
}
// Data returns the most-recently-received data from Consul for this View.
func (v *View) Data() interface{} {
v.dataLock.RLock()
defer v.dataLock.RUnlock()
return v.data
}
// DataAndLastIndex returns the most-recently-received data from Consul for
// this view, along with the last index. This is atomic so you will get the
// index that goes with the data you are fetching.
func (v *View) DataAndLastIndex() (interface{}, uint64) {
v.dataLock.RLock()
defer v.dataLock.RUnlock()
return v.data, v.lastIndex
}
// poll queries the Consul instance for data using the fetch function, but also
// accounts for interrupts on the interrupt channel. This allows the poll
// function to be fired in a goroutine, but then halted even if the fetch
// function is in the middle of a blocking query.
func (v *View) poll(viewCh chan<- *View, errCh chan<- error) {
var retries int
for {
doneCh := make(chan struct{}, 1)
successCh := make(chan struct{}, 1)
fetchErrCh := make(chan error, 1)
go v.fetch(doneCh, successCh, fetchErrCh)
WAIT:
select {
case <-doneCh:
// Reset the retry to avoid exponentially incrementing retries when we
// have some successful requests
retries = 0
log.Printf("[TRACE] (view) %s received data", v.dependency)
select {
case <-v.stopCh:
return
case viewCh <- v:
}
// If we are operating in once mode, do not loop - we received data at
// least once which is the API promise here.
if v.once {
return
}
case <-successCh:
// We successfully received a non-error response from the server. This
// does not mean we have data (that's dataCh's job), but rather this
// just resets the counter indicating we communicated successfully. For
// example, Consul make have an outage, but when it returns, the view
// is unchanged. We have to reset the counter retries, but not update the
// actual template.
log.Printf("[TRACE] (view) %s successful contact, resetting retries", v.dependency)
retries = 0
goto WAIT
case err := <-fetchErrCh:
if v.retryFunc != nil {
retry, sleep := v.retryFunc(retries)
if retry {
log.Printf("[WARN] (view) %s (retry attempt %d after %q)",
err, retries+1, sleep)
select {
case <-time.After(sleep):
retries++
continue
case <-v.stopCh:
return
}
}
}
log.Printf("[ERR] (view) %s (exceeded maximum retries)", err)
// Push the error back up to the watcher
select {
case <-v.stopCh:
return
case errCh <- err:
return
}
case <-v.stopCh:
log.Printf("[TRACE] (view) %s stopping poll (received on view stopCh)", v.dependency)
return
}
}
}
// fetch queries the Consul instance for the attached dependency. This API
// promises that either data will be written to doneCh or an error will be
// written to errCh. It is designed to be run in a goroutine that selects the
// result of doneCh and errCh. It is assumed that only one instance of fetch
// is running per View and therefore no locking or mutexes are used.
func (v *View) fetch(doneCh, successCh chan<- struct{}, errCh chan<- error) {
log.Printf("[TRACE] (view) %s starting fetch", v.dependency)
var allowStale bool
if v.maxStale != 0 {
allowStale = true
}
for {
// If the view was stopped, short-circuit this loop. This prevents a bug
// where a view can get "lost" in the event Consul Template is reloaded.
select {
case <-v.stopCh:
return
default:
}
start := time.Now() // for rateLimiter below
data, rm, err := v.dependency.Fetch(v.clients, &dep.QueryOptions{
AllowStale: allowStale,
WaitTime: defaultWaitTime,
WaitIndex: v.lastIndex,
VaultGrace: v.vaultGrace,
})
if err != nil {
if err == dep.ErrStopped {
log.Printf("[TRACE] (view) %s reported stop", v.dependency)
} else {
errCh <- err
}
return
}
if rm == nil {
errCh <- fmt.Errorf("received nil response metadata - this is a bug " +
"and should be reported")
return
}
// If we got this far, we received data successfully. That data might not
// trigger a data update (because we could continue below), but we need to
// inform the poller to reset the retry count.
log.Printf("[TRACE] (view) %s marking successful data response", v.dependency)
select {
case successCh <- struct{}{}:
default:
}
if allowStale && rm.LastContact > v.maxStale {
allowStale = false
log.Printf("[TRACE] (view) %s stale data (last contact exceeded max_stale)", v.dependency)
continue
}
if v.maxStale != 0 {
allowStale = true
}
if dur := rateLimiter(start); dur > 1 {
time.Sleep(dur)
}
if rm.LastIndex == v.lastIndex {
log.Printf("[TRACE] (view) %s no new data (index was the same)", v.dependency)
continue
}
v.dataLock.Lock()
if rm.LastIndex < v.lastIndex {
log.Printf("[TRACE] (view) %s had a lower index, resetting", v.dependency)
v.lastIndex = 0
v.dataLock.Unlock()
continue
}
v.lastIndex = rm.LastIndex
if v.receivedData && reflect.DeepEqual(data, v.data) {
log.Printf("[TRACE] (view) %s no new data (contents were the same)", v.dependency)
v.dataLock.Unlock()
continue
}
if data == nil && rm.Block {
log.Printf("[TRACE] (view) %s asked for blocking query", v.dependency)
v.dataLock.Unlock()
continue
}
v.data = data
v.receivedData = true
v.dataLock.Unlock()
close(doneCh)
return
}
}
const minDelayBetweenUpdates = time.Millisecond * 100
// return a duration to sleep to limit the frequency of upstream calls
func rateLimiter(start time.Time) time.Duration {
remaining := minDelayBetweenUpdates - time.Since(start)
if remaining > 0 {
dither := time.Duration(rand.Int63n(20000000)) // 0-20ms
return remaining + dither
}
return 0
}
// stop halts polling of this view.
func (v *View) stop() {
v.dependency.Stop()
close(v.stopCh)
}

View File

@ -0,0 +1,253 @@
package watch
import (
"log"
"sync"
"time"
dep "github.com/hashicorp/consul-template/dependency"
"github.com/pkg/errors"
)
// dataBufferSize is the default number of views to process in a batch.
const dataBufferSize = 2048
type RetryFunc func(int) (bool, time.Duration)
// Watcher is a top-level manager for views that poll Consul for data.
type Watcher struct {
sync.Mutex
// clients is the collection of API clients to talk to upstreams.
clients *dep.ClientSet
// dataCh is the chan where Views will be published.
dataCh chan *View
// errCh is the chan where any errors will be published.
errCh chan error
// depViewMap is a map of Templates to Views. Templates are keyed by
// their string.
depViewMap map[string]*View
// maxStale specifies the maximum staleness of a query response.
maxStale time.Duration
// once signals if this watcher should tell views to retrieve data exactly
// one time instead of polling infinitely.
once bool
// retryFuncs specifies the different ways to retry based on the upstream.
retryFuncConsul RetryFunc
retryFuncDefault RetryFunc
retryFuncVault RetryFunc
// vaultGrace is the grace period between a lease and the max TTL for which
// Consul Template will generate a new secret instead of renewing an existing
// one.
vaultGrace time.Duration
}
type NewWatcherInput struct {
// Clients is the client set to communicate with upstreams.
Clients *dep.ClientSet
// MaxStale is the maximum staleness of a query.
MaxStale time.Duration
// Once specifies this watcher should tell views to poll exactly once.
Once bool
// RenewVault indicates if this watcher should renew Vault tokens.
RenewVault bool
// VaultToken is the vault token to renew.
VaultToken string
// VaultAgentTokenFile is the path to Vault Agent token file
VaultAgentTokenFile string
// RetryFuncs specify the different ways to retry based on the upstream.
RetryFuncConsul RetryFunc
RetryFuncDefault RetryFunc
RetryFuncVault RetryFunc
// VaultGrace is the grace period between a lease and the max TTL for which
// Consul Template will generate a new secret instead of renewing an existing
// one.
VaultGrace time.Duration
}
// NewWatcher creates a new watcher using the given API client.
func NewWatcher(i *NewWatcherInput) (*Watcher, error) {
w := &Watcher{
clients: i.Clients,
depViewMap: make(map[string]*View),
dataCh: make(chan *View, dataBufferSize),
errCh: make(chan error),
maxStale: i.MaxStale,
once: i.Once,
retryFuncConsul: i.RetryFuncConsul,
retryFuncDefault: i.RetryFuncDefault,
retryFuncVault: i.RetryFuncVault,
vaultGrace: i.VaultGrace,
}
// Start a watcher for the Vault renew if that config was specified
if i.RenewVault {
vt, err := dep.NewVaultTokenQuery(i.VaultToken)
if err != nil {
return nil, errors.Wrap(err, "watcher")
}
if _, err := w.Add(vt); err != nil {
return nil, errors.Wrap(err, "watcher")
}
}
if len(i.VaultAgentTokenFile) > 0 {
vag, err := dep.NewVaultAgentTokenQuery(i.VaultAgentTokenFile)
if err != nil {
return nil, errors.Wrap(err, "watcher")
}
if _, err := w.Add(vag); err != nil {
return nil, errors.Wrap(err, "watcher")
}
}
return w, nil
}
// DataCh returns a read-only channel of Views which is populated when a view
// receives data from its upstream.
func (w *Watcher) DataCh() <-chan *View {
return w.dataCh
}
// ErrCh returns a read-only channel of errors returned by the upstream.
func (w *Watcher) ErrCh() <-chan error {
return w.errCh
}
// Add adds the given dependency to the list of monitored dependencies
// and start the associated view. If the dependency already exists, no action is
// taken.
//
// If the Dependency already existed, it this function will return false. If the
// view was successfully created, it will return true. If an error occurs while
// creating the view, it will be returned here (but future errors returned by
// the view will happen on the channel).
func (w *Watcher) Add(d dep.Dependency) (bool, error) {
w.Lock()
defer w.Unlock()
log.Printf("[DEBUG] (watcher) adding %s", d)
if _, ok := w.depViewMap[d.String()]; ok {
log.Printf("[TRACE] (watcher) %s already exists, skipping", d)
return false, nil
}
// Choose the correct retry function based off of the dependency's type.
var retryFunc RetryFunc
switch d.Type() {
case dep.TypeConsul:
retryFunc = w.retryFuncConsul
case dep.TypeVault:
retryFunc = w.retryFuncVault
default:
retryFunc = w.retryFuncDefault
}
v, err := NewView(&NewViewInput{
Dependency: d,
Clients: w.clients,
MaxStale: w.maxStale,
Once: w.once,
RetryFunc: retryFunc,
VaultGrace: w.vaultGrace,
})
if err != nil {
return false, errors.Wrap(err, "watcher")
}
log.Printf("[TRACE] (watcher) %s starting", d)
w.depViewMap[d.String()] = v
go v.poll(w.dataCh, w.errCh)
return true, nil
}
// Watching determines if the given dependency is being watched.
func (w *Watcher) Watching(d dep.Dependency) bool {
w.Lock()
defer w.Unlock()
_, ok := w.depViewMap[d.String()]
return ok
}
// ForceWatching is used to force setting the internal state of watching
// a dependency. This is only used for unit testing purposes.
func (w *Watcher) ForceWatching(d dep.Dependency, enabled bool) {
w.Lock()
defer w.Unlock()
if enabled {
w.depViewMap[d.String()] = nil
} else {
delete(w.depViewMap, d.String())
}
}
// Remove removes the given dependency from the list and stops the
// associated View. If a View for the given dependency does not exist, this
// function will return false. If the View does exist, this function will return
// true upon successful deletion.
func (w *Watcher) Remove(d dep.Dependency) bool {
w.Lock()
defer w.Unlock()
log.Printf("[DEBUG] (watcher) removing %s", d)
if view, ok := w.depViewMap[d.String()]; ok {
log.Printf("[TRACE] (watcher) actually removing %s", d)
view.stop()
delete(w.depViewMap, d.String())
return true
}
log.Printf("[TRACE] (watcher) %s did not exist, skipping", d)
return false
}
// Size returns the number of views this watcher is watching.
func (w *Watcher) Size() int {
w.Lock()
defer w.Unlock()
return len(w.depViewMap)
}
// Stop halts this watcher and any currently polling views immediately. If a
// view was in the middle of a poll, no data will be returned.
func (w *Watcher) Stop() {
w.Lock()
defer w.Unlock()
log.Printf("[DEBUG] (watcher) stopping all views")
for _, view := range w.depViewMap {
if view == nil {
continue
}
log.Printf("[TRACE] (watcher) stopping %s", view.Dependency())
view.stop()
}
// Reset the map to have no views
w.depViewMap = make(map[string]*View)
// Close any idle TCP connections
w.clients.Stop()
}

View File

@ -4,7 +4,10 @@ import (
"fmt"
"io"
"io/ioutil"
"net/url"
"time"
"github.com/mitchellh/mapstructure"
)
const (
@ -19,18 +22,26 @@ type ACLTokenPolicyLink struct {
ID string
Name string
}
type ACLTokenRoleLink struct {
ID string
Name string
}
// ACLToken represents an ACL Token
type ACLToken struct {
CreateIndex uint64
ModifyIndex uint64
AccessorID string
SecretID string
Description string
Policies []*ACLTokenPolicyLink
Local bool
CreateTime time.Time `json:",omitempty"`
Hash []byte `json:",omitempty"`
CreateIndex uint64
ModifyIndex uint64
AccessorID string
SecretID string
Description string
Policies []*ACLTokenPolicyLink `json:",omitempty"`
Roles []*ACLTokenRoleLink `json:",omitempty"`
ServiceIdentities []*ACLServiceIdentity `json:",omitempty"`
Local bool
ExpirationTTL time.Duration `json:",omitempty"`
ExpirationTime *time.Time `json:",omitempty"`
CreateTime time.Time `json:",omitempty"`
Hash []byte `json:",omitempty"`
// DEPRECATED (ACL-Legacy-Compat)
// Rules will only be present for legacy tokens returned via the new APIs
@ -38,15 +49,18 @@ type ACLToken struct {
}
type ACLTokenListEntry struct {
CreateIndex uint64
ModifyIndex uint64
AccessorID string
Description string
Policies []*ACLTokenPolicyLink
Local bool
CreateTime time.Time
Hash []byte
Legacy bool
CreateIndex uint64
ModifyIndex uint64
AccessorID string
Description string
Policies []*ACLTokenPolicyLink `json:",omitempty"`
Roles []*ACLTokenRoleLink `json:",omitempty"`
ServiceIdentities []*ACLServiceIdentity `json:",omitempty"`
Local bool
ExpirationTime *time.Time `json:",omitempty"`
CreateTime time.Time
Hash []byte
Legacy bool
}
// ACLEntry is used to represent a legacy ACL token
@ -67,11 +81,20 @@ type ACLReplicationStatus struct {
SourceDatacenter string
ReplicationType string
ReplicatedIndex uint64
ReplicatedRoleIndex uint64
ReplicatedTokenIndex uint64
LastSuccess time.Time
LastError time.Time
}
// ACLServiceIdentity represents a high-level grant of all necessary privileges
// to assume the identity of the named Service in the Catalog and within
// Connect.
type ACLServiceIdentity struct {
ServiceName string
Datacenters []string `json:",omitempty"`
}
// ACLPolicy represents an ACL Policy.
type ACLPolicy struct {
ID string
@ -94,6 +117,113 @@ type ACLPolicyListEntry struct {
ModifyIndex uint64
}
type ACLRolePolicyLink struct {
ID string
Name string
}
// ACLRole represents an ACL Role.
type ACLRole struct {
ID string
Name string
Description string
Policies []*ACLRolePolicyLink `json:",omitempty"`
ServiceIdentities []*ACLServiceIdentity `json:",omitempty"`
Hash []byte
CreateIndex uint64
ModifyIndex uint64
}
// BindingRuleBindType is the type of binding rule mechanism used.
type BindingRuleBindType string
const (
// BindingRuleBindTypeService binds to a service identity with the given name.
BindingRuleBindTypeService BindingRuleBindType = "service"
// BindingRuleBindTypeRole binds to pre-existing roles with the given name.
BindingRuleBindTypeRole BindingRuleBindType = "role"
)
type ACLBindingRule struct {
ID string
Description string
AuthMethod string
Selector string
BindType BindingRuleBindType
BindName string
CreateIndex uint64
ModifyIndex uint64
}
type ACLAuthMethod struct {
Name string
Type string
Description string
// Configuration is arbitrary configuration for the auth method. This
// should only contain primitive values and containers (such as lists and
// maps).
Config map[string]interface{}
CreateIndex uint64
ModifyIndex uint64
}
type ACLAuthMethodListEntry struct {
Name string
Type string
Description string
CreateIndex uint64
ModifyIndex uint64
}
// ParseKubernetesAuthMethodConfig takes a raw config map and returns a parsed
// KubernetesAuthMethodConfig.
func ParseKubernetesAuthMethodConfig(raw map[string]interface{}) (*KubernetesAuthMethodConfig, error) {
var config KubernetesAuthMethodConfig
decodeConf := &mapstructure.DecoderConfig{
Result: &config,
WeaklyTypedInput: true,
}
decoder, err := mapstructure.NewDecoder(decodeConf)
if err != nil {
return nil, err
}
if err := decoder.Decode(raw); err != nil {
return nil, fmt.Errorf("error decoding config: %s", err)
}
return &config, nil
}
// KubernetesAuthMethodConfig is the config for the built-in Consul auth method
// for Kubernetes.
type KubernetesAuthMethodConfig struct {
Host string `json:",omitempty"`
CACert string `json:",omitempty"`
ServiceAccountJWT string `json:",omitempty"`
}
// RenderToConfig converts this into a map[string]interface{} suitable for use
// in the ACLAuthMethod.Config field.
func (c *KubernetesAuthMethodConfig) RenderToConfig() map[string]interface{} {
return map[string]interface{}{
"Host": c.Host,
"CACert": c.CACert,
"ServiceAccountJWT": c.ServiceAccountJWT,
}
}
type ACLLoginParams struct {
AuthMethod string
BearerToken string
Meta map[string]string `json:",omitempty"`
}
// ACL can be used to query the ACL endpoints
type ACL struct {
c *Client
@ -266,17 +396,9 @@ func (a *ACL) Replication(q *QueryOptions) (*ACLReplicationStatus, *QueryMeta, e
return entries, qm, nil
}
// TokenCreate creates a new ACL token. It requires that the AccessorID and SecretID fields
// of the ACLToken structure to be empty as these will be filled in by Consul.
// TokenCreate creates a new ACL token. If either the AccessorID or SecretID fields
// of the ACLToken structure are empty they will be filled in by Consul.
func (a *ACL) TokenCreate(token *ACLToken, q *WriteOptions) (*ACLToken, *WriteMeta, error) {
if token.AccessorID != "" {
return nil, nil, fmt.Errorf("Cannot specify an AccessorID in Token Creation")
}
if token.SecretID != "" {
return nil, nil, fmt.Errorf("Cannot specify a SecretID in Token Creation")
}
r := a.c.newRequest("PUT", "/v1/acl/token")
r.setWriteOptions(q)
r.obj = token
@ -437,7 +559,6 @@ func (a *ACL) PolicyCreate(policy *ACLPolicy, q *WriteOptions) (*ACLPolicy, *Wri
if policy.ID != "" {
return nil, nil, fmt.Errorf("Cannot specify an ID in Policy Creation")
}
r := a.c.newRequest("PUT", "/v1/acl/policy")
r.setWriteOptions(q)
r.obj = policy
@ -460,7 +581,7 @@ func (a *ACL) PolicyCreate(policy *ACLPolicy, q *WriteOptions) (*ACLPolicy, *Wri
// existing policy ID
func (a *ACL) PolicyUpdate(policy *ACLPolicy, q *WriteOptions) (*ACLPolicy, *WriteMeta, error) {
if policy.ID == "" {
return nil, nil, fmt.Errorf("Must specify an ID in Policy Creation")
return nil, nil, fmt.Errorf("Must specify an ID in Policy Update")
}
r := a.c.newRequest("PUT", "/v1/acl/policy/"+policy.ID)
@ -586,3 +707,410 @@ func (a *ACL) RulesTranslateToken(tokenID string) (string, error) {
return string(ruleBytes), nil
}
// RoleCreate will create a new role. It is not allowed for the role parameters
// ID field to be set as this will be generated by Consul while processing the request.
func (a *ACL) RoleCreate(role *ACLRole, q *WriteOptions) (*ACLRole, *WriteMeta, error) {
if role.ID != "" {
return nil, nil, fmt.Errorf("Cannot specify an ID in Role Creation")
}
r := a.c.newRequest("PUT", "/v1/acl/role")
r.setWriteOptions(q)
r.obj = role
rtt, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
wm := &WriteMeta{RequestTime: rtt}
var out ACLRole
if err := decodeBody(resp, &out); err != nil {
return nil, nil, err
}
return &out, wm, nil
}
// RoleUpdate updates a role. The ID field of the role parameter must be set to an
// existing role ID
func (a *ACL) RoleUpdate(role *ACLRole, q *WriteOptions) (*ACLRole, *WriteMeta, error) {
if role.ID == "" {
return nil, nil, fmt.Errorf("Must specify an ID in Role Update")
}
r := a.c.newRequest("PUT", "/v1/acl/role/"+role.ID)
r.setWriteOptions(q)
r.obj = role
rtt, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
wm := &WriteMeta{RequestTime: rtt}
var out ACLRole
if err := decodeBody(resp, &out); err != nil {
return nil, nil, err
}
return &out, wm, nil
}
// RoleDelete deletes a role given its ID.
func (a *ACL) RoleDelete(roleID string, q *WriteOptions) (*WriteMeta, error) {
r := a.c.newRequest("DELETE", "/v1/acl/role/"+roleID)
r.setWriteOptions(q)
rtt, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return nil, err
}
resp.Body.Close()
wm := &WriteMeta{RequestTime: rtt}
return wm, nil
}
// RoleRead retrieves the role details (by ID). Returns nil if not found.
func (a *ACL) RoleRead(roleID string, q *QueryOptions) (*ACLRole, *QueryMeta, error) {
r := a.c.newRequest("GET", "/v1/acl/role/"+roleID)
r.setQueryOptions(q)
found, rtt, resp, err := requireNotFoundOrOK(a.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
if !found {
return nil, qm, nil
}
var out ACLRole
if err := decodeBody(resp, &out); err != nil {
return nil, nil, err
}
return &out, qm, nil
}
// RoleReadByName retrieves the role details (by name). Returns nil if not found.
func (a *ACL) RoleReadByName(roleName string, q *QueryOptions) (*ACLRole, *QueryMeta, error) {
r := a.c.newRequest("GET", "/v1/acl/role/name/"+url.QueryEscape(roleName))
r.setQueryOptions(q)
found, rtt, resp, err := requireNotFoundOrOK(a.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
if !found {
return nil, qm, nil
}
var out ACLRole
if err := decodeBody(resp, &out); err != nil {
return nil, nil, err
}
return &out, qm, nil
}
// RoleList retrieves a listing of all roles. The listing does not include some
// metadata for the role as those should be retrieved by subsequent calls to
// RoleRead.
func (a *ACL) RoleList(q *QueryOptions) ([]*ACLRole, *QueryMeta, error) {
r := a.c.newRequest("GET", "/v1/acl/roles")
r.setQueryOptions(q)
rtt, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
var entries []*ACLRole
if err := decodeBody(resp, &entries); err != nil {
return nil, nil, err
}
return entries, qm, nil
}
// AuthMethodCreate will create a new auth method.
func (a *ACL) AuthMethodCreate(method *ACLAuthMethod, q *WriteOptions) (*ACLAuthMethod, *WriteMeta, error) {
if method.Name == "" {
return nil, nil, fmt.Errorf("Must specify a Name in Auth Method Creation")
}
r := a.c.newRequest("PUT", "/v1/acl/auth-method")
r.setWriteOptions(q)
r.obj = method
rtt, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
wm := &WriteMeta{RequestTime: rtt}
var out ACLAuthMethod
if err := decodeBody(resp, &out); err != nil {
return nil, nil, err
}
return &out, wm, nil
}
// AuthMethodUpdate updates an auth method.
func (a *ACL) AuthMethodUpdate(method *ACLAuthMethod, q *WriteOptions) (*ACLAuthMethod, *WriteMeta, error) {
if method.Name == "" {
return nil, nil, fmt.Errorf("Must specify a Name in Auth Method Update")
}
r := a.c.newRequest("PUT", "/v1/acl/auth-method/"+url.QueryEscape(method.Name))
r.setWriteOptions(q)
r.obj = method
rtt, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
wm := &WriteMeta{RequestTime: rtt}
var out ACLAuthMethod
if err := decodeBody(resp, &out); err != nil {
return nil, nil, err
}
return &out, wm, nil
}
// AuthMethodDelete deletes an auth method given its Name.
func (a *ACL) AuthMethodDelete(methodName string, q *WriteOptions) (*WriteMeta, error) {
if methodName == "" {
return nil, fmt.Errorf("Must specify a Name in Auth Method Delete")
}
r := a.c.newRequest("DELETE", "/v1/acl/auth-method/"+url.QueryEscape(methodName))
r.setWriteOptions(q)
rtt, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return nil, err
}
resp.Body.Close()
wm := &WriteMeta{RequestTime: rtt}
return wm, nil
}
// AuthMethodRead retrieves the auth method. Returns nil if not found.
func (a *ACL) AuthMethodRead(methodName string, q *QueryOptions) (*ACLAuthMethod, *QueryMeta, error) {
if methodName == "" {
return nil, nil, fmt.Errorf("Must specify a Name in Auth Method Read")
}
r := a.c.newRequest("GET", "/v1/acl/auth-method/"+url.QueryEscape(methodName))
r.setQueryOptions(q)
found, rtt, resp, err := requireNotFoundOrOK(a.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
if !found {
return nil, qm, nil
}
var out ACLAuthMethod
if err := decodeBody(resp, &out); err != nil {
return nil, nil, err
}
return &out, qm, nil
}
// AuthMethodList retrieves a listing of all auth methods. The listing does not
// include some metadata for the auth method as those should be retrieved by
// subsequent calls to AuthMethodRead.
func (a *ACL) AuthMethodList(q *QueryOptions) ([]*ACLAuthMethodListEntry, *QueryMeta, error) {
r := a.c.newRequest("GET", "/v1/acl/auth-methods")
r.setQueryOptions(q)
rtt, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
var entries []*ACLAuthMethodListEntry
if err := decodeBody(resp, &entries); err != nil {
return nil, nil, err
}
return entries, qm, nil
}
// BindingRuleCreate will create a new binding rule. It is not allowed for the
// binding rule parameter's ID field to be set as this will be generated by
// Consul while processing the request.
func (a *ACL) BindingRuleCreate(rule *ACLBindingRule, q *WriteOptions) (*ACLBindingRule, *WriteMeta, error) {
if rule.ID != "" {
return nil, nil, fmt.Errorf("Cannot specify an ID in Binding Rule Creation")
}
r := a.c.newRequest("PUT", "/v1/acl/binding-rule")
r.setWriteOptions(q)
r.obj = rule
rtt, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
wm := &WriteMeta{RequestTime: rtt}
var out ACLBindingRule
if err := decodeBody(resp, &out); err != nil {
return nil, nil, err
}
return &out, wm, nil
}
// BindingRuleUpdate updates a binding rule. The ID field of the role binding
// rule parameter must be set to an existing binding rule ID.
func (a *ACL) BindingRuleUpdate(rule *ACLBindingRule, q *WriteOptions) (*ACLBindingRule, *WriteMeta, error) {
if rule.ID == "" {
return nil, nil, fmt.Errorf("Must specify an ID in Binding Rule Update")
}
r := a.c.newRequest("PUT", "/v1/acl/binding-rule/"+rule.ID)
r.setWriteOptions(q)
r.obj = rule
rtt, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
wm := &WriteMeta{RequestTime: rtt}
var out ACLBindingRule
if err := decodeBody(resp, &out); err != nil {
return nil, nil, err
}
return &out, wm, nil
}
// BindingRuleDelete deletes a binding rule given its ID.
func (a *ACL) BindingRuleDelete(bindingRuleID string, q *WriteOptions) (*WriteMeta, error) {
r := a.c.newRequest("DELETE", "/v1/acl/binding-rule/"+bindingRuleID)
r.setWriteOptions(q)
rtt, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return nil, err
}
resp.Body.Close()
wm := &WriteMeta{RequestTime: rtt}
return wm, nil
}
// BindingRuleRead retrieves the binding rule details. Returns nil if not found.
func (a *ACL) BindingRuleRead(bindingRuleID string, q *QueryOptions) (*ACLBindingRule, *QueryMeta, error) {
r := a.c.newRequest("GET", "/v1/acl/binding-rule/"+bindingRuleID)
r.setQueryOptions(q)
found, rtt, resp, err := requireNotFoundOrOK(a.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
if !found {
return nil, qm, nil
}
var out ACLBindingRule
if err := decodeBody(resp, &out); err != nil {
return nil, nil, err
}
return &out, qm, nil
}
// BindingRuleList retrieves a listing of all binding rules.
func (a *ACL) BindingRuleList(methodName string, q *QueryOptions) ([]*ACLBindingRule, *QueryMeta, error) {
r := a.c.newRequest("GET", "/v1/acl/binding-rules")
if methodName != "" {
r.params.Set("authmethod", methodName)
}
r.setQueryOptions(q)
rtt, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
var entries []*ACLBindingRule
if err := decodeBody(resp, &entries); err != nil {
return nil, nil, err
}
return entries, qm, nil
}
// Login is used to exchange auth method credentials for a newly-minted Consul Token.
func (a *ACL) Login(auth *ACLLoginParams, q *WriteOptions) (*ACLToken, *WriteMeta, error) {
r := a.c.newRequest("POST", "/v1/acl/login")
r.setWriteOptions(q)
r.obj = auth
rtt, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
wm := &WriteMeta{RequestTime: rtt}
var out ACLToken
if err := decodeBody(resp, &out); err != nil {
return nil, nil, err
}
return &out, wm, nil
}
// Logout is used to destroy a Consul Token created via Login().
func (a *ACL) Logout(q *WriteOptions) (*WriteMeta, error) {
r := a.c.newRequest("POST", "/v1/acl/logout")
r.setWriteOptions(q)
rtt, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return nil, err
}
resp.Body.Close()
wm := &WriteMeta{RequestTime: rtt}
return wm, nil
}

View File

@ -84,11 +84,11 @@ type AgentService struct {
Address string
Weights AgentWeights
EnableTagOverride bool
CreateIndex uint64 `json:",omitempty"`
ModifyIndex uint64 `json:",omitempty"`
ContentHash string `json:",omitempty"`
CreateIndex uint64 `json:",omitempty" bexpr:"-"`
ModifyIndex uint64 `json:",omitempty" bexpr:"-"`
ContentHash string `json:",omitempty" bexpr:"-"`
// DEPRECATED (ProxyDestination) - remove this field
ProxyDestination string `json:",omitempty"`
ProxyDestination string `json:",omitempty" bexpr:"-"`
Proxy *AgentServiceConnectProxyConfig `json:",omitempty"`
Connect *AgentServiceConnect `json:",omitempty"`
}
@ -103,8 +103,8 @@ type AgentServiceChecksInfo struct {
// AgentServiceConnect represents the Connect configuration of a service.
type AgentServiceConnect struct {
Native bool `json:",omitempty"`
Proxy *AgentServiceConnectProxy `json:",omitempty"`
SidecarService *AgentServiceRegistration `json:",omitempty"`
Proxy *AgentServiceConnectProxy `json:",omitempty" bexpr:"-"`
SidecarService *AgentServiceRegistration `json:",omitempty" bexpr:"-"`
}
// AgentServiceConnectProxy represents the Connect Proxy configuration of a
@ -112,7 +112,7 @@ type AgentServiceConnect struct {
type AgentServiceConnectProxy struct {
ExecMode ProxyExecMode `json:",omitempty"`
Command []string `json:",omitempty"`
Config map[string]interface{} `json:",omitempty"`
Config map[string]interface{} `json:",omitempty" bexpr:"-"`
Upstreams []Upstream `json:",omitempty"`
}
@ -123,7 +123,7 @@ type AgentServiceConnectProxyConfig struct {
DestinationServiceID string `json:",omitempty"`
LocalServiceAddress string `json:",omitempty"`
LocalServicePort int `json:",omitempty"`
Config map[string]interface{} `json:",omitempty"`
Config map[string]interface{} `json:",omitempty" bexpr:"-"`
Upstreams []Upstream
}
@ -278,9 +278,9 @@ type ConnectProxyConfig struct {
ContentHash string
// DEPRECATED(managed-proxies) - this struct is re-used for sidecar configs
// but they don't need ExecMode or Command
ExecMode ProxyExecMode `json:",omitempty"`
Command []string `json:",omitempty"`
Config map[string]interface{}
ExecMode ProxyExecMode `json:",omitempty"`
Command []string `json:",omitempty"`
Config map[string]interface{} `bexpr:"-"`
Upstreams []Upstream
}
@ -292,7 +292,7 @@ type Upstream struct {
Datacenter string `json:",omitempty"`
LocalBindAddress string `json:",omitempty"`
LocalBindPort int `json:",omitempty"`
Config map[string]interface{} `json:",omitempty"`
Config map[string]interface{} `json:",omitempty" bexpr:"-"`
}
// Agent can be used to query the Agent endpoints
@ -387,7 +387,14 @@ func (a *Agent) NodeName() (string, error) {
// Checks returns the locally registered checks
func (a *Agent) Checks() (map[string]*AgentCheck, error) {
return a.ChecksWithFilter("")
}
// ChecksWithFilter returns a subset of the locally registered checks that match
// the given filter expression
func (a *Agent) ChecksWithFilter(filter string) (map[string]*AgentCheck, error) {
r := a.c.newRequest("GET", "/v1/agent/checks")
r.filterQuery(filter)
_, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return nil, err
@ -403,7 +410,14 @@ func (a *Agent) Checks() (map[string]*AgentCheck, error) {
// Services returns the locally registered services
func (a *Agent) Services() (map[string]*AgentService, error) {
return a.ServicesWithFilter("")
}
// ServicesWithFilter returns a subset of the locally registered services that match
// the given filter expression
func (a *Agent) ServicesWithFilter(filter string) (map[string]*AgentService, error) {
r := a.c.newRequest("GET", "/v1/agent/services")
r.filterQuery(filter)
_, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return nil, err

View File

@ -30,6 +30,10 @@ const (
// the HTTP token.
HTTPTokenEnvName = "CONSUL_HTTP_TOKEN"
// HTTPTokenFileEnvName defines an environment variable name which sets
// the HTTP token file.
HTTPTokenFileEnvName = "CONSUL_HTTP_TOKEN_FILE"
// HTTPAuthEnvName defines an environment variable name which sets
// the HTTP authentication header.
HTTPAuthEnvName = "CONSUL_HTTP_AUTH"
@ -146,6 +150,10 @@ type QueryOptions struct {
// ctx is an optional context pass through to the underlying HTTP
// request layer. Use Context() and WithContext() to manage this.
ctx context.Context
// Filter requests filtering data prior to it being returned. The string
// is a go-bexpr compatible expression.
Filter string
}
func (o *QueryOptions) Context() context.Context {
@ -276,6 +284,10 @@ type Config struct {
// which overrides the agent's default token.
Token string
// TokenFile is a file containing the current token to use for this client.
// If provided it is read once at startup and never again.
TokenFile string
TLSConfig TLSConfig
}
@ -339,6 +351,10 @@ func defaultConfig(transportFn func() *http.Transport) *Config {
config.Address = addr
}
if tokenFile := os.Getenv(HTTPTokenFileEnvName); tokenFile != "" {
config.TokenFile = tokenFile
}
if token := os.Getenv(HTTPTokenEnvName); token != "" {
config.Token = token
}
@ -445,6 +461,7 @@ func (c *Config) GenerateEnv() []string {
env = append(env,
fmt.Sprintf("%s=%s", HTTPAddrEnvName, c.Address),
fmt.Sprintf("%s=%s", HTTPTokenEnvName, c.Token),
fmt.Sprintf("%s=%s", HTTPTokenFileEnvName, c.TokenFile),
fmt.Sprintf("%s=%t", HTTPSSLEnvName, c.Scheme == "https"),
fmt.Sprintf("%s=%s", HTTPCAFile, c.TLSConfig.CAFile),
fmt.Sprintf("%s=%s", HTTPCAPath, c.TLSConfig.CAPath),
@ -537,6 +554,19 @@ func NewClient(config *Config) (*Client, error) {
config.Address = parts[1]
}
// If the TokenFile is set, always use that, even if a Token is configured.
// This is because when TokenFile is set it is read into the Token field.
// We want any derived clients to have to re-read the token file.
if config.TokenFile != "" {
data, err := ioutil.ReadFile(config.TokenFile)
if err != nil {
return nil, fmt.Errorf("Error loading token file: %s", err)
}
if token := strings.TrimSpace(string(data)); token != "" {
config.Token = token
}
}
if config.Token == "" {
config.Token = defConfig.Token
}
@ -614,6 +644,9 @@ func (r *request) setQueryOptions(q *QueryOptions) {
if q.Near != "" {
r.params.Set("near", q.Near)
}
if q.Filter != "" {
r.params.Set("filter", q.Filter)
}
if len(q.NodeMeta) > 0 {
for key, value := range q.NodeMeta {
r.params.Add("node-meta", key+":"+value)
@ -813,6 +846,8 @@ func (c *Client) write(endpoint string, in, out interface{}, q *WriteOptions) (*
}
// parseQueryMeta is used to help parse query meta-data
//
// TODO(rb): bug? the error from this function is never handled
func parseQueryMeta(resp *http.Response, q *QueryMeta) error {
header := resp.Header
@ -890,10 +925,42 @@ func requireOK(d time.Duration, resp *http.Response, e error) (time.Duration, *h
return d, nil, e
}
if resp.StatusCode != 200 {
var buf bytes.Buffer
io.Copy(&buf, resp.Body)
resp.Body.Close()
return d, nil, fmt.Errorf("Unexpected response code: %d (%s)", resp.StatusCode, buf.Bytes())
return d, nil, generateUnexpectedResponseCodeError(resp)
}
return d, resp, nil
}
func (req *request) filterQuery(filter string) {
if filter == "" {
return
}
req.params.Set("filter", filter)
}
// generateUnexpectedResponseCodeError consumes the rest of the body, closes
// the body stream and generates an error indicating the status code was
// unexpected.
func generateUnexpectedResponseCodeError(resp *http.Response) error {
var buf bytes.Buffer
io.Copy(&buf, resp.Body)
resp.Body.Close()
return fmt.Errorf("Unexpected response code: %d (%s)", resp.StatusCode, buf.Bytes())
}
func requireNotFoundOrOK(d time.Duration, resp *http.Response, e error) (bool, time.Duration, *http.Response, error) {
if e != nil {
if resp != nil {
resp.Body.Close()
}
return false, d, nil, e
}
switch resp.StatusCode {
case 200:
return true, d, resp, nil
case 404:
return false, d, resp, nil
default:
return false, d, nil, generateUnexpectedResponseCodeError(resp)
}
}

255
vendor/github.com/hashicorp/consul/api/config_entry.go generated vendored Normal file
View File

@ -0,0 +1,255 @@
package api
import (
"bytes"
"encoding/json"
"fmt"
"io"
"strconv"
"strings"
"github.com/mitchellh/mapstructure"
)
const (
ServiceDefaults string = "service-defaults"
ProxyDefaults string = "proxy-defaults"
ProxyConfigGlobal string = "global"
)
type ConfigEntry interface {
GetKind() string
GetName() string
GetCreateIndex() uint64
GetModifyIndex() uint64
}
type ServiceConfigEntry struct {
Kind string
Name string
Protocol string
CreateIndex uint64
ModifyIndex uint64
}
func (s *ServiceConfigEntry) GetKind() string {
return s.Kind
}
func (s *ServiceConfigEntry) GetName() string {
return s.Name
}
func (s *ServiceConfigEntry) GetCreateIndex() uint64 {
return s.CreateIndex
}
func (s *ServiceConfigEntry) GetModifyIndex() uint64 {
return s.ModifyIndex
}
type ProxyConfigEntry struct {
Kind string
Name string
Config map[string]interface{}
CreateIndex uint64
ModifyIndex uint64
}
func (p *ProxyConfigEntry) GetKind() string {
return p.Kind
}
func (p *ProxyConfigEntry) GetName() string {
return p.Name
}
func (p *ProxyConfigEntry) GetCreateIndex() uint64 {
return p.CreateIndex
}
func (p *ProxyConfigEntry) GetModifyIndex() uint64 {
return p.ModifyIndex
}
type rawEntryListResponse struct {
kind string
Entries []map[string]interface{}
}
func makeConfigEntry(kind, name string) (ConfigEntry, error) {
switch kind {
case ServiceDefaults:
return &ServiceConfigEntry{Name: name}, nil
case ProxyDefaults:
return &ProxyConfigEntry{Name: name}, nil
default:
return nil, fmt.Errorf("invalid config entry kind: %s", kind)
}
}
func DecodeConfigEntry(raw map[string]interface{}) (ConfigEntry, error) {
var entry ConfigEntry
kindVal, ok := raw["Kind"]
if !ok {
kindVal, ok = raw["kind"]
}
if !ok {
return nil, fmt.Errorf("Payload does not contain a kind/Kind key at the top level")
}
if kindStr, ok := kindVal.(string); ok {
newEntry, err := makeConfigEntry(kindStr, "")
if err != nil {
return nil, err
}
entry = newEntry
} else {
return nil, fmt.Errorf("Kind value in payload is not a string")
}
decodeConf := &mapstructure.DecoderConfig{
DecodeHook: mapstructure.StringToTimeDurationHookFunc(),
Result: &entry,
WeaklyTypedInput: true,
}
decoder, err := mapstructure.NewDecoder(decodeConf)
if err != nil {
return nil, err
}
return entry, decoder.Decode(raw)
}
func DecodeConfigEntryFromJSON(data []byte) (ConfigEntry, error) {
var raw map[string]interface{}
if err := json.Unmarshal(data, &raw); err != nil {
return nil, err
}
return DecodeConfigEntry(raw)
}
// Config can be used to query the Config endpoints
type ConfigEntries struct {
c *Client
}
// Config returns a handle to the Config endpoints
func (c *Client) ConfigEntries() *ConfigEntries {
return &ConfigEntries{c}
}
func (conf *ConfigEntries) Get(kind string, name string, q *QueryOptions) (ConfigEntry, *QueryMeta, error) {
if kind == "" || name == "" {
return nil, nil, fmt.Errorf("Both kind and name parameters must not be empty")
}
entry, err := makeConfigEntry(kind, name)
if err != nil {
return nil, nil, err
}
r := conf.c.newRequest("GET", fmt.Sprintf("/v1/config/%s/%s", kind, name))
r.setQueryOptions(q)
rtt, resp, err := requireOK(conf.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
if err := decodeBody(resp, entry); err != nil {
return nil, nil, err
}
return entry, qm, nil
}
func (conf *ConfigEntries) List(kind string, q *QueryOptions) ([]ConfigEntry, *QueryMeta, error) {
if kind == "" {
return nil, nil, fmt.Errorf("The kind parameter must not be empty")
}
r := conf.c.newRequest("GET", fmt.Sprintf("/v1/config/%s", kind))
r.setQueryOptions(q)
rtt, resp, err := requireOK(conf.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
var raw []map[string]interface{}
if err := decodeBody(resp, &raw); err != nil {
return nil, nil, err
}
var entries []ConfigEntry
for _, rawEntry := range raw {
entry, err := DecodeConfigEntry(rawEntry)
if err != nil {
return nil, nil, err
}
entries = append(entries, entry)
}
return entries, qm, nil
}
func (conf *ConfigEntries) Set(entry ConfigEntry, w *WriteOptions) (bool, *WriteMeta, error) {
return conf.set(entry, nil, w)
}
func (conf *ConfigEntries) CAS(entry ConfigEntry, index uint64, w *WriteOptions) (bool, *WriteMeta, error) {
return conf.set(entry, map[string]string{"cas": strconv.FormatUint(index, 10)}, w)
}
func (conf *ConfigEntries) set(entry ConfigEntry, params map[string]string, w *WriteOptions) (bool, *WriteMeta, error) {
r := conf.c.newRequest("PUT", "/v1/config")
r.setWriteOptions(w)
for param, value := range params {
r.params.Set(param, value)
}
r.obj = entry
rtt, resp, err := requireOK(conf.c.doRequest(r))
if err != nil {
return false, nil, err
}
defer resp.Body.Close()
var buf bytes.Buffer
if _, err := io.Copy(&buf, resp.Body); err != nil {
return false, nil, fmt.Errorf("Failed to read response: %v", err)
}
res := strings.Contains(buf.String(), "true")
wm := &WriteMeta{RequestTime: rtt}
return res, wm, nil
}
func (conf *ConfigEntries) Delete(kind string, name string, w *WriteOptions) (*WriteMeta, error) {
if kind == "" || name == "" {
return nil, fmt.Errorf("Both kind and name parameters must not be empty")
}
r := conf.c.newRequest("DELETE", fmt.Sprintf("/v1/config/%s/%s", kind, name))
r.setWriteOptions(w)
rtt, resp, err := requireOK(conf.c.doRequest(r))
if err != nil {
return nil, err
}
resp.Body.Close()
wm := &WriteMeta{RequestTime: rtt}
return wm, nil
}

View File

@ -5,7 +5,7 @@ go 1.12
replace github.com/hashicorp/consul/sdk => ../sdk
require (
github.com/hashicorp/consul/sdk v0.1.0
github.com/hashicorp/consul/sdk v0.1.1
github.com/hashicorp/go-cleanhttp v0.5.1
github.com/hashicorp/go-rootcerts v1.0.0
github.com/hashicorp/go-uuid v1.0.1

View File

@ -0,0 +1,2 @@
test::
go test

View File

@ -0,0 +1,6 @@
# sockaddr/template
sockaddr's template library. See
the
[sockaddr/template](https://godoc.org/github.com/hashicorp/go-sockaddr/template)
docs for details on how to use this template.

311
vendor/github.com/hashicorp/go-sockaddr/template/doc.go generated vendored Normal file
View File

@ -0,0 +1,311 @@
/*
Package sockaddr/template provides a text/template interface the SockAddr helper
functions. The primary entry point into the sockaddr/template package is
through its Parse() call. For example:
import (
"fmt"
template "github.com/hashicorp/go-sockaddr/template"
)
results, err := template.Parse(`{{ GetPrivateIP }}`)
if err != nil {
fmt.Errorf("Unable to find a private IP address: %v", err)
}
fmt.Printf("My Private IP address is: %s\n", results)
Below is a list of builtin template functions and details re: their usage. It
is possible to add additional functions by calling ParseIfAddrsTemplate
directly.
In general, the calling convention for this template library is to seed a list
of initial interfaces via one of the Get*Interfaces() calls, then filter, sort,
and extract the necessary attributes for use as string input. This template
interface is primarily geared toward resolving specific values that are only
available at runtime, but can be defined as a heuristic for execution when a
config file is parsed.
All functions, unless noted otherwise, return an array of IfAddr structs making
it possible to `sort`, `filter`, `limit`, seek (via the `offset` function), or
`unique` the list. To extract useful string information, the `attr` and `join`
functions return a single string value. See below for details.
Important note: see the
https://github.com/hashicorp/go-sockaddr/tree/master/cmd/sockaddr utility for
more examples and for a CLI utility to experiment with the template syntax.
`GetAllInterfaces` - Returns an exhaustive set of IfAddr structs available on
the host. `GetAllInterfaces` is the initial input and accessible as the initial
"dot" in the pipeline.
Example:
{{ GetAllInterfaces }}
`GetDefaultInterfaces` - Returns one IfAddr for every IP that is on the
interface containing the default route for the host.
Example:
{{ GetDefaultInterfaces }}
`GetPrivateInterfaces` - Returns one IfAddr for every forwardable IP address
that is included in RFC 6890 and whose interface is marked as up. NOTE: RFC 6890 is a more exhaustive
version of RFC1918 because it spans IPv4 and IPv6, however, RFC6890 does permit the
inclusion of likely undesired addresses such as multicast, therefore our version
of "private" also filters out non-forwardable addresses.
Example:
{{ GetPrivateInterfaces | sort "default" | join "address" " " }}
`GetPublicInterfaces` - Returns a list of IfAddr structs whos IPs are
forwardable, do not match RFC 6890, and whose interface is marked up.
Example:
{{ GetPublicInterfaces | sort "default" | join "name" " " }}
`GetPrivateIP` - Helper function that returns a string of the first IP address
from GetPrivateInterfaces.
Example:
{{ GetPrivateIP }}
`GetPrivateIPs` - Helper function that returns a string of the all private IP
addresses on the host.
Example:
{{ GetPrivateIPs }}
`GetPublicIP` - Helper function that returns a string of the first IP from
GetPublicInterfaces.
Example:
{{ GetPublicIP }}
`GetPublicIPs` - Helper function that returns a space-delimited string of the
all public IP addresses on the host.
Example:
{{ GetPrivateIPs }}
`GetInterfaceIP` - Helper function that returns a string of the first IP from
the named interface.
Example:
{{ GetInterfaceIP "en0" }}
`GetInterfaceIPs` - Helper function that returns a space-delimited list of all
IPs on a given interface.
Example:
{{ GetInterfaceIPs "en0" }}
`sort` - Sorts the IfAddrs result based on its arguments. `sort` takes one
argument, a list of ways to sort its IfAddrs argument. The list of sort
criteria is comma separated (`,`):
- `address`, `+address`: Ascending sort of IfAddrs by Address
- `-address`: Descending sort of IfAddrs by Address
- `default`, `+default`: Ascending sort of IfAddrs, IfAddr with a default route first
- `-default`: Descending sort of IfAddrs, IfAttr with default route last
- `name`, `+name`: Ascending sort of IfAddrs by lexical ordering of interface name
- `-name`: Descending sort of IfAddrs by lexical ordering of interface name
- `port`, `+port`: Ascending sort of IfAddrs by port number
- `-port`: Descending sort of IfAddrs by port number
- `private`, `+private`: Ascending sort of IfAddrs with private addresses first
- `-private`: Descending sort IfAddrs with private addresses last
- `size`, `+size`: Ascending sort of IfAddrs by their network size as determined
by their netmask (larger networks first)
- `-size`: Descending sort of IfAddrs by their network size as determined by their
netmask (smaller networks first)
- `type`, `+type`: Ascending sort of IfAddrs by the type of the IfAddr (Unix,
IPv4, then IPv6)
- `-type`: Descending sort of IfAddrs by the type of the IfAddr (IPv6, IPv4, Unix)
Example:
{{ GetPrivateInterfaces | sort "default,-type,size,+address" }}
`exclude` and `include`: Filters IfAddrs based on the selector criteria and its
arguments. Both `exclude` and `include` take two arguments. The list of
available filtering criteria is:
- "address": Filter IfAddrs based on a regexp matching the string representation
of the address
- "flag","flags": Filter IfAddrs based on the list of flags specified. Multiple
flags can be passed together using the pipe character (`|`) to create an inclusive
bitmask of flags. The list of flags is included below.
- "name": Filter IfAddrs based on a regexp matching the interface name.
- "network": Filter IfAddrs based on whether a netowkr is included in a given
CIDR. More than one CIDR can be passed in if each network is separated by
the pipe character (`|`).
- "port": Filter IfAddrs based on an exact match of the port number (number must
be expressed as a string)
- "rfc", "rfcs": Filter IfAddrs based on the matching RFC. If more than one RFC
is specified, the list of RFCs can be joined together using the pipe character (`|`).
- "size": Filter IfAddrs based on the exact match of the mask size.
- "type": Filter IfAddrs based on their SockAddr type. Multiple types can be
specified together by using the pipe character (`|`). Valid types include:
`ip`, `ipv4`, `ipv6`, and `unix`.
Example:
{{ GetPrivateInterfaces | exclude "type" "IPv6" }}
`unique`: Removes duplicate entries from the IfAddrs list, assuming the list has
already been sorted. `unique` only takes one argument:
- "address": Removes duplicates with the same address
- "name": Removes duplicates with the same interface names
Example:
{{ GetAllInterfaces | sort "default,-type,address" | unique "name" }}
`limit`: Reduces the size of the list to the specified value.
Example:
{{ GetPrivateInterfaces | limit 1 }}
`offset`: Seeks into the list by the specified value. A negative value can be
used to seek from the end of the list.
Example:
{{ GetPrivateInterfaces | offset "-2" | limit 1 }}
`math`: Perform a "math" operation on each member of the list and return new
values. `math` takes two arguments, the attribute to operate on and the
operation's value.
Supported operations include:
- `address`: Adds the value, a positive or negative value expressed as a
decimal string, to the address. The sign is required. This value is
allowed to over or underflow networks (e.g. 127.255.255.255 `"address" "+1"`
will return "128.0.0.0"). Addresses will wrap at IPv4 or IPv6 boundaries.
- `network`: Add the value, a positive or negative value expressed as a
decimal string, to the network address. The sign is required. Positive
values are added to the network address. Negative values are subtracted
from the network's broadcast address (e.g. 127.0.0.1 `"network" "-1"` will
return "127.255.255.255"). Values that overflow the network size will
safely wrap.
- `mask`: Applies the given network mask to the address. The network mask is
expressed as a decimal value (e.g. network mask "24" corresponds to
`255.255.255.0`). After applying the network mask, the network mask of the
resulting address will be either the applied network mask or the network mask
of the input address depending on which network is larger
(e.g. 192.168.10.20/24 `"mask" "16"` will return "192.168.0.0/16" but
192.168.10.20/24 `"mask" "28"` will return "192.168.10.16/24").
Example:
{{ GetPrivateInterfaces | include "type" "IP" | math "address" "+256" | attr "address" }}
{{ GetPrivateInterfaces | include "type" "IP" | math "address" "-256" | attr "address" }}
{{ GetPrivateInterfaces | include "type" "IP" | math "network" "+2" | attr "address" }}
{{ GetPrivateInterfaces | include "type" "IP" | math "network" "-2" | attr "address" }}
{{ GetPrivateInterfaces | include "type" "IP" | math "mask" "24" | attr "address" }}
{{ GetPrivateInterfaces | include "flags" "forwardable|up" | include "type" "IPv4" | math "network" "+2" | attr "address" }}
`attr`: Extracts a single attribute of the first member of the list and returns
it as a string. `attr` takes a single attribute name. The list of available
attributes is type-specific and shared between `join`. See below for a list of
supported attributes.
Example:
{{ GetAllInterfaces | exclude "flags" "up" | attr "address" }}
`Attr`: Extracts a single attribute from an `IfAttr` and in every other way
performs the same as the `attr`.
Example:
{{ with $ifAddrs := GetAllInterfaces | include "type" "IP" | sort "+type,+address" -}}
{{- range $ifAddrs -}}
{{- Attr "address" . }} -- {{ Attr "network" . }}/{{ Attr "size" . -}}
{{- end -}}
{{- end }}
`join`: Similar to `attr`, `join` extracts all matching attributes of the list
and returns them as a string joined by the separator, the second argument to
`join`. The list of available attributes is type-specific and shared between
`join`.
Example:
{{ GetAllInterfaces | include "flags" "forwardable" | join "address" " " }}
`exclude` and `include` flags:
- `broadcast`
- `down`: Is the interface down?
- `forwardable`: Is the IP forwardable?
- `global unicast`
- `interface-local multicast`
- `link-local multicast`
- `link-local unicast`
- `loopback`
- `multicast`
- `point-to-point`
- `unspecified`: Is the IfAddr the IPv6 unspecified address?
- `up`: Is the interface up?
Attributes for `attr`, `Attr`, and `join`:
SockAddr Type:
- `string`
- `type`
IPAddr Type:
- `address`
- `binary`
- `first_usable`
- `hex`
- `host`
- `last_usable`
- `mask_bits`
- `netmask`
- `network`
- `octets`: Decimal values per byte
- `port`
- `size`: Number of hosts in the network
IPv4Addr Type:
- `broadcast`
- `uint32`: unsigned integer representation of the value
IPv6Addr Type:
- `uint128`: unsigned integer representation of the value
UnixSock Type:
- `path`
*/
package template

View File

@ -0,0 +1,155 @@
package template
import (
"bytes"
"fmt"
"text/template"
"github.com/hashicorp/errwrap"
sockaddr "github.com/hashicorp/go-sockaddr"
)
var (
// SourceFuncs is a map of all top-level functions that generate
// sockaddr data types.
SourceFuncs template.FuncMap
// SortFuncs is a map of all functions used in sorting
SortFuncs template.FuncMap
// FilterFuncs is a map of all functions used in sorting
FilterFuncs template.FuncMap
// HelperFuncs is a map of all functions used in sorting
HelperFuncs template.FuncMap
)
func init() {
SourceFuncs = template.FuncMap{
// GetAllInterfaces - Returns an exhaustive set of IfAddr
// structs available on the host. `GetAllInterfaces` is the
// initial input and accessible as the initial "dot" in the
// pipeline.
"GetAllInterfaces": sockaddr.GetAllInterfaces,
// GetDefaultInterfaces - Returns one IfAddr for every IP that
// is on the interface containing the default route for the
// host.
"GetDefaultInterfaces": sockaddr.GetDefaultInterfaces,
// GetPrivateInterfaces - Returns one IfAddr for every IP that
// matches RFC 6890, are attached to the interface with the
// default route, and are forwardable IP addresses. NOTE: RFC
// 6890 is a more exhaustive version of RFC1918 because it spans
// IPv4 and IPv6, however it doespermit the inclusion of likely
// undesired addresses such as multicast, therefore our
// definition of a "private" address also excludes
// non-forwardable IP addresses (as defined by the IETF).
"GetPrivateInterfaces": sockaddr.GetPrivateInterfaces,
// GetPublicInterfaces - Returns a list of IfAddr that do not
// match RFC 6890, are attached to the default route, and are
// forwardable.
"GetPublicInterfaces": sockaddr.GetPublicInterfaces,
}
SortFuncs = template.FuncMap{
"sort": sockaddr.SortIfBy,
}
FilterFuncs = template.FuncMap{
"exclude": sockaddr.ExcludeIfs,
"include": sockaddr.IncludeIfs,
}
HelperFuncs = template.FuncMap{
// Misc functions that operate on IfAddrs inputs
"attr": Attr,
"join": sockaddr.JoinIfAddrs,
"limit": sockaddr.LimitIfAddrs,
"offset": sockaddr.OffsetIfAddrs,
"unique": sockaddr.UniqueIfAddrsBy,
// Misc math functions that operate on a single IfAddr input
"math": sockaddr.IfAddrsMath,
// Return a Private RFC 6890 IP address string that is attached
// to the default route and a forwardable address.
"GetPrivateIP": sockaddr.GetPrivateIP,
// Return all Private RFC 6890 IP addresses as a space-delimited string of
// IP addresses. Addresses returned do not have to be on the interface with
// a default route.
"GetPrivateIPs": sockaddr.GetPrivateIPs,
// Return a Public RFC 6890 IP address string that is attached
// to the default route and a forwardable address.
"GetPublicIP": sockaddr.GetPublicIP,
// Return allPublic RFC 6890 IP addresses as a space-delimited string of IP
// addresses. Addresses returned do not have to be on the interface with a
// default route.
"GetPublicIPs": sockaddr.GetPublicIPs,
// Return the first IP address of the named interface, sorted by
// the largest network size.
"GetInterfaceIP": sockaddr.GetInterfaceIP,
// Return all IP addresses on the named interface, sorted by the largest
// network size.
"GetInterfaceIPs": sockaddr.GetInterfaceIPs,
}
}
// Attr returns the attribute from the ifAddrRaw argument. If the argument is
// an IfAddrs, only the first element will be evaluated for resolution.
func Attr(selectorName string, ifAddrsRaw interface{}) (string, error) {
switch v := ifAddrsRaw.(type) {
case sockaddr.IfAddr:
return sockaddr.IfAttr(selectorName, v)
case sockaddr.IfAddrs:
return sockaddr.IfAttrs(selectorName, v)
default:
return "", fmt.Errorf("unable to obtain attribute %s from type %T (%v)", selectorName, ifAddrsRaw, ifAddrsRaw)
}
}
// Parse parses input as template input using the addresses available on the
// host, then returns the string output if there are no errors.
func Parse(input string) (string, error) {
addrs, err := sockaddr.GetAllInterfaces()
if err != nil {
return "", errwrap.Wrapf("unable to query interface addresses: {{err}}", err)
}
return ParseIfAddrs(input, addrs)
}
// ParseIfAddrs parses input as template input using the IfAddrs inputs, then
// returns the string output if there are no errors.
func ParseIfAddrs(input string, ifAddrs sockaddr.IfAddrs) (string, error) {
return ParseIfAddrsTemplate(input, ifAddrs, template.New("sockaddr.Parse"))
}
// ParseIfAddrsTemplate parses input as template input using the IfAddrs inputs,
// then returns the string output if there are no errors.
func ParseIfAddrsTemplate(input string, ifAddrs sockaddr.IfAddrs, tmplIn *template.Template) (string, error) {
// Create a template, add the function map, and parse the text.
tmpl, err := tmplIn.Option("missingkey=error").
Funcs(SourceFuncs).
Funcs(SortFuncs).
Funcs(FilterFuncs).
Funcs(HelperFuncs).
Parse(input)
if err != nil {
return "", errwrap.Wrapf(fmt.Sprintf("unable to parse template %+q: {{err}}", input), err)
}
var outWriter bytes.Buffer
err = tmpl.Execute(&outWriter, ifAddrs)
if err != nil {
return "", errwrap.Wrapf(fmt.Sprintf("unable to execute sockaddr input %+q: {{err}}", input), err)
}
return outWriter.String(), nil
}

View File

@ -0,0 +1,28 @@
package pointerutil
import (
"os"
"time"
)
// StringPtr returns a pointer to a string value
func StringPtr(s string) *string {
return &s
}
// BoolPtr returns a pointer to a boolean value
func BoolPtr(b bool) *bool {
return &b
}
// TimeDurationPtr returns a pointer to a time duration value
func TimeDurationPtr(duration string) *time.Duration {
d, _ := time.ParseDuration(duration)
return &d
}
// FileModePtr returns a pointer to the given os.FileMode
func FileModePtr(o os.FileMode) *os.FileMode {
return &o
}

93
vendor/github.com/lib/pq/oid/gen.go generated vendored
View File

@ -1,93 +0,0 @@
// +build ignore
// Generate the table of OID values
// Run with 'go run gen.go'.
package main
import (
"database/sql"
"fmt"
"log"
"os"
"os/exec"
"strings"
_ "github.com/lib/pq"
)
// OID represent a postgres Object Identifier Type.
type OID struct {
ID int
Type string
}
// Name returns an upper case version of the oid type.
func (o OID) Name() string {
return strings.ToUpper(o.Type)
}
func main() {
datname := os.Getenv("PGDATABASE")
sslmode := os.Getenv("PGSSLMODE")
if datname == "" {
os.Setenv("PGDATABASE", "pqgotest")
}
if sslmode == "" {
os.Setenv("PGSSLMODE", "disable")
}
db, err := sql.Open("postgres", "")
if err != nil {
log.Fatal(err)
}
rows, err := db.Query(`
SELECT typname, oid
FROM pg_type WHERE oid < 10000
ORDER BY oid;
`)
if err != nil {
log.Fatal(err)
}
oids := make([]*OID, 0)
for rows.Next() {
var oid OID
if err = rows.Scan(&oid.Type, &oid.ID); err != nil {
log.Fatal(err)
}
oids = append(oids, &oid)
}
if err = rows.Err(); err != nil {
log.Fatal(err)
}
cmd := exec.Command("gofmt")
cmd.Stderr = os.Stderr
w, err := cmd.StdinPipe()
if err != nil {
log.Fatal(err)
}
f, err := os.Create("types.go")
if err != nil {
log.Fatal(err)
}
cmd.Stdout = f
err = cmd.Start()
if err != nil {
log.Fatal(err)
}
fmt.Fprintln(w, "// Code generated by gen.go. DO NOT EDIT.")
fmt.Fprintln(w, "\npackage oid")
fmt.Fprintln(w, "const (")
for _, oid := range oids {
fmt.Fprintf(w, "T_%s Oid = %d\n", oid.Type, oid.ID)
}
fmt.Fprintln(w, ")")
fmt.Fprintln(w, "var TypeName = map[Oid]string{")
for _, oid := range oids {
fmt.Fprintf(w, "T_%s: \"%s\",\n", oid.Type, oid.Name())
}
fmt.Fprintln(w, "}")
w.Close()
cmd.Wait()
}

8
vendor/github.com/mattn/go-shellwords/.travis.yml generated vendored Normal file
View File

@ -0,0 +1,8 @@
language: go
go:
- tip
before_install:
- go get github.com/mattn/goveralls
- go get golang.org/x/tools/cmd/cover
script:
- $HOME/gopath/bin/goveralls -repotoken 2FMhp57u8LcstKL9B190fLTcEnBtAAiEL

21
vendor/github.com/mattn/go-shellwords/LICENSE generated vendored Normal file
View File

@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2017 Yasuhiro Matsumoto
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

47
vendor/github.com/mattn/go-shellwords/README.md generated vendored Normal file
View File

@ -0,0 +1,47 @@
# go-shellwords
[![Coverage Status](https://coveralls.io/repos/mattn/go-shellwords/badge.png?branch=master)](https://coveralls.io/r/mattn/go-shellwords?branch=master)
[![Build Status](https://travis-ci.org/mattn/go-shellwords.svg?branch=master)](https://travis-ci.org/mattn/go-shellwords)
Parse line as shell words.
## Usage
```go
args, err := shellwords.Parse("./foo --bar=baz")
// args should be ["./foo", "--bar=baz"]
```
```go
os.Setenv("FOO", "bar")
p := shellwords.NewParser()
p.ParseEnv = true
args, err := p.Parse("./foo $FOO")
// args should be ["./foo", "bar"]
```
```go
p := shellwords.NewParser()
p.ParseBacktick = true
args, err := p.Parse("./foo `echo $SHELL`")
// args should be ["./foo", "/bin/bash"]
```
```go
shellwords.ParseBacktick = true
p := shellwords.NewParser()
args, err := p.Parse("./foo `echo $SHELL`")
// args should be ["./foo", "/bin/bash"]
```
# Thanks
This is based on cpan module [Parse::CommandLine](https://metacpan.org/pod/Parse::CommandLine).
# License
under the MIT License: http://mattn.mit-license.org/2017
# Author
Yasuhiro Matsumoto (a.k.a mattn)

1
vendor/github.com/mattn/go-shellwords/go.mod generated vendored Normal file
View File

@ -0,0 +1 @@
module github.com/mattn/go-shellwords

195
vendor/github.com/mattn/go-shellwords/shellwords.go generated vendored Normal file
View File

@ -0,0 +1,195 @@
package shellwords
import (
"errors"
"os"
"regexp"
"strings"
)
var (
ParseEnv bool = false
ParseBacktick bool = false
)
var envRe = regexp.MustCompile(`\$({[a-zA-Z0-9_]+}|[a-zA-Z0-9_]+)`)
func isSpace(r rune) bool {
switch r {
case ' ', '\t', '\r', '\n':
return true
}
return false
}
func replaceEnv(getenv func(string) string, s string) string {
if getenv == nil {
getenv = os.Getenv
}
return envRe.ReplaceAllStringFunc(s, func(s string) string {
s = s[1:]
if s[0] == '{' {
s = s[1 : len(s)-1]
}
return getenv(s)
})
}
type Parser struct {
ParseEnv bool
ParseBacktick bool
Position int
// If ParseEnv is true, use this for getenv.
// If nil, use os.Getenv.
Getenv func(string) string
}
func NewParser() *Parser {
return &Parser{
ParseEnv: ParseEnv,
ParseBacktick: ParseBacktick,
Position: 0,
}
}
func (p *Parser) Parse(line string) ([]string, error) {
args := []string{}
buf := ""
var escaped, doubleQuoted, singleQuoted, backQuote, dollarQuote bool
backtick := ""
pos := -1
got := false
loop:
for i, r := range line {
if escaped {
buf += string(r)
escaped = false
continue
}
if r == '\\' {
if singleQuoted {
buf += string(r)
} else {
escaped = true
}
continue
}
if isSpace(r) {
if singleQuoted || doubleQuoted || backQuote || dollarQuote {
buf += string(r)
backtick += string(r)
} else if got {
if p.ParseEnv {
buf = replaceEnv(p.Getenv, buf)
}
args = append(args, buf)
buf = ""
got = false
}
continue
}
switch r {
case '`':
if !singleQuoted && !doubleQuoted && !dollarQuote {
if p.ParseBacktick {
if backQuote {
out, err := shellRun(backtick)
if err != nil {
return nil, err
}
buf = out
}
backtick = ""
backQuote = !backQuote
continue
}
backtick = ""
backQuote = !backQuote
}
case ')':
if !singleQuoted && !doubleQuoted && !backQuote {
if p.ParseBacktick {
if dollarQuote {
out, err := shellRun(backtick)
if err != nil {
return nil, err
}
if r == ')' {
buf = buf[:len(buf)-len(backtick)-2] + out
} else {
buf = buf[:len(buf)-len(backtick)-1] + out
}
}
backtick = ""
dollarQuote = !dollarQuote
continue
}
backtick = ""
dollarQuote = !dollarQuote
}
case '(':
if !singleQuoted && !doubleQuoted && !backQuote {
if !dollarQuote && strings.HasSuffix(buf, "$") {
dollarQuote = true
buf += "("
continue
} else {
return nil, errors.New("invalid command line string")
}
}
case '"':
if !singleQuoted && !dollarQuote {
doubleQuoted = !doubleQuoted
continue
}
case '\'':
if !doubleQuoted && !dollarQuote {
singleQuoted = !singleQuoted
continue
}
case ';', '&', '|', '<', '>':
if !(escaped || singleQuoted || doubleQuoted || backQuote) {
if r == '>' && len(buf) > 0 {
if c := buf[0]; '0' <= c && c <= '9' {
i -= 1
got = false
}
}
pos = i
break loop
}
}
got = true
buf += string(r)
if backQuote || dollarQuote {
backtick += string(r)
}
}
if got {
if p.ParseEnv {
buf = replaceEnv(p.Getenv, buf)
}
args = append(args, buf)
}
if escaped || singleQuoted || doubleQuoted || backQuote || dollarQuote {
return nil, errors.New("invalid command line string")
}
p.Position = pos
return args, nil
}
func Parse(line string) ([]string, error) {
return NewParser().Parse(line)
}

24
vendor/github.com/mattn/go-shellwords/util_go15.go generated vendored Normal file
View File

@ -0,0 +1,24 @@
// +build !go1.6
package shellwords
import (
"os"
"os/exec"
"runtime"
"strings"
)
func shellRun(line string) (string, error) {
var b []byte
var err error
if runtime.GOOS == "windows" {
b, err = exec.Command(os.Getenv("COMSPEC"), "/c", line).Output()
} else {
b, err = exec.Command(os.Getenv("SHELL"), "-c", line).Output()
}
if err != nil {
return "", err
}
return strings.TrimSpace(string(b)), nil
}

22
vendor/github.com/mattn/go-shellwords/util_posix.go generated vendored Normal file
View File

@ -0,0 +1,22 @@
// +build !windows,go1.6
package shellwords
import (
"errors"
"os"
"os/exec"
"strings"
)
func shellRun(line string) (string, error) {
shell := os.Getenv("SHELL")
b, err := exec.Command(shell, "-c", line).Output()
if err != nil {
if eerr, ok := err.(*exec.ExitError); ok {
b = eerr.Stderr
}
return "", errors.New(err.Error() + ":" + string(b))
}
return strings.TrimSpace(string(b)), nil
}

22
vendor/github.com/mattn/go-shellwords/util_windows.go generated vendored Normal file
View File

@ -0,0 +1,22 @@
// +build windows,go1.6
package shellwords
import (
"errors"
"os"
"os/exec"
"strings"
)
func shellRun(line string) (string, error) {
shell := os.Getenv("COMSPEC")
b, err := exec.Command(shell, "/c", line).Output()
if err != nil {
if eerr, ok := err.(*exec.ExitError); ok {
b = eerr.Stderr
}
return "", errors.New(err.Error() + ":" + string(b))
}
return strings.TrimSpace(string(b)), nil
}

21
vendor/github.com/mitchellh/hashstructure/LICENSE generated vendored Normal file
View File

@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2016 Mitchell Hashimoto
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

65
vendor/github.com/mitchellh/hashstructure/README.md generated vendored Normal file
View File

@ -0,0 +1,65 @@
# hashstructure [![GoDoc](https://godoc.org/github.com/mitchellh/hashstructure?status.svg)](https://godoc.org/github.com/mitchellh/hashstructure)
hashstructure is a Go library for creating a unique hash value
for arbitrary values in Go.
This can be used to key values in a hash (for use in a map, set, etc.)
that are complex. The most common use case is comparing two values without
sending data across the network, caching values locally (de-dup), and so on.
## Features
* Hash any arbitrary Go value, including complex types.
* Tag a struct field to ignore it and not affect the hash value.
* Tag a slice type struct field to treat it as a set where ordering
doesn't affect the hash code but the field itself is still taken into
account to create the hash value.
* Optionally specify a custom hash function to optimize for speed, collision
avoidance for your data set, etc.
* Optionally hash the output of `.String()` on structs that implement fmt.Stringer,
allowing effective hashing of time.Time
## Installation
Standard `go get`:
```
$ go get github.com/mitchellh/hashstructure
```
## Usage & Example
For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/hashstructure).
A quick code example is shown below:
```go
type ComplexStruct struct {
Name string
Age uint
Metadata map[string]interface{}
}
v := ComplexStruct{
Name: "mitchellh",
Age: 64,
Metadata: map[string]interface{}{
"car": true,
"location": "California",
"siblings": []string{"Bob", "John"},
},
}
hash, err := hashstructure.Hash(v, nil)
if err != nil {
panic(err)
}
fmt.Printf("%d", hash)
// Output:
// 2307517237273902113
```

1
vendor/github.com/mitchellh/hashstructure/go.mod generated vendored Normal file
View File

@ -0,0 +1 @@
module github.com/mitchellh/hashstructure

View File

@ -0,0 +1,358 @@
package hashstructure
import (
"encoding/binary"
"fmt"
"hash"
"hash/fnv"
"reflect"
)
// ErrNotStringer is returned when there's an error with hash:"string"
type ErrNotStringer struct {
Field string
}
// Error implements error for ErrNotStringer
func (ens *ErrNotStringer) Error() string {
return fmt.Sprintf("hashstructure: %s has hash:\"string\" set, but does not implement fmt.Stringer", ens.Field)
}
// HashOptions are options that are available for hashing.
type HashOptions struct {
// Hasher is the hash function to use. If this isn't set, it will
// default to FNV.
Hasher hash.Hash64
// TagName is the struct tag to look at when hashing the structure.
// By default this is "hash".
TagName string
// ZeroNil is flag determining if nil pointer should be treated equal
// to a zero value of pointed type. By default this is false.
ZeroNil bool
}
// Hash returns the hash value of an arbitrary value.
//
// If opts is nil, then default options will be used. See HashOptions
// for the default values. The same *HashOptions value cannot be used
// concurrently. None of the values within a *HashOptions struct are
// safe to read/write while hashing is being done.
//
// Notes on the value:
//
// * Unexported fields on structs are ignored and do not affect the
// hash value.
//
// * Adding an exported field to a struct with the zero value will change
// the hash value.
//
// For structs, the hashing can be controlled using tags. For example:
//
// struct {
// Name string
// UUID string `hash:"ignore"`
// }
//
// The available tag values are:
//
// * "ignore" or "-" - The field will be ignored and not affect the hash code.
//
// * "set" - The field will be treated as a set, where ordering doesn't
// affect the hash code. This only works for slices.
//
// * "string" - The field will be hashed as a string, only works when the
// field implements fmt.Stringer
//
func Hash(v interface{}, opts *HashOptions) (uint64, error) {
// Create default options
if opts == nil {
opts = &HashOptions{}
}
if opts.Hasher == nil {
opts.Hasher = fnv.New64()
}
if opts.TagName == "" {
opts.TagName = "hash"
}
// Reset the hash
opts.Hasher.Reset()
// Create our walker and walk the structure
w := &walker{
h: opts.Hasher,
tag: opts.TagName,
zeronil: opts.ZeroNil,
}
return w.visit(reflect.ValueOf(v), nil)
}
type walker struct {
h hash.Hash64
tag string
zeronil bool
}
type visitOpts struct {
// Flags are a bitmask of flags to affect behavior of this visit
Flags visitFlag
// Information about the struct containing this field
Struct interface{}
StructField string
}
func (w *walker) visit(v reflect.Value, opts *visitOpts) (uint64, error) {
t := reflect.TypeOf(0)
// Loop since these can be wrapped in multiple layers of pointers
// and interfaces.
for {
// If we have an interface, dereference it. We have to do this up
// here because it might be a nil in there and the check below must
// catch that.
if v.Kind() == reflect.Interface {
v = v.Elem()
continue
}
if v.Kind() == reflect.Ptr {
if w.zeronil {
t = v.Type().Elem()
}
v = reflect.Indirect(v)
continue
}
break
}
// If it is nil, treat it like a zero.
if !v.IsValid() {
v = reflect.Zero(t)
}
// Binary writing can use raw ints, we have to convert to
// a sized-int, we'll choose the largest...
switch v.Kind() {
case reflect.Int:
v = reflect.ValueOf(int64(v.Int()))
case reflect.Uint:
v = reflect.ValueOf(uint64(v.Uint()))
case reflect.Bool:
var tmp int8
if v.Bool() {
tmp = 1
}
v = reflect.ValueOf(tmp)
}
k := v.Kind()
// We can shortcut numeric values by directly binary writing them
if k >= reflect.Int && k <= reflect.Complex64 {
// A direct hash calculation
w.h.Reset()
err := binary.Write(w.h, binary.LittleEndian, v.Interface())
return w.h.Sum64(), err
}
switch k {
case reflect.Array:
var h uint64
l := v.Len()
for i := 0; i < l; i++ {
current, err := w.visit(v.Index(i), nil)
if err != nil {
return 0, err
}
h = hashUpdateOrdered(w.h, h, current)
}
return h, nil
case reflect.Map:
var includeMap IncludableMap
if opts != nil && opts.Struct != nil {
if v, ok := opts.Struct.(IncludableMap); ok {
includeMap = v
}
}
// Build the hash for the map. We do this by XOR-ing all the key
// and value hashes. This makes it deterministic despite ordering.
var h uint64
for _, k := range v.MapKeys() {
v := v.MapIndex(k)
if includeMap != nil {
incl, err := includeMap.HashIncludeMap(
opts.StructField, k.Interface(), v.Interface())
if err != nil {
return 0, err
}
if !incl {
continue
}
}
kh, err := w.visit(k, nil)
if err != nil {
return 0, err
}
vh, err := w.visit(v, nil)
if err != nil {
return 0, err
}
fieldHash := hashUpdateOrdered(w.h, kh, vh)
h = hashUpdateUnordered(h, fieldHash)
}
return h, nil
case reflect.Struct:
parent := v.Interface()
var include Includable
if impl, ok := parent.(Includable); ok {
include = impl
}
t := v.Type()
h, err := w.visit(reflect.ValueOf(t.Name()), nil)
if err != nil {
return 0, err
}
l := v.NumField()
for i := 0; i < l; i++ {
if innerV := v.Field(i); v.CanSet() || t.Field(i).Name != "_" {
var f visitFlag
fieldType := t.Field(i)
if fieldType.PkgPath != "" {
// Unexported
continue
}
tag := fieldType.Tag.Get(w.tag)
if tag == "ignore" || tag == "-" {
// Ignore this field
continue
}
// if string is set, use the string value
if tag == "string" {
if impl, ok := innerV.Interface().(fmt.Stringer); ok {
innerV = reflect.ValueOf(impl.String())
} else {
return 0, &ErrNotStringer{
Field: v.Type().Field(i).Name,
}
}
}
// Check if we implement includable and check it
if include != nil {
incl, err := include.HashInclude(fieldType.Name, innerV)
if err != nil {
return 0, err
}
if !incl {
continue
}
}
switch tag {
case "set":
f |= visitFlagSet
}
kh, err := w.visit(reflect.ValueOf(fieldType.Name), nil)
if err != nil {
return 0, err
}
vh, err := w.visit(innerV, &visitOpts{
Flags: f,
Struct: parent,
StructField: fieldType.Name,
})
if err != nil {
return 0, err
}
fieldHash := hashUpdateOrdered(w.h, kh, vh)
h = hashUpdateUnordered(h, fieldHash)
}
}
return h, nil
case reflect.Slice:
// We have two behaviors here. If it isn't a set, then we just
// visit all the elements. If it is a set, then we do a deterministic
// hash code.
var h uint64
var set bool
if opts != nil {
set = (opts.Flags & visitFlagSet) != 0
}
l := v.Len()
for i := 0; i < l; i++ {
current, err := w.visit(v.Index(i), nil)
if err != nil {
return 0, err
}
if set {
h = hashUpdateUnordered(h, current)
} else {
h = hashUpdateOrdered(w.h, h, current)
}
}
return h, nil
case reflect.String:
// Directly hash
w.h.Reset()
_, err := w.h.Write([]byte(v.String()))
return w.h.Sum64(), err
default:
return 0, fmt.Errorf("unknown kind to hash: %s", k)
}
}
func hashUpdateOrdered(h hash.Hash64, a, b uint64) uint64 {
// For ordered updates, use a real hash function
h.Reset()
// We just panic if the binary writes fail because we are writing
// an int64 which should never be fail-able.
e1 := binary.Write(h, binary.LittleEndian, a)
e2 := binary.Write(h, binary.LittleEndian, b)
if e1 != nil {
panic(e1)
}
if e2 != nil {
panic(e2)
}
return h.Sum64()
}
func hashUpdateUnordered(a, b uint64) uint64 {
return a ^ b
}
// visitFlag is used as a bitmask for affecting visit behavior
type visitFlag uint
const (
visitFlagInvalid visitFlag = iota
visitFlagSet = iota << 1
)

Some files were not shown because too many files have changed in this diff Show More