Merge branch 'master-oss' into f-vault-service

Jeff Mitchell 2016-05-04 17:20:00 -04:00
commit 885cc73b2e
70 changed files with 3177 additions and 684 deletions

View file

@ -27,17 +27,19 @@ DEPRECATIONS/BREAKING CHANGES:
using the `consul` backend and will perform its own health checks. See
the Consul backend documentation for information on how to disable
auto-registration and service checks.
* List operations that do not find any keys now return a `404` status code
rather than an empty response object [GH-1365]
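As a quick illustration of the list change above, a minimal sketch using this repository's Go `api` package; the mount path is a placeholder, and the `Logical().List` helper surfacing the 404 as a nil secret is an assumption based on how `Read` treats missing paths.

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/hashicorp/vault/api"
)

func main() {
	// Pick up VAULT_ADDR and TLS settings from the environment.
	config := api.DefaultConfig()
	if err := config.ReadEnvironment(); err != nil {
		log.Fatal(err)
	}
	client, err := api.NewClient(config)
	if err != nil {
		log.Fatal(err)
	}
	client.SetToken(os.Getenv("VAULT_TOKEN"))

	// List a prefix that holds no keys. With GH-1365 the server answers
	// with a 404 instead of an empty response object; the client is
	// assumed to surface that as a nil secret and no error.
	secret, err := client.Logical().List("secret/does-not-exist/")
	if err != nil {
		log.Fatal(err)
	}
	if secret == nil {
		fmt.Println("no keys under this prefix (server returned 404)")
		return
	}
	fmt.Println("keys:", secret.Data["keys"])
}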
FEATURES:
* **Azure Physical Backend**: You can now use Azure blob object storage as
your Vault physical data store [GH-1266]
* **Consul Backend**: Consul backend will automatically register a `vault`
service and perform its own health checking. By default the active node
can be found at `active.vault.service.consul` and all with standby nodes
are `standby.vault.service.consul`. Sealed vaults are marked critical and
are not listed by default in Consul's service discovery. See the
documentation for details. [GH-1349]
* **Consul Backend Health Checks**: The Consul backend will automatically
register a `vault` service and perform its own health checking. By default
the active node can be found at `active.vault.service.consul` and all
standby nodes at `standby.vault.service.consul`. Sealed vaults are marked
critical and are not listed by default in Consul's service discovery. See
the documentation for details. [GH-1349]
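A tiny discovery sketch for the Consul service names this entry introduces; it assumes the local resolver forwards `*.service.consul` queries to Consul's DNS interface, which is environment setup outside this change.

package main

import (
	"fmt"
	"net"
)

func main() {
	// Resolve the names registered by the Consul backend: the active node
	// and any unsealed standbys. Sealed nodes are marked critical and are
	// filtered out of these results by Consul.
	for _, name := range []string{
		"active.vault.service.consul",
		"standby.vault.service.consul",
	} {
		addrs, err := net.LookupHost(name)
		if err != nil {
			fmt.Printf("%s: lookup failed: %v\n", name, err)
			continue
		}
		fmt.Printf("%s -> %v\n", name, addrs)
	}
}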
IMPROVEMENTS:
@ -68,6 +70,10 @@ BUG FIXES:
* command/various: Tell the JSON decoder to not convert all numbers to floats;
fixes various places where numbers were showing up in scientific
notation
* core: Properly persist mount-tuned TTLs for auth backends [GH-1371]
* core: Don't accidentally crosswire SIGINT to the reload handler [GH-1372]
* credential/github: Make organization comparison case-insensitive during
login [GH-1359]
* credential/ldap: Fix problem where certain error conditions when configuring
or opening LDAP connections would cause a panic instead of returning a useful
error message [GH-1262]
@ -76,6 +82,8 @@ BUG FIXES:
`default` resulted in the same behavior anyway. [GH-1276]
* credential/token: Fix issues renewing tokens when using the "suffix"
capability of token roles [GH-1331]
* credential/token: Fix lookup via POST showing the request token instead of
the desired token [GH-1354] (see the lookup sketch after this list)
* credential/various: Fix renewal conditions when `default` policy is not
contained in the backend config [GH-1256]
* physical/s3: Don't panic in certain error cases from bad S3 responses [GH-1353]
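Tying back to the GH-1354 lookup fix above, a hedged sketch of looking up a different token by POSTing it in the request body through the generic `Logical().Write` helper; the placeholder token value and reading the `id` field from the response are illustrative assumptions.

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/hashicorp/vault/api"
)

func main() {
	config := api.DefaultConfig()
	if err := config.ReadEnvironment(); err != nil {
		log.Fatal(err)
	}
	client, err := api.NewClient(config)
	if err != nil {
		log.Fatal(err)
	}
	client.SetToken(os.Getenv("VAULT_TOKEN"))

	// Look up a *different* token by sending it in the request body.
	// Before GH-1354 this path reported on the request's own token; with
	// the fix the body's "token" field is honored.
	secret, err := client.Logical().Write("auth/token/lookup", map[string]interface{}{
		"token": "token-id-to-inspect", // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("looked-up token id:", secret.Data["id"])
}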

245
Godeps/Godeps.json generated
View file

@ -8,8 +8,8 @@
"Deps": [
{
"ImportPath": "github.com/Azure/azure-sdk-for-go/storage",
"Comment": "v2.1.1-beta",
"Rev": "a1883f7b98346e4908a6c25230c95a8a3026a10c"
"Comment": "v2.1.1-beta-4-gd4e45a6",
"Rev": "d4e45a63c9d269ddff72970df7c80d214aeddcf3"
},
{
"ImportPath": "github.com/armon/go-metrics",
@ -26,153 +26,153 @@
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws",
"Comment": "v1.1.20-13-gd6f37f6",
"Rev": "d6f37f6686a9207f3688d440733c4d20301273db"
"Comment": "v1.1.22-3-g1915858",
"Rev": "1915858199be30d43264f86f9b9b469b7f2c8340"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws/awserr",
"Comment": "v1.1.20-13-gd6f37f6",
"Rev": "d6f37f6686a9207f3688d440733c4d20301273db"
"Comment": "v1.1.22-3-g1915858",
"Rev": "1915858199be30d43264f86f9b9b469b7f2c8340"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws/awsutil",
"Comment": "v1.1.20-13-gd6f37f6",
"Rev": "d6f37f6686a9207f3688d440733c4d20301273db"
"Comment": "v1.1.22-3-g1915858",
"Rev": "1915858199be30d43264f86f9b9b469b7f2c8340"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws/client",
"Comment": "v1.1.20-13-gd6f37f6",
"Rev": "d6f37f6686a9207f3688d440733c4d20301273db"
"Comment": "v1.1.22-3-g1915858",
"Rev": "1915858199be30d43264f86f9b9b469b7f2c8340"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws/client/metadata",
"Comment": "v1.1.20-13-gd6f37f6",
"Rev": "d6f37f6686a9207f3688d440733c4d20301273db"
"Comment": "v1.1.22-3-g1915858",
"Rev": "1915858199be30d43264f86f9b9b469b7f2c8340"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws/corehandlers",
"Comment": "v1.1.20-13-gd6f37f6",
"Rev": "d6f37f6686a9207f3688d440733c4d20301273db"
"Comment": "v1.1.22-3-g1915858",
"Rev": "1915858199be30d43264f86f9b9b469b7f2c8340"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws/credentials",
"Comment": "v1.1.20-13-gd6f37f6",
"Rev": "d6f37f6686a9207f3688d440733c4d20301273db"
"Comment": "v1.1.22-3-g1915858",
"Rev": "1915858199be30d43264f86f9b9b469b7f2c8340"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds",
"Comment": "v1.1.20-13-gd6f37f6",
"Rev": "d6f37f6686a9207f3688d440733c4d20301273db"
"Comment": "v1.1.22-3-g1915858",
"Rev": "1915858199be30d43264f86f9b9b469b7f2c8340"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws/defaults",
"Comment": "v1.1.20-13-gd6f37f6",
"Rev": "d6f37f6686a9207f3688d440733c4d20301273db"
"Comment": "v1.1.22-3-g1915858",
"Rev": "1915858199be30d43264f86f9b9b469b7f2c8340"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws/ec2metadata",
"Comment": "v1.1.20-13-gd6f37f6",
"Rev": "d6f37f6686a9207f3688d440733c4d20301273db"
"Comment": "v1.1.22-3-g1915858",
"Rev": "1915858199be30d43264f86f9b9b469b7f2c8340"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws/request",
"Comment": "v1.1.20-13-gd6f37f6",
"Rev": "d6f37f6686a9207f3688d440733c4d20301273db"
"Comment": "v1.1.22-3-g1915858",
"Rev": "1915858199be30d43264f86f9b9b469b7f2c8340"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws/session",
"Comment": "v1.1.20-13-gd6f37f6",
"Rev": "d6f37f6686a9207f3688d440733c4d20301273db"
"Comment": "v1.1.22-3-g1915858",
"Rev": "1915858199be30d43264f86f9b9b469b7f2c8340"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/private/endpoints",
"Comment": "v1.1.20-13-gd6f37f6",
"Rev": "d6f37f6686a9207f3688d440733c4d20301273db"
"Comment": "v1.1.22-3-g1915858",
"Rev": "1915858199be30d43264f86f9b9b469b7f2c8340"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/private/protocol",
"Comment": "v1.1.20-13-gd6f37f6",
"Rev": "d6f37f6686a9207f3688d440733c4d20301273db"
"Comment": "v1.1.22-3-g1915858",
"Rev": "1915858199be30d43264f86f9b9b469b7f2c8340"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/private/protocol/ec2query",
"Comment": "v1.1.20-13-gd6f37f6",
"Rev": "d6f37f6686a9207f3688d440733c4d20301273db"
"Comment": "v1.1.22-3-g1915858",
"Rev": "1915858199be30d43264f86f9b9b469b7f2c8340"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil",
"Comment": "v1.1.20-13-gd6f37f6",
"Rev": "d6f37f6686a9207f3688d440733c4d20301273db"
"Comment": "v1.1.22-3-g1915858",
"Rev": "1915858199be30d43264f86f9b9b469b7f2c8340"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/private/protocol/jsonrpc",
"Comment": "v1.1.20-13-gd6f37f6",
"Rev": "d6f37f6686a9207f3688d440733c4d20301273db"
"Comment": "v1.1.22-3-g1915858",
"Rev": "1915858199be30d43264f86f9b9b469b7f2c8340"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/private/protocol/query",
"Comment": "v1.1.20-13-gd6f37f6",
"Rev": "d6f37f6686a9207f3688d440733c4d20301273db"
"Comment": "v1.1.22-3-g1915858",
"Rev": "1915858199be30d43264f86f9b9b469b7f2c8340"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/private/protocol/query/queryutil",
"Comment": "v1.1.20-13-gd6f37f6",
"Rev": "d6f37f6686a9207f3688d440733c4d20301273db"
"Comment": "v1.1.22-3-g1915858",
"Rev": "1915858199be30d43264f86f9b9b469b7f2c8340"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/private/protocol/rest",
"Comment": "v1.1.20-13-gd6f37f6",
"Rev": "d6f37f6686a9207f3688d440733c4d20301273db"
"Comment": "v1.1.22-3-g1915858",
"Rev": "1915858199be30d43264f86f9b9b469b7f2c8340"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/private/protocol/restxml",
"Comment": "v1.1.20-13-gd6f37f6",
"Rev": "d6f37f6686a9207f3688d440733c4d20301273db"
"Comment": "v1.1.22-3-g1915858",
"Rev": "1915858199be30d43264f86f9b9b469b7f2c8340"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil",
"Comment": "v1.1.20-13-gd6f37f6",
"Rev": "d6f37f6686a9207f3688d440733c4d20301273db"
"Comment": "v1.1.22-3-g1915858",
"Rev": "1915858199be30d43264f86f9b9b469b7f2c8340"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/private/signer/v4",
"Comment": "v1.1.20-13-gd6f37f6",
"Rev": "d6f37f6686a9207f3688d440733c4d20301273db"
"Comment": "v1.1.22-3-g1915858",
"Rev": "1915858199be30d43264f86f9b9b469b7f2c8340"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/private/waiter",
"Comment": "v1.1.20-13-gd6f37f6",
"Rev": "d6f37f6686a9207f3688d440733c4d20301273db"
"Comment": "v1.1.22-3-g1915858",
"Rev": "1915858199be30d43264f86f9b9b469b7f2c8340"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/dynamodb",
"Comment": "v1.1.20-13-gd6f37f6",
"Rev": "d6f37f6686a9207f3688d440733c4d20301273db"
"Comment": "v1.1.22-3-g1915858",
"Rev": "1915858199be30d43264f86f9b9b469b7f2c8340"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute",
"Comment": "v1.1.20-13-gd6f37f6",
"Rev": "d6f37f6686a9207f3688d440733c4d20301273db"
"Comment": "v1.1.22-3-g1915858",
"Rev": "1915858199be30d43264f86f9b9b469b7f2c8340"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/ec2",
"Comment": "v1.1.20-13-gd6f37f6",
"Rev": "d6f37f6686a9207f3688d440733c4d20301273db"
"Comment": "v1.1.22-3-g1915858",
"Rev": "1915858199be30d43264f86f9b9b469b7f2c8340"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/iam",
"Comment": "v1.1.20-13-gd6f37f6",
"Rev": "d6f37f6686a9207f3688d440733c4d20301273db"
"Comment": "v1.1.22-3-g1915858",
"Rev": "1915858199be30d43264f86f9b9b469b7f2c8340"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/s3",
"Comment": "v1.1.20-13-gd6f37f6",
"Rev": "d6f37f6686a9207f3688d440733c4d20301273db"
"Comment": "v1.1.22-3-g1915858",
"Rev": "1915858199be30d43264f86f9b9b469b7f2c8340"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/sts",
"Comment": "v1.1.20-13-gd6f37f6",
"Rev": "d6f37f6686a9207f3688d440733c4d20301273db"
"Comment": "v1.1.22-3-g1915858",
"Rev": "1915858199be30d43264f86f9b9b469b7f2c8340"
},
{
"ImportPath": "github.com/bgentry/speakeasy",
@ -180,32 +180,32 @@
},
{
"ImportPath": "github.com/cloudfoundry-incubator/candiedyaml",
"Rev": "5cef21e2e4f0fd147973b558d4db7395176bcd95"
"Rev": "99c3df83b51532e3615f851d8c2dbb638f5313bf"
},
{
"ImportPath": "github.com/coreos/etcd/client",
"Comment": "v2.3.0-556-g7161eee",
"Rev": "7161eeed8b7fa59551f8ad8ec71251be8579d5eb"
"Comment": "v2.3.0-651-gfd27f9c",
"Rev": "fd27f9cd288c7ccd03d2555663a2ed20178d4b5c"
},
{
"ImportPath": "github.com/coreos/etcd/pkg/pathutil",
"Comment": "v2.3.0-556-g7161eee",
"Rev": "7161eeed8b7fa59551f8ad8ec71251be8579d5eb"
"Comment": "v2.3.0-651-gfd27f9c",
"Rev": "fd27f9cd288c7ccd03d2555663a2ed20178d4b5c"
},
{
"ImportPath": "github.com/coreos/etcd/pkg/tlsutil",
"Comment": "v2.3.0-556-g7161eee",
"Rev": "7161eeed8b7fa59551f8ad8ec71251be8579d5eb"
"Comment": "v2.3.0-651-gfd27f9c",
"Rev": "fd27f9cd288c7ccd03d2555663a2ed20178d4b5c"
},
{
"ImportPath": "github.com/coreos/etcd/pkg/transport",
"Comment": "v2.3.0-556-g7161eee",
"Rev": "7161eeed8b7fa59551f8ad8ec71251be8579d5eb"
"Comment": "v2.3.0-651-gfd27f9c",
"Rev": "fd27f9cd288c7ccd03d2555663a2ed20178d4b5c"
},
{
"ImportPath": "github.com/coreos/etcd/pkg/types",
"Comment": "v2.3.0-556-g7161eee",
"Rev": "7161eeed8b7fa59551f8ad8ec71251be8579d5eb"
"Comment": "v2.3.0-651-gfd27f9c",
"Rev": "fd27f9cd288c7ccd03d2555663a2ed20178d4b5c"
},
{
"ImportPath": "github.com/denisenkom/go-mssqldb",
@ -234,7 +234,7 @@
},
{
"ImportPath": "github.com/go-ldap/ldap",
"Comment": "v2.2.2-3-g0e7db8e",
"Comment": "v2.3.0",
"Rev": "0e7db8eb77695b5a952f0e5d78df9ab160050c73"
},
{
@ -244,27 +244,27 @@
},
{
"ImportPath": "github.com/gocql/gocql",
"Comment": "pre-node-events-267-g1440c60",
"Rev": "1440c609669494bcb31b1e300e8d2ef51e205dd3"
"Comment": "pre-node-events-293-g2e0390b",
"Rev": "2e0390b6fbe023664fcdaa9767f0f83fbe78956d"
},
{
"ImportPath": "github.com/gocql/gocql/internal/lru",
"Comment": "pre-node-events-267-g1440c60",
"Rev": "1440c609669494bcb31b1e300e8d2ef51e205dd3"
"Comment": "pre-node-events-293-g2e0390b",
"Rev": "2e0390b6fbe023664fcdaa9767f0f83fbe78956d"
},
{
"ImportPath": "github.com/gocql/gocql/internal/murmur",
"Comment": "pre-node-events-267-g1440c60",
"Rev": "1440c609669494bcb31b1e300e8d2ef51e205dd3"
"Comment": "pre-node-events-293-g2e0390b",
"Rev": "2e0390b6fbe023664fcdaa9767f0f83fbe78956d"
},
{
"ImportPath": "github.com/gocql/gocql/internal/streams",
"Comment": "pre-node-events-267-g1440c60",
"Rev": "1440c609669494bcb31b1e300e8d2ef51e205dd3"
"Comment": "pre-node-events-293-g2e0390b",
"Rev": "2e0390b6fbe023664fcdaa9767f0f83fbe78956d"
},
{
"ImportPath": "github.com/golang/snappy",
"Rev": "ec642410cd033af63620b66a91ccbd3c69c2c59a"
"Rev": "43fea289edce21979658cbbdb3925390890aa86e"
},
{
"ImportPath": "github.com/google/go-github/github",
@ -280,8 +280,13 @@
},
{
"ImportPath": "github.com/hashicorp/consul/api",
"Comment": "v0.6.4-256-gadcf935",
"Rev": "adcf93535058234780f9aaffb40497ba194f60a0"
"Comment": "v0.6.4-268-g9f272b0",
"Rev": "9f272b088131af951ea1f77594905ceae83fd2c1"
},
{
"ImportPath": "github.com/hashicorp/consul/lib",
"Comment": "v0.6.4-268-g9f272b0",
"Rev": "9f272b088131af951ea1f77594905ceae83fd2c1"
},
{
"ImportPath": "github.com/hashicorp/errwrap",
@ -295,6 +300,10 @@
"ImportPath": "github.com/hashicorp/go-multierror",
"Rev": "d30f09973e19c1dfcd120b2d9c4f168e68d6b5d5"
},
{
"ImportPath": "github.com/hashicorp/go-rootcerts",
"Rev": "6bb64b370b90e7ef1fa532be9e591a81c3493e00"
},
{
"ImportPath": "github.com/hashicorp/go-syslog",
"Rev": "42a2b573b664dbf281bd48c3cc12c086b17a39ba"
@ -313,39 +322,39 @@
},
{
"ImportPath": "github.com/hashicorp/hcl",
"Rev": "27a57f2605e04995c111273c263d51cee60d9bc4"
"Rev": "9a905a34e6280ce905da1a32344b25e81011197a"
},
{
"ImportPath": "github.com/hashicorp/hcl/hcl/ast",
"Rev": "27a57f2605e04995c111273c263d51cee60d9bc4"
"Rev": "9a905a34e6280ce905da1a32344b25e81011197a"
},
{
"ImportPath": "github.com/hashicorp/hcl/hcl/parser",
"Rev": "27a57f2605e04995c111273c263d51cee60d9bc4"
"Rev": "9a905a34e6280ce905da1a32344b25e81011197a"
},
{
"ImportPath": "github.com/hashicorp/hcl/hcl/scanner",
"Rev": "27a57f2605e04995c111273c263d51cee60d9bc4"
"Rev": "9a905a34e6280ce905da1a32344b25e81011197a"
},
{
"ImportPath": "github.com/hashicorp/hcl/hcl/strconv",
"Rev": "27a57f2605e04995c111273c263d51cee60d9bc4"
"Rev": "9a905a34e6280ce905da1a32344b25e81011197a"
},
{
"ImportPath": "github.com/hashicorp/hcl/hcl/token",
"Rev": "27a57f2605e04995c111273c263d51cee60d9bc4"
"Rev": "9a905a34e6280ce905da1a32344b25e81011197a"
},
{
"ImportPath": "github.com/hashicorp/hcl/json/parser",
"Rev": "27a57f2605e04995c111273c263d51cee60d9bc4"
"Rev": "9a905a34e6280ce905da1a32344b25e81011197a"
},
{
"ImportPath": "github.com/hashicorp/hcl/json/scanner",
"Rev": "27a57f2605e04995c111273c263d51cee60d9bc4"
"Rev": "9a905a34e6280ce905da1a32344b25e81011197a"
},
{
"ImportPath": "github.com/hashicorp/hcl/json/token",
"Rev": "27a57f2605e04995c111273c263d51cee60d9bc4"
"Rev": "9a905a34e6280ce905da1a32344b25e81011197a"
},
{
"ImportPath": "github.com/hashicorp/logutils",
@ -353,8 +362,8 @@
},
{
"ImportPath": "github.com/hashicorp/serf/coordinate",
"Comment": "v0.7.0-56-ga876af0",
"Rev": "a876af06863abed3e31252a913bad976aeff61df"
"Comment": "v0.7.0-58-gdefb069",
"Rev": "defb069b1bad9f7cdebc647810cb6ae398a1b617"
},
{
"ImportPath": "github.com/jmespath/go-jmespath",
@ -363,13 +372,13 @@
},
{
"ImportPath": "github.com/lib/pq",
"Comment": "go1.0-cutoff-84-g3cd0097",
"Rev": "3cd0097429be7d611bb644ef85b42bfb102ceea4"
"Comment": "go1.0-cutoff-86-gdd3290b",
"Rev": "dd3290b2f71a8b30bee8e4e75a337a825263d26f"
},
{
"ImportPath": "github.com/lib/pq/oid",
"Comment": "go1.0-cutoff-84-g3cd0097",
"Rev": "3cd0097429be7d611bb644ef85b42bfb102ceea4"
"Comment": "go1.0-cutoff-86-gdd3290b",
"Rev": "dd3290b2f71a8b30bee8e4e75a337a825263d26f"
},
{
"ImportPath": "github.com/mattn/go-isatty",
@ -410,75 +419,75 @@
},
{
"ImportPath": "golang.org/x/crypto/bcrypt",
"Rev": "c9aef117f53e16ed3a6cc3d93cd357dbf2005065"
"Rev": "2cb52d93744fdc9a51f5c87b36fe2208d424af5a"
},
{
"ImportPath": "golang.org/x/crypto/blowfish",
"Rev": "c9aef117f53e16ed3a6cc3d93cd357dbf2005065"
"Rev": "2cb52d93744fdc9a51f5c87b36fe2208d424af5a"
},
{
"ImportPath": "golang.org/x/crypto/cast5",
"Rev": "c9aef117f53e16ed3a6cc3d93cd357dbf2005065"
"Rev": "2cb52d93744fdc9a51f5c87b36fe2208d424af5a"
},
{
"ImportPath": "golang.org/x/crypto/curve25519",
"Rev": "c9aef117f53e16ed3a6cc3d93cd357dbf2005065"
"Rev": "2cb52d93744fdc9a51f5c87b36fe2208d424af5a"
},
{
"ImportPath": "golang.org/x/crypto/md4",
"Rev": "c9aef117f53e16ed3a6cc3d93cd357dbf2005065"
"Rev": "2cb52d93744fdc9a51f5c87b36fe2208d424af5a"
},
{
"ImportPath": "golang.org/x/crypto/openpgp",
"Rev": "c9aef117f53e16ed3a6cc3d93cd357dbf2005065"
"Rev": "2cb52d93744fdc9a51f5c87b36fe2208d424af5a"
},
{
"ImportPath": "golang.org/x/crypto/openpgp/armor",
"Rev": "c9aef117f53e16ed3a6cc3d93cd357dbf2005065"
"Rev": "2cb52d93744fdc9a51f5c87b36fe2208d424af5a"
},
{
"ImportPath": "golang.org/x/crypto/openpgp/elgamal",
"Rev": "c9aef117f53e16ed3a6cc3d93cd357dbf2005065"
"Rev": "2cb52d93744fdc9a51f5c87b36fe2208d424af5a"
},
{
"ImportPath": "golang.org/x/crypto/openpgp/errors",
"Rev": "c9aef117f53e16ed3a6cc3d93cd357dbf2005065"
"Rev": "2cb52d93744fdc9a51f5c87b36fe2208d424af5a"
},
{
"ImportPath": "golang.org/x/crypto/openpgp/packet",
"Rev": "c9aef117f53e16ed3a6cc3d93cd357dbf2005065"
"Rev": "2cb52d93744fdc9a51f5c87b36fe2208d424af5a"
},
{
"ImportPath": "golang.org/x/crypto/openpgp/s2k",
"Rev": "c9aef117f53e16ed3a6cc3d93cd357dbf2005065"
"Rev": "2cb52d93744fdc9a51f5c87b36fe2208d424af5a"
},
{
"ImportPath": "golang.org/x/crypto/ssh",
"Rev": "c9aef117f53e16ed3a6cc3d93cd357dbf2005065"
"Rev": "2cb52d93744fdc9a51f5c87b36fe2208d424af5a"
},
{
"ImportPath": "golang.org/x/crypto/ssh/agent",
"Rev": "c9aef117f53e16ed3a6cc3d93cd357dbf2005065"
"Rev": "2cb52d93744fdc9a51f5c87b36fe2208d424af5a"
},
{
"ImportPath": "golang.org/x/crypto/ssh/terminal",
"Rev": "c9aef117f53e16ed3a6cc3d93cd357dbf2005065"
"Rev": "2cb52d93744fdc9a51f5c87b36fe2208d424af5a"
},
{
"ImportPath": "golang.org/x/net/context",
"Rev": "b797637b7aeeed133049c7281bfa31dcc9ca42d6"
"Rev": "35ec611a141ee705590b9eb64d673f9e6dfeb1ac"
},
{
"ImportPath": "golang.org/x/oauth2",
"Rev": "7e9cd5d59563851383f8f81a7fbb01213709387c"
"Rev": "f6a14f0423bcd7a0ae907ace2795e63ec5f9fe51"
},
{
"ImportPath": "golang.org/x/oauth2/internal",
"Rev": "7e9cd5d59563851383f8f81a7fbb01213709387c"
"Rev": "f6a14f0423bcd7a0ae907ace2795e63ec5f9fe51"
},
{
"ImportPath": "golang.org/x/sys/unix",
"Rev": "f64b50fbea64174967a8882830d621a18ee1548e"
"Rev": "b776ec39b3e54652e09028aaaaac9757f4f8211a"
},
{
"ImportPath": "gopkg.in/asn1-ber.v1",

View file

@ -2,21 +2,18 @@ package api
import (
"crypto/tls"
"crypto/x509"
"encoding/pem"
"errors"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"os"
"path/filepath"
"strconv"
"strings"
"sync"
"time"
"github.com/hashicorp/go-cleanhttp"
"github.com/hashicorp/go-rootcerts"
)
const EnvVaultAddress = "VAULT_ADDR"
@ -84,9 +81,9 @@ func (c *Config) ReadEnvironment() error {
var foundInsecure bool
var envTLSServerName string
var newCertPool *x509.CertPool
var clientCert tls.Certificate
var foundClientCert bool
var err error
if v := os.Getenv(EnvVaultAddress); v != "" {
envAddress = v
@ -116,16 +113,6 @@ func (c *Config) ReadEnvironment() error {
}
// If we need custom TLS configuration, then set it
if envCACert != "" || envCAPath != "" || envClientCert != "" || envClientKey != "" || envInsecure {
var err error
if envCACert != "" {
newCertPool, err = LoadCACert(envCACert)
} else if envCAPath != "" {
newCertPool, err = LoadCAPath(envCAPath)
}
if err != nil {
return fmt.Errorf("Error setting up CA path: %s", err)
}
if envClientCert != "" && envClientKey != "" {
clientCert, err = tls.LoadX509KeyPair(envClientCert, envClientKey)
if err != nil {
@ -145,9 +132,7 @@ func (c *Config) ReadEnvironment() error {
if foundInsecure {
clientTLSConfig.InsecureSkipVerify = envInsecure
}
if newCertPool != nil {
clientTLSConfig.RootCAs = newCertPool
}
if foundClientCert {
clientTLSConfig.Certificates = []tls.Certificate{clientCert}
}
@ -155,6 +140,15 @@ func (c *Config) ReadEnvironment() error {
clientTLSConfig.ServerName = envTLSServerName
}
rootConfig := &rootcerts.Config{
CAFile: envCACert,
CAPath: envCAPath,
}
err = rootcerts.ConfigureTLS(clientTLSConfig, rootConfig)
if err != nil {
return err
}
return nil
}
@ -310,74 +304,3 @@ START:
return result, nil
}
// Loads the certificate from given path and creates a certificate pool from it.
func LoadCACert(path string) (*x509.CertPool, error) {
certs, err := loadCertFromPEM(path)
if err != nil {
return nil, err
}
result := x509.NewCertPool()
for _, cert := range certs {
result.AddCert(cert)
}
return result, nil
}
// Loads the certificates present in the given directory and creates a
// certificate pool from it.
func LoadCAPath(path string) (*x509.CertPool, error) {
result := x509.NewCertPool()
fn := func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if info.IsDir() {
return nil
}
certs, err := loadCertFromPEM(path)
if err != nil {
return err
}
for _, cert := range certs {
result.AddCert(cert)
}
return nil
}
return result, filepath.Walk(path, fn)
}
// Creates a certificate from the given path
func loadCertFromPEM(path string) ([]*x509.Certificate, error) {
pemCerts, err := ioutil.ReadFile(path)
if err != nil {
return nil, err
}
certs := make([]*x509.Certificate, 0, 5)
for len(pemCerts) > 0 {
var block *pem.Block
block, pemCerts = pem.Decode(pemCerts)
if block == nil {
break
}
if block.Type != "CERTIFICATE" || len(block.Headers) != 0 {
continue
}
cert, err := x509.ParseCertificate(block.Bytes)
if err != nil {
return nil, err
}
certs = append(certs, cert)
}
return certs, nil
}
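For reference, a standalone sketch of the `go-rootcerts` calls this file now delegates to; the `rootcerts.Config` fields and the `ConfigureTLS` signature are taken from the hunk above, while the CA file path is a placeholder.

package main

import (
	"crypto/tls"
	"log"

	"github.com/hashicorp/go-rootcerts"
)

func main() {
	// Populate RootCAs on a TLS client config from a CA file and/or a
	// directory of PEM files, replacing the hand-rolled LoadCACert and
	// LoadCAPath helpers removed above.
	tlsConfig := &tls.Config{}
	err := rootcerts.ConfigureTLS(tlsConfig, &rootcerts.Config{
		CAFile: "/etc/vault/ca.pem", // placeholder path to a PEM bundle
		CAPath: "",                  // or a directory of PEM certificates
	})
	if err != nil {
		log.Fatalf("error configuring CA certificates: %v", err)
	}
	log.Printf("custom RootCAs configured: %v", tlsConfig.RootCAs != nil)
}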

View file

@ -9,6 +9,7 @@ import (
"github.com/hashicorp/go-cleanhttp"
"github.com/hashicorp/go-multierror"
"github.com/hashicorp/go-rootcerts"
"github.com/hashicorp/hcl"
"github.com/hashicorp/hcl/hcl/ast"
"github.com/mitchellh/mapstructure"
@ -85,17 +86,14 @@ func (c *SSHHelperConfig) NewClient() (*Client, error) {
// Check if certificates are provided via config file.
if c.CACert != "" || c.CAPath != "" || c.TLSSkipVerify {
var certPool *x509.CertPool
var err error
if c.CACert != "" {
certPool, err = LoadCACert(c.CACert)
} else if c.CAPath != "" {
certPool, err = LoadCAPath(c.CAPath)
rootConfig := &rootcerts.Config{
CAFile: c.CACert,
CAPath: c.CAPath,
}
certPool, err := rootcerts.LoadCACerts(rootConfig)
if err != nil {
return nil, err
}
// Enable TLS on the HTTP client information
c.SetTLSParameters(clientConfig, certPool)
}

View file

@ -8,13 +8,226 @@ import (
"testing"
"time"
"github.com/hashicorp/vault/api"
"github.com/hashicorp/go-rootcerts"
"github.com/hashicorp/vault/logical"
"github.com/hashicorp/vault/logical/framework"
logicaltest "github.com/hashicorp/vault/logical/testing"
"github.com/mitchellh/mapstructure"
)
const (
serverCertPath = "test-fixtures/cacert.pem"
serverKeyPath = "test-fixtures/cakey.pem"
serverCAPath = serverCertPath
testRootCACertPath1 = "test-fixtures/testcacert1.pem"
testRootCAKeyPath1 = "test-fixtures/testcakey1.pem"
testCertPath1 = "test-fixtures/testissuedcert4.pem"
testKeyPath1 = "test-fixtures/testissuedkey4.pem"
testIssuedCertCRL = "test-fixtures/issuedcertcrl"
testRootCACertPath2 = "test-fixtures/testcacert2.pem"
testRootCAKeyPath2 = "test-fixtures/testcakey2.pem"
testRootCertCRL = "test-fixtures/cacert2crl"
)
// Unlike testConnState, this method does not use the same 'tls.Config' objects for
// both dialing and listening. Instead, it runs the server without specifying its CA.
// But the client presents the CA cert of the server to trust the server.
// The client can present a cert and key which are completely independent of the server's CA.
// The connection state returned will contain the certificate presented by the client.
func connectionState(t *testing.T, serverCAPath, serverCertPath, serverKeyPath, clientCertPath, clientKeyPath string) tls.ConnectionState {
serverKeyPair, err := tls.LoadX509KeyPair(serverCertPath, serverKeyPath)
if err != nil {
t.Fatal(err)
}
// Prepare the listener configuration with server's key pair
listenConf := &tls.Config{
Certificates: []tls.Certificate{serverKeyPair},
ClientAuth: tls.RequestClientCert,
}
clientKeyPair, err := tls.LoadX509KeyPair(clientCertPath, clientKeyPath)
if err != nil {
t.Fatal(err)
}
// Load the CA cert required by the client to authenticate the server.
rootConfig := &rootcerts.Config{
CAFile: serverCAPath,
}
serverCAs, err := rootcerts.LoadCACerts(rootConfig)
if err != nil {
t.Fatal(err)
}
// Prepare the dial configuration that the client uses to establish the connection.
dialConf := &tls.Config{
Certificates: []tls.Certificate{clientKeyPair},
RootCAs: serverCAs,
}
// Start the server.
list, err := tls.Listen("tcp", "127.0.0.1:0", listenConf)
if err != nil {
t.Fatal(err)
}
defer list.Close()
// Establish a connection from the client side and write a few bytes.
go func() {
addr := list.Addr().String()
conn, err := tls.Dial("tcp", addr, dialConf)
if err != nil {
t.Fatalf("err: %v", err)
}
defer conn.Close()
// Write ping
conn.Write([]byte("ping"))
}()
// Accept the connection on the server side.
serverConn, err := list.Accept()
if err != nil {
t.Fatal(err)
}
defer serverConn.Close()
// Read the ping
buf := make([]byte, 4)
serverConn.Read(buf)
// Grab the current state
connState := serverConn.(*tls.Conn).ConnectionState()
return connState
}
func failOnError(t *testing.T, resp *logical.Response, err error) {
if resp != nil && resp.IsError() {
t.Fatalf("error returned in response: %s", resp.Data["error"])
}
if err != nil {
t.Fatal(err)
}
}
func TestBackend_CRLs(t *testing.T) {
config := logical.TestBackendConfig()
storage := &logical.InmemStorage{}
config.StorageView = storage
b, err := Factory(config)
if err != nil {
t.Fatal(err)
}
clientCA1, err := ioutil.ReadFile(testRootCACertPath1)
if err != nil {
t.Fatal(err)
}
// Register the CA certificate of the client key pair
certData := map[string]interface{}{
"certificate": clientCA1,
"policies": "abc",
"display_name": "cert1",
"ttl": 10000,
}
certReq := &logical.Request{
Operation: logical.UpdateOperation,
Path: "certs/cert1",
Storage: storage,
Data: certData,
}
resp, err := b.HandleRequest(certReq)
failOnError(t, resp, err)
// Connection state is presenting the client CA cert and its key.
// This is exactly what is registered at the backend.
connState := connectionState(t, serverCAPath, serverCertPath, serverKeyPath, testRootCACertPath1, testRootCAKeyPath1)
loginReq := &logical.Request{
Operation: logical.UpdateOperation,
Storage: storage,
Path: "login",
Connection: &logical.Connection{
ConnState: &connState,
},
}
resp, err = b.HandleRequest(loginReq)
failOnError(t, resp, err)
// Now, without changing the registered client CA cert, present from
// the client side, a cert issued using the registered CA.
connState = connectionState(t, serverCAPath, serverCertPath, serverKeyPath, testCertPath1, testKeyPath1)
loginReq.Connection.ConnState = &connState
// Attempt login with the updated connection
resp, err = b.HandleRequest(loginReq)
failOnError(t, resp, err)
// Register a CRL containing the issued client certificate used above.
issuedCRL, err := ioutil.ReadFile(testIssuedCertCRL)
if err != nil {
t.Fatal(err)
}
crlData := map[string]interface{}{
"crl": issuedCRL,
}
crlReq := &logical.Request{
Operation: logical.UpdateOperation,
Storage: storage,
Path: "crls/issuedcrl",
Data: crlData,
}
resp, err = b.HandleRequest(crlReq)
failOnError(t, resp, err)
// Attempt login with the revoked certificate.
resp, err = b.HandleRequest(loginReq)
if err != nil {
t.Fatal(err)
}
if resp == nil || !resp.IsError() {
t.Fatalf("expected failure due to revoked certificate")
}
// Register a different client CA certificate.
clientCA2, err := ioutil.ReadFile(testRootCACertPath2)
if err != nil {
t.Fatal(err)
}
certData["certificate"] = clientCA2
resp, err = b.HandleRequest(certReq)
failOnError(t, resp, err)
// Test login using a different client CA cert pair.
connState = connectionState(t, serverCAPath, serverCertPath, serverKeyPath, testRootCACertPath2, testRootCAKeyPath2)
loginReq.Connection.ConnState = &connState
// Attempt login with the updated connection
resp, err = b.HandleRequest(loginReq)
failOnError(t, resp, err)
// Register a CRL containing the root CA certificate used above.
rootCRL, err := ioutil.ReadFile(testRootCertCRL)
if err != nil {
t.Fatal(err)
}
crlData["crl"] = rootCRL
resp, err = b.HandleRequest(crlReq)
failOnError(t, resp, err)
// Attempt login with the same connection state but with the CRL registered
resp, err = b.HandleRequest(loginReq)
if err != nil {
t.Fatal(err)
}
if resp == nil || !resp.IsError() {
t.Fatalf("expected failure due to revoked certificate")
}
}
func testFactory(t *testing.T) logical.Backend {
b, err := Factory(&logical.BackendConfig{
System: &logical.StaticSystemView{
@ -84,7 +297,7 @@ func TestBackend_basic_CA(t *testing.T) {
}
// Test CRL behavior
func TestBackend_CRLs(t *testing.T) {
func TestBackend_Basic_CRLs(t *testing.T) {
connState := testConnState(t, "test-fixtures/keys/cert.pem",
"test-fixtures/keys/key.pem", "test-fixtures/root/rootcacert.pem")
ca, err := ioutil.ReadFile("test-fixtures/root/rootcacert.pem")
@ -345,7 +558,10 @@ func testConnState(t *testing.T, certPath, keyPath, rootCertPath string) tls.Con
if err != nil {
t.Fatalf("err: %v", err)
}
rootCAs, err := api.LoadCACert(rootCertPath)
rootConfig := &rootcerts.Config{
CAFile: rootCertPath,
}
rootCAs, err := rootcerts.LoadCACerts(rootConfig)
if err != nil {
t.Fatalf("err: %v", err)
}

View file

@ -0,0 +1,20 @@
-----BEGIN CERTIFICATE-----
MIIDPjCCAiagAwIBAgIUXiEDuecwua9+j1XHLnconxQ/JBcwDQYJKoZIhvcNAQEL
BQAwFjEUMBIGA1UEAxMLbXl2YXVsdC5jb20wIBcNMTYwNTAyMTYwMzU4WhgPMjA2
NjA0MjAxNjA0MjhaMBYxFDASBgNVBAMTC215dmF1bHQuY29tMIIBIjANBgkqhkiG
9w0BAQEFAAOCAQ8AMIIBCgKCAQEAwWPjnTqnkc6acah+wWLmdTK0oCrf2687XVhx
VP3IN897TYzkaBQ2Dn1UM2VEL71sE3OZSVm0UWs5n7UqRuDp6mvkvrT2q5zgh/bV
zg9ZL1AI5H7dY2Rsor95I849ymFpXZooMgNtIQLxIeleBwzTnVSkFl8RqKM7NkjZ
wvBafQEjSsYk9050Bu0GMLgFJYRo1LozJLbwIs5ykG5F5PWTMfRvLCgLBzixPb75
unIJ29nL0yB7zzUdkM8CG1EX8NkjGLEnpRnPa7+RMf8bd10v84cr0JFCUQmoabks
sqVyA825/1we2r5Y8blyXZVIr2lcPyGocLDxz1qT1MqxrNQIywIDAQABo4GBMH8w
DgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFBTo2I+W
3Wb2MBe3OWuj5qCbafavMB8GA1UdIwQYMBaAFBTo2I+W3Wb2MBe3OWuj5qCbafav
MBwGA1UdEQQVMBOCC215dmF1bHQuY29thwR/AAABMA0GCSqGSIb3DQEBCwUAA4IB
AQAyjJzDMzf28yMgiu//2R6LD3+zuLHlfX8+p5JB7WDBT7CgSm89gzMRtD2DvqZQ
6iLbZv/x7Td8bdLsOKf3LDCkZyOygJ0Sr9+6YZdc9heWO8tsO/SbcLhj9/vK8YyV
5fJo+vECW8I5zQLeTKfPqJtTU0zFspv0WYCB96Hsbhd1hTfHmVgjBoxi0YuduAa8
3EHuYPfTYkO3M4QJCoQ+3S6LXSTDqppd1KGAy7QhRU6shd29EpSVxhgqZ+CIOpZu
3RgPOgPqfqcOD/v/SRPqhRf+P5O5Dc/N4ZXTZtfJbaY0qE+smpeQUskVQ2TrSqha
UYpNk7+toZW3Gioo0lBD3gH2
-----END CERTIFICATE-----

View file

@ -0,0 +1,12 @@
-----BEGIN X509 CRL-----
MIIBrjCBlzANBgkqhkiG9w0BAQsFADAWMRQwEgYDVQQDEwtteXZhdWx0LmNvbRcN
MTYwNTAyMTYxNDMzWhcNMTYwNTA1MTYxNDMzWjArMCkCFCXxxcbS0ATpI2PYrx8d
ACLEQ3B9FxExNjA1MDIxMjE0MzMtMDQwMKAjMCEwHwYDVR0jBBgwFoAUwsRNYCw4
U2won66rMKEJm8inFfgwDQYJKoZIhvcNAQELBQADggEBAD/VvoRK4eaEDzG7Z95b
fHL5ubJGkyvkp8ruNu+rfQp8NLgFVvY6a93Hz7WLOhACkKIWJ63+/4vCfDi5uU0B
HW2FICHdlSQ+6DdGJ6MrgujALlyT+69iF+fPiJ/M1j/N7Am8XPYYcfNdSK6CHtfg
gHNB7E+ubBA7lIw7ucIkoiJjXrSWSXTs9/GzLUImiXJAKQ+JzPYryIsGKXKAwgHh
HB56BnJ2vOs7+6UxQ6fjKTMxYdNgoZ34MhkkxNNhylrEndO6XUvUvC1f/1p1wlzy
xTq2MrMfJHJyu08rkrD+kwMPH2uoVwKyDhXdRBP0QrvQwOsvNEhW8LTKwLWkK17b
fEI=
-----END X509 CRL-----

View file

@ -0,0 +1,27 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEogIBAAKCAQEAwWPjnTqnkc6acah+wWLmdTK0oCrf2687XVhxVP3IN897TYzk
aBQ2Dn1UM2VEL71sE3OZSVm0UWs5n7UqRuDp6mvkvrT2q5zgh/bVzg9ZL1AI5H7d
Y2Rsor95I849ymFpXZooMgNtIQLxIeleBwzTnVSkFl8RqKM7NkjZwvBafQEjSsYk
9050Bu0GMLgFJYRo1LozJLbwIs5ykG5F5PWTMfRvLCgLBzixPb75unIJ29nL0yB7
zzUdkM8CG1EX8NkjGLEnpRnPa7+RMf8bd10v84cr0JFCUQmoabkssqVyA825/1we
2r5Y8blyXZVIr2lcPyGocLDxz1qT1MqxrNQIywIDAQABAoIBAD1pBd9ov8t6Surq
sY2hZUM0Hc16r+ln5LcInbx6djjaxvHiWql+OYgyXimP764lPYuTuspjFPKB1SOU
+N7XDxCkwFeayXXHdDlYtZ4gm5Z9mMVOT+j++8xWdxZaqJ56fmX9zOPM2LuR3paB
L52Xgh9EwHJmMApYAzaCvbu8bU+iHeNTW80xabxQrp9VCu/A1BXUX06jK4T+wmjZ
kDA82uQp3dCOF1tv/10HgwqkJj6/1jjM0XUzUZR6iV85S6jrA7wD7gDDeqNO8YHN
08YMRgTKk4pbA7AqoC5xbL3gbSjsjyw48KRq0FkdkjsgV0PJZRMUU9fv9puDa23K
WRPa8LECgYEAyeth5bVH8FXnVXIAAFU6W0WdgCK3VakhjItLw0eoxshuTwbVq64w
CNOB8y1pfP83WiJjX3qRG43NDW07X69J57YKtCCb6KICVUPmecgYZPkmegD1HBQZ
5+Aak+5pIUQuycQ0t65yHGu4Jsju05gEFgdzydFjNANgiPxRzZxzAkkCgYEA9S+y
ZR063oCQDg/GhMLCx19nCJyU44Figh1YCD6kTrsSTECuRpQ5B1F9a+LeZT2wnYxv
+qMvvV+lfVY73f5WZ567u2jSDIsCH34p4g7sE25lKwo+Lhik6EtOehJFs2ZUemaT
Ym7EjqWlC1whrG7P4MnTGzPOVNAGAxsGPtT58nMCgYAs/R8A2VU//UPfy9ioOlUY
RPiEtjd3BIoPEHI+/lZihAHf5bvx1oupS8bmcbXRPeQNVyAhA+QU6ZFIbpAOD7Y9
xFe6LpHOUVqHuOs/MxAMX17tTA1QxkHHYi1JzJLr8I8kMW01h86w+mc7bQWZa4Nt
jReFXfvmeOInY2CumS8e0QKBgC23ow/vj1aFqla04lNG7YK3a0LTz39MVM3mItAG
viRgBV1qghRu9uNCcpx3RPijtBbsZMTbQL+S4gyo06jlD79qfZ7IQMJN+SteHvkj
xykoYHzSAB4gQj9+KzffyFdXMVFRZxHnjYb7o/amSzEXyHMlrtNXqZVu5HAXzeZR
V/m5AoGAAStS43Q7qSJSMfMBITKMdKlqCObnifD77WeR2WHGrpkq26300ggsDpMS
UTmnAAo77lSMmDsdoNn2XZmdeTu1CPoQnoZSE5CqPd5GeHA/hhegVCdeYxSXZJoH
Lhiac+AhCEog/MS1GmVsjynD7eDGVFcsJ6SWuam7doKfrpPqPnE=
-----END RSA PRIVATE KEY-----

View file

@ -0,0 +1,67 @@
vault mount pki
vault mount-tune -max-lease-ttl=438000h pki
vault write pki/root/generate/exported common_name=myvault.com ttl=438000h ip_sans=127.0.0.1
vi cacert.pem
vi cakey.pem
vaultcert.hcl
backend "inmem" {
}
disable_mlock = true
default_lease_ttl = "700h"
max_lease_ttl = "720h"
listener "tcp" {
address = "127.0.0.1:8200"
tls_cert_file = "./cacert.pem"
tls_key_file = "./cakey.pem"
}
========================================
vault mount pki
vault mount-tune -max-lease-ttl=438000h pki
vault write pki/root/generate/exported common_name=myvault.com ttl=438000h max_ttl=438000h ip_sans=127.0.0.1
vi testcacert1.pem
vi testcakey1.pem
vi testcaserial1
vault write pki/config/urls issuing_certificates="http://127.0.0.1:8200/v1/pki/ca" crl_distribution_points="http://127.0.0.1:8200/v1/pki/crl"
vault write pki/roles/myvault-dot-com allowed_domains=myvault.com allow_subdomains=true ttl=437999h max_ttl=438000h allow_ip_sans=true
vault write pki/issue/myvault-dot-com common_name=cert.myvault.com format=pem ip_sans=127.0.0.1
vi testissuedserial1
vault write pki/issue/myvault-dot-com common_name=cert.myvault.com format=pem ip_sans=127.0.0.1
vi testissuedcert2.pem
vi testissuedkey2.pem
vi testissuedserial2
vault write pki/issue/myvault-dot-com common_name=cert.myvault.com format=pem ip_sans=127.0.0.1
vi testissuedserial3
vault write pki/issue/myvault-dot-com common_name=cert.myvault.com format=pem ip_sans=127.0.0.1
vi testissuedcert4.pem
vi testissuedkey4.pem
vi testissuedserial4
vault write pki/issue/myvault-dot-com common_name=cert.myvault.com format=pem ip_sans=127.0.0.1
vi testissuedserial5
vault write pki/revoke serial_number=$(cat testissuedserial2)
vault write pki/revoke serial_number=$(cat testissuedserial4)
curl -XGET "http://127.0.0.1:8200/v1/pki/crl/pem" -H "x-vault-token:123" > issuedcertcrl
openssl crl -in issuedcertcrl -noout -text
========================================
export VAULT_ADDR='http://127.0.0.1:8200'
vault mount pki
vault mount-tune -max-lease-ttl=438000h pki
vault write pki/root/generate/exported common_name=myvault.com ttl=438000h ip_sans=127.0.0.1
vi testcacert2.pem
vi testcakey2.pem
vi testcaserial2
vi testcacert2leaseid
vault write pki/config/urls issuing_certificates="http://127.0.0.1:8200/v1/pki/ca" crl_distribution_points="http://127.0.0.1:8200/v1/pki/crl"
vault revoke $(cat testcacert2leaseid)
curl -XGET "http://127.0.0.1:8200/v1/pki/crl/pem" -H "x-vault-token:123" > cacert2crl
openssl crl -in cacert2crl -noout -text

View file

@ -0,0 +1,12 @@
-----BEGIN X509 CRL-----
MIIB2TCBwjANBgkqhkiG9w0BAQsFADAWMRQwEgYDVQQDEwtteXZhdWx0LmNvbRcN
MTYwNTAyMTYxMTA4WhcNMTYwNTA1MTYxMTA4WjBWMCkCFAS6oenLRllQ1MRYcSV+
5ukv2563FxExNjA1MDIxMjExMDgtMDQwMDApAhQaQdPJfbIwE3q4nyYp60lVnZaE
5hcRMTYwNTAyMTIxMTA1LTA0MDCgIzAhMB8GA1UdIwQYMBaAFOuKvPiUG06iHkRX
AOeMiUdBfHFyMA0GCSqGSIb3DQEBCwUAA4IBAQBD2jkeOAmkDdYkAXbmjLGdHaQI
WMS/M+wtFnHVIDVQEmUmj/KPsrkshTZv2UgCHIxBha6y+kXUMQFMg6FwriDTB170
WyJVDVhGg2WjiQjnzrzEI+iOmcpx60sPPXE63J/Zxo4QS5M62RTXRq3909HQTFI5
f3xf0pog8mOrv5uQxO1SACP6YFtdDE2dGOVwoIPuNMTY5vijnj8I9dAw8VrbdoBX
m/Ky56kT+BpmVWHKwQd1nEcP/RHSKbZwwJzJG0BoGM8cvzjITtBmpEF+OZcea81x
p9XJkpfFeiVIgzxks3zTeuQjLF8u+MDcdGt0ztHEbkswjxuk1cCovZe2GFr4
-----END X509 CRL-----

View file

@ -0,0 +1,20 @@
-----BEGIN CERTIFICATE-----
MIIDPjCCAiagAwIBAgIUfIKsF2VPT7sdFcKOHJH2Ii6K4MwwDQYJKoZIhvcNAQEL
BQAwFjEUMBIGA1UEAxMLbXl2YXVsdC5jb20wIBcNMTYwNTAyMTYwNTQyWhgPMjA2
NjA0MjAxNjA2MTJaMBYxFDASBgNVBAMTC215dmF1bHQuY29tMIIBIjANBgkqhkiG
9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuOimEXawD2qBoLCFP3Skq5zi1XzzcMAJlfdS
xz9hfymuJb+cN8rB91HOdU9wQCwVKnkUtGWxUnMp0tT0uAZj5NzhNfyinf0JGAbP
67HDzVZhGBHlHTjPX0638yaiUx90cTnucX0N20SgCYct29dMSgcPl+W78D3Jw3xE
JsHQPYS9ASe2eONxG09F/qNw7w/RO5/6WYoV2EmdarMMxq52pPe2chtNMQdSyOUb
cCcIZyk4QVFZ1ZLl6jTnUPb+JoCx1uMxXvMek4NF/5IL0Wr9dw2gKXKVKoHDr6SY
WrCONRw61A5Zwx1V+kn73YX3USRlkufQv/ih6/xThYDAXDC9cwIDAQABo4GBMH8w
DgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFOuKvPiU
G06iHkRXAOeMiUdBfHFyMB8GA1UdIwQYMBaAFOuKvPiUG06iHkRXAOeMiUdBfHFy
MBwGA1UdEQQVMBOCC215dmF1bHQuY29thwR/AAABMA0GCSqGSIb3DQEBCwUAA4IB
AQBcN/UdAMzc7UjRdnIpZvO+5keBGhL/vjltnGM1dMWYHa60Y5oh7UIXF+P1RdNW
n7g80lOyvkSR15/r1rDkqOK8/4oruXU31EcwGhDOC4hU6yMUy4ltV/nBoodHBXNh
MfKiXeOstH1vdI6G0P6W93Bcww6RyV1KH6sT2dbETCw+iq2VN9CrruGIWzd67UT/
spe/kYttr3UYVV3O9kqgffVVgVXg/JoRZ3J7Hy2UEXfh9UtWNanDlRuXaZgE9s/d
CpA30CHpNXvKeyNeW2ktv+2nAbSpvNW+e6MecBCTBIoDSkgU8ShbrzmDKVwNN66Q
5gn6KxUPBKHEtNzs5DgGM7nq
-----END CERTIFICATE-----

View file

@ -0,0 +1,20 @@
-----BEGIN CERTIFICATE-----
MIIDPjCCAiagAwIBAgIUJfHFxtLQBOkjY9ivHx0AIsRDcH0wDQYJKoZIhvcNAQEL
BQAwFjEUMBIGA1UEAxMLbXl2YXVsdC5jb20wIBcNMTYwNTAyMTYxMjI5WhgPMjA2
NjA0MjAxNjEyNTlaMBYxFDASBgNVBAMTC215dmF1bHQuY29tMIIBIjANBgkqhkiG
9w0BAQEFAAOCAQ8AMIIBCgKCAQEAqj8ANjAGrg5BgUb3owGwUHlMYDxljMdwroA/
Bv76ESjomj1zCyVtoJxlDZ8m9VcKQldk5ashFNuY+Ms9FrJ1YsePvsfStNe37C26
2uldDToh5rm7K8uwp/bQiErwM9QZMCVYCPEH8QgETPg9qWnikDFLMqcLBNbIiXVL
alxEYgA1Qt6+ayMvoS35288hFdZj6a0pCF0+zMHORZxloPhkXWnZLp5lWBiunSJG
0kVz56TjF+oY0L74iW4y3x2805biisGvFqgpZJW8/hLw/kDthNylNTzEqBktsctQ
BXpSMcwG3woJ0uZ8cH/HA/m0VDeIA77UisXnlLiQDpdB7U7QPwIDAQABo4GBMH8w
DgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFMLETWAs
OFNsKJ+uqzChCZvIpxX4MB8GA1UdIwQYMBaAFMLETWAsOFNsKJ+uqzChCZvIpxX4
MBwGA1UdEQQVMBOCC215dmF1bHQuY29thwR/AAABMA0GCSqGSIb3DQEBCwUAA4IB
AQCRlFb6bZDrq3NkoZF9evls7cT41V3XCdykMA4K9YRgDroZ5psanSvYEnSrk9cU
Y7sVYW7b8qSRWkLZrHCAwc2V0/i5F5j4q9yVnWaTZ+kOVCFYCI8yUS7ixRQdTLNN
os/r9dcRSzzTEqoQThAzn571yRcbJHzTjda3gCJ5F4utYUBU2F9WK+ukW9nqfepa
ju5vEEGDuL2+RyApzL0nGzMUkCdBcK82QBksTlElPnbICbJZWUUMTZWPaZ7WGDDa
Pj+pWMXiDQmzIuzgXUCNtQL6lEv4tQwGYRHjjPmhgJP4sr6Cyrj4G0iljrqM+z/3
gLyJOlNU8c5x02/C1nFDDa14
-----END CERTIFICATE-----

View file

@ -0,0 +1,27 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEowIBAAKCAQEAuOimEXawD2qBoLCFP3Skq5zi1XzzcMAJlfdSxz9hfymuJb+c
N8rB91HOdU9wQCwVKnkUtGWxUnMp0tT0uAZj5NzhNfyinf0JGAbP67HDzVZhGBHl
HTjPX0638yaiUx90cTnucX0N20SgCYct29dMSgcPl+W78D3Jw3xEJsHQPYS9ASe2
eONxG09F/qNw7w/RO5/6WYoV2EmdarMMxq52pPe2chtNMQdSyOUbcCcIZyk4QVFZ
1ZLl6jTnUPb+JoCx1uMxXvMek4NF/5IL0Wr9dw2gKXKVKoHDr6SYWrCONRw61A5Z
wx1V+kn73YX3USRlkufQv/ih6/xThYDAXDC9cwIDAQABAoIBAG3bCo7ljMQb6tel
CAUjL5Ilqz5a9ebOsONABRYLOclq4ePbatxawdJF7/sSLwZxKkIJnZtvr2Hkubxg
eOO8KC0YbVS9u39Rjc2QfobxHfsojpbWSuCJl+pvwinbkiUAUxXR7S/PtCPJKat/
fGdYCiMQ/tqnynh4vR4+/d5o12c0KuuQ22/MdEf3GOadUamRXS1ET9iJWqla1pJW
TmzrlkGAEnR5PPO2RMxbnZCYmj3dArxWAnB57W+bWYla0DstkDKtwg2j2ikNZpXB
nkZJJpxR76IYD1GxfwftqAKxujKcyfqB0dIKCJ0UmfOkauNWjexroNLwaAOC3Nud
XIxppAECgYEA1wJ9EH6A6CrSjdzUocF9LtQy1LCDHbdiQFHxM5/zZqIxraJZ8Gzh
Q0d8JeOjwPdG4zL9pHcWS7+x64Wmfn0+Qfh6/47Vy3v90PIL0AeZYshrVZyJ/s6X
YkgFK80KEuWtacqIZ1K2UJyCw81u/ynIl2doRsIbgkbNeN0opjmqVTMCgYEA3CkW
2fETWK1LvmgKFjG1TjOotVRIOUfy4iN0kznPm6DK2PgTF5DX5RfktlmA8i8WPmB7
YFOEdAWHf+RtoM/URa7EAGZncCWe6uggAcWqznTS619BJ63OmncpSWov5Byg90gJ
48qIMY4wDjE85ypz1bmBc2Iph974dtWeDtB7dsECgYAyKZh4EquMfwEkq9LH8lZ8
aHF7gbr1YeWAUB3QB49H8KtacTg+iYh8o97pEBUSXh6hvzHB/y6qeYzPAB16AUpX
Jdu8Z9ylXsY2y2HKJRu6GjxAewcO9bAH8/mQ4INrKT6uIdx1Dq0OXZV8jR9KVLtB
55RCfeLhIBesDR0Auw9sVQKBgB0xTZhkgP43LF35Ca1btgDClNJGdLUztx8JOIH1
HnQyY/NVIaL0T8xO2MLdJ131pGts+68QI/YGbaslrOuv4yPCQrcS3RBfzKy1Ttkt
TrLFhtoy7T7HqyeMOWtEq0kCCs3/PWB5EIoRoomfOcYlOOrUCDg2ge9EP4nyVVz9
hAGBAoGBAJXw/ufevxpBJJMSyULmVWYr34GwLC1OhSE6AVVt9JkIYnc5L4xBKTHP
QNKKJLmFmMsEqfxHUNWmpiHkm2E0p37Zehui3kywo+A4ybHPTua70ZWQfZhKxLUr
PvJa8JmwiCM7kO8zjOv+edY1mMWrbjAZH1YUbfcTHmST7S8vp0F3
-----END RSA PRIVATE KEY-----

View file

@ -0,0 +1,27 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEowIBAAKCAQEAqj8ANjAGrg5BgUb3owGwUHlMYDxljMdwroA/Bv76ESjomj1z
CyVtoJxlDZ8m9VcKQldk5ashFNuY+Ms9FrJ1YsePvsfStNe37C262uldDToh5rm7
K8uwp/bQiErwM9QZMCVYCPEH8QgETPg9qWnikDFLMqcLBNbIiXVLalxEYgA1Qt6+
ayMvoS35288hFdZj6a0pCF0+zMHORZxloPhkXWnZLp5lWBiunSJG0kVz56TjF+oY
0L74iW4y3x2805biisGvFqgpZJW8/hLw/kDthNylNTzEqBktsctQBXpSMcwG3woJ
0uZ8cH/HA/m0VDeIA77UisXnlLiQDpdB7U7QPwIDAQABAoIBADivQ2XHdeHsUzk1
JOz8efVBfgGo+nL2UPl5MAMnUKH4CgKZJT3311mb2TXA4RrdQUg3ixvBcAFe4L8u
BIgTIWyjX6Q5KloWXWHhFA8hll76FSGag8ygRJCYaHSI5xOKslxKgtZvUqKZdb0f
BoDrBYnXL9+MqOmSjjDegh7G2+n49n774Z2VVR47TZTBB5LCWDWj4AtEcalgwlvw
d5yL/GU/RfCkXCjCeie1pInp3eCMUI9jlvbe/vyaoFq2RiaJw1LSlJLXZBMYzaij
XkgMtRsr5bf0Tg2z3SPiaa9QZogfVLqHWAt6RHZf9Keidtiho+Ad6/dzJu+jKDys
Z6cthOECgYEAxMUCIYKO74BtPRN2r7KxbSjHzFsasxbfwkSg4Qefd4UoZJX2ShlL
cClnef3WdkKxtShJhqEPaKTYTrfgM+iz/a9+3lAFnS4EZawSf3YgXXslVTory0Da
yPQZKxX6XsupaLl4s13ehw/D0qfdxWVYaiFad3ePEE4ytmSkMMHLHo8CgYEA3X4a
jMWVbVv1W1lj+LFcg7AhU7lHgla+p7NI4gHw9V783noafnW7/8pNF80kshYo4u0g
aJRwaU/Inr5uw14eAyEjB4X7N8AE5wGmcxxS2uluGG6r3oyQSJBqktGnLwyTfcfC
XrfsGJza2BRGF4Mn8SFb7WtCl3f1qu0hTF+mC1ECgYB4oA1eXZsiV6if+H6Z1wHN
2WIidPc5MpyZi1jUmse3jXnlr8j8Q+VrLPayYlpGxTwLwlbQoYvAqs2v9CkNqWot
6pfr0UKfyMYJTiNI4DGXHRcV2ENgprF436tOLnr+AfwopwrHapQwWAnD6gSaLja1
WR0Mf87EQCv2hFvjR+otIQKBgQCLyvJQ1MeZzQdPT1zkcnSUfM6b+/1hCwSr7WDb
nCQLiZcJh4E/PWmZaII9unEloQzPJKBmwQEtxng1kLVxwu4oRXrJXcuPhTbS4dy/
HCpDFj8xVnBNNuQ9mEBbR80/ya0xHqnThDuT0TPiWvFeF55W9xoA/8h4tvKrnZx9
ioTO8QKBgCMqRa5pHb+vCniTWUTz9JZRnRsdq7fRSsJHngMe5gOR4HylyAmmqKrd
kEXfkdu9TH2jxSWcZbHUPVwKfOUqQUZMz0pml0DIs1kedUDFanTZ8Rgg5SGUHBW0
5bNCq64tKMmw6GiicaAGqd04OPo85WD9h8mPhM1Jdv/UmTV+HFAr
-----END RSA PRIVATE KEY-----

View file

@ -0,0 +1,22 @@
-----BEGIN CERTIFICATE-----
MIIDtzCCAp+gAwIBAgIUBLqh6ctGWVDUxFhxJX7m6S/bnrcwDQYJKoZIhvcNAQEL
BQAwFjEUMBIGA1UEAxMLbXl2YXVsdC5jb20wIBcNMTYwNTAyMTYwOTI2WhgPMjA2
NjA0MjAxNTA5NTZaMBsxGTAXBgNVBAMTEGNlcnQubXl2YXVsdC5jb20wggEiMA0G
CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDY3gPB29kkdbu0mPO6J0efagQhSiXB
9OyDuLf5sMk6CVDWVWal5hISkyBmw/lXgF7qC2XFKivpJOrcGQd5Ep9otBqyJLzI
b0IWdXuPIrVnXDwcdWr86ybX2iC42zKWfbXgjzGijeAVpl0UJLKBj+fk5q6NvkRL
5FUL6TRV7Krn9mrmnrV9J5IqV15pTd9W2aVJ6IqWvIPCACtZKulqWn4707uy2X2W
1Stq/5qnp1pDshiGk1VPyxCwQ6yw3iEcgecbYo3vQfhWcv7Q8LpSIM9ZYpXu6OmF
+czqRZS9gERl+wipmmrN1MdYVrTuQem21C/PNZ4jo4XUk1SFx6JrcA+lAgMBAAGj
gfUwgfIwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMB0GA1UdDgQWBBSe
Cl9WV3BjGCwmS/KrDSLRjfwyqjAfBgNVHSMEGDAWgBTrirz4lBtOoh5EVwDnjIlH
QXxxcjA7BggrBgEFBQcBAQQvMC0wKwYIKwYBBQUHMAKGH2h0dHA6Ly8xMjcuMC4w
LjE6ODIwMC92MS9wa2kvY2EwIQYDVR0RBBowGIIQY2VydC5teXZhdWx0LmNvbYcE
fwAAATAxBgNVHR8EKjAoMCagJKAihiBodHRwOi8vMTI3LjAuMC4xOjgyMDAvdjEv
cGtpL2NybDANBgkqhkiG9w0BAQsFAAOCAQEAWGholPN8buDYwKbUiDavbzjsxUIX
lU4MxEqOHw7CD3qIYIauPboLvB9EldBQwhgOOy607Yvdg3rtyYwyBFwPhHo/hK3Z
6mn4hc6TF2V+AUdHBvGzp2dbYLeo8noVoWbQ/lBulggwlIHNNF6+a3kALqsqk1Ch
f/hzsjFnDhAlNcYFgG8TgfE2lE/FckvejPqBffo7Q3I+wVAw0buqiz5QL81NOT+D
Y2S9LLKLRaCsWo9wRU1Az4Rhd7vK5SEMh16jJ82GyEODWPvuxOTI1MnzfnbWyLYe
TTp6YBjGMVf1I6NEcWNur7U17uIOiQjMZ9krNvoMJ1A/cxCoZ98QHgcIPg==
-----END CERTIFICATE-----

View file

@ -0,0 +1,27 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEpAIBAAKCAQEA2N4DwdvZJHW7tJjzuidHn2oEIUolwfTsg7i3+bDJOglQ1lVm
peYSEpMgZsP5V4Be6gtlxSor6STq3BkHeRKfaLQasiS8yG9CFnV7jyK1Z1w8HHVq
/Osm19oguNsyln214I8xoo3gFaZdFCSygY/n5Oaujb5ES+RVC+k0Veyq5/Zq5p61
fSeSKldeaU3fVtmlSeiKlryDwgArWSrpalp+O9O7stl9ltUrav+ap6daQ7IYhpNV
T8sQsEOssN4hHIHnG2KN70H4VnL+0PC6UiDPWWKV7ujphfnM6kWUvYBEZfsIqZpq
zdTHWFa07kHpttQvzzWeI6OF1JNUhceia3APpQIDAQABAoIBAQCH3vEzr+3nreug
RoPNCXcSJXXY9X+aeT0FeeGqClzIg7Wl03OwVOjVwl/2gqnhbIgK0oE8eiNwurR6
mSPZcxV0oAJpwiKU4T/imlCDaReGXn86xUX2l82KRxthNdQH/VLKEmzij0jpx4Vh
bWx5SBPdkbmjDKX1dmTiRYWIn/KjyNPvNvmtwdi8Qluhf4eJcNEUr2BtblnGOmfL
FdSu+brPJozpoQ1QdDnbAQRgqnh7Shl0tT85whQi0uquqIj1gEOGVjmBvDDnL3GV
WOENTKqsmIIoEzdZrql1pfmYTk7WNaD92bfpN128j8BF7RmAV4/DphH0pvK05y9m
tmRhyHGxAoGBAOV2BBocsm6xup575VqmFN+EnIOiTn+haOvfdnVsyQHnth63fOQx
PNtMpTPR1OMKGpJ13e2bV0IgcYRsRkScVkUtoa/17VIgqZXffnJJ0A/HT67uKBq3
8o7RrtyK5N20otw0lZHyqOPhyCdpSsurDhNON1kPVJVYY4N1RiIxfut/AoGBAPHz
HfsJ5ZkyELE9N/r4fce04lprxWH+mQGK0/PfjS9caXPhj/r5ZkVMvzWesF3mmnY8
goE5S35TuTvV1+6rKGizwlCFAQlyXJiFpOryNWpLwCmDDSzLcm+sToAlML3tMgWU
jM3dWHx3C93c3ft4rSWJaUYI9JbHsMzDW6Yh+GbbAoGBANIbKwxh5Hx5XwEJP2yu
kIROYCYkMy6otHLujgBdmPyWl+suZjxoXWoMl2SIqR8vPD+Jj6mmyNJy9J6lqf3f
DRuQ+fEuBZ1i7QWfvJ+XuN0JyovJ5Iz6jC58D1pAD+p2IX3y5FXcVQs8zVJRFjzB
p0TEJOf2oqORaKWRd6ONoMKvAoGALKu6aVMWdQZtVov6/fdLIcgf0pn7Q3CCR2qe
X3Ry2L+zKJYIw0mwvDLDSt8VqQCenB3n6nvtmFFU7ds5lvM67rnhsoQcAOaAehiS
rl4xxoJd5Ewx7odRhZTGmZpEOYzFo4odxRSM9c30/u18fqV1Mm0AZtHYds4/sk6P
aUj0V+kCgYBMpGrJk8RSez5g0XZ35HfpI4ENoWbiwB59FIpWsLl2LADEh29eC455
t9Muq7MprBVBHQo11TMLLFxDIjkuMho/gcKgpYXCt0LfiNm8EZehvLJUXH+3WqUx
we6ywrbFCs6LaxaOCtTiLsN+GbZCatITL0UJaeBmTAbiw0KQjUuZPQ==
-----END RSA PRIVATE KEY-----

View file

@ -3,6 +3,7 @@ package github
import (
"fmt"
"os"
"strings"
"testing"
"time"
@ -108,7 +109,11 @@ func TestBackend_basic(t *testing.T) {
PreCheck: func() { testAccPreCheck(t) },
Backend: b,
Steps: []logicaltest.TestStep{
testAccStepConfig(t),
testAccStepConfig(t, false),
testAccMap(t, "default", "root"),
testAccMap(t, "oWnErs", "root"),
testAccLogin(t, []string{"root"}),
testAccStepConfig(t, true),
testAccMap(t, "default", "root"),
testAccMap(t, "oWnErs", "root"),
testAccLogin(t, []string{"root"}),
@ -134,14 +139,18 @@ func testAccPreCheck(t *testing.T) {
}
}
func testAccStepConfig(t *testing.T) logicaltest.TestStep {
return logicaltest.TestStep{
func testAccStepConfig(t *testing.T, upper bool) logicaltest.TestStep {
ts := logicaltest.TestStep{
Operation: logical.UpdateOperation,
Path: "config",
Data: map[string]interface{}{
"organization": os.Getenv("GITHUB_ORG"),
},
}
if upper {
ts.Data["organization"] = strings.ToUpper(os.Getenv("GITHUB_ORG"))
}
return ts
}
func testAccStepConfigWithBaseURL(t *testing.T) logicaltest.TestStep {

View file

@ -3,6 +3,7 @@ package github
import (
"fmt"
"net/url"
"strings"
"github.com/google/go-github/github"
"github.com/hashicorp/vault/helper/policyutil"
@ -143,7 +144,7 @@ func (b *backend) verifyCredentials(req *logical.Request, token string) (*verify
}
for _, o := range allOrgs {
if *o.Login == config.Org {
if strings.ToLower(*o.Login) == strings.ToLower(config.Org) {
org = &o
break
}

View file

@ -92,8 +92,8 @@ func (c *InitCommand) Run(args []string) int {
"\n"+
"Recovery key initialized with %d keys and a key threshold of %d. Please\n"+
"securely distribute the above keys.",
shares,
threshold,
recoveryShares,
recoveryThreshold,
))
}

View file

@ -163,6 +163,14 @@ func (c *ServerCommand) Run(args []string) int {
var seal vault.Seal = &vault.DefaultSeal{}
// Ensure that the seal finalizer is called, even if using verify-only
defer func() {
err = seal.Finalize()
if err != nil {
c.Ui.Error(fmt.Sprintf("Error finalizing seals: %v", err))
}
}()
coreConfig := &vault.CoreConfig{
Physical: backend,
AdvertiseAddr: config.Backend.AdvertiseAddr,
@ -406,11 +414,6 @@ func (c *ServerCommand) Run(args []string) int {
listener.Close()
}
err = seal.Finalize()
if err != nil {
c.Ui.Error(fmt.Sprintf("Error finalizing seals: %v", err))
}
return 0
}
@ -696,7 +699,6 @@ func MakeShutdownCh() chan struct{} {
resultCh := make(chan struct{})
shutdownCh := make(chan os.Signal, 4)
signal.Notify(shutdownCh, os.Interrupt, syscall.SIGINT)
signal.Notify(shutdownCh, os.Interrupt, syscall.SIGTERM)
go func() {
for {
@ -714,7 +716,7 @@ func MakeSighupCh() chan struct{} {
resultCh := make(chan struct{})
signalCh := make(chan os.Signal, 4)
signal.Notify(signalCh, os.Interrupt, syscall.SIGHUP)
signal.Notify(signalCh, syscall.SIGHUP)
go func() {
for {
<-signalCh
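A minimal reproduction of the crosswire fixed above (GH-1372): signal.Notify relays a signal to every channel registered for it, so listing os.Interrupt on the reload channel meant Ctrl-C triggered a config reload as well as a shutdown; registering only SIGHUP keeps the two paths separate.

package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
)

func main() {
	// Shutdown channel: interrupt (Ctrl-C) and SIGTERM.
	shutdownCh := make(chan os.Signal, 4)
	signal.Notify(shutdownCh, os.Interrupt, syscall.SIGTERM)

	// Reload channel: SIGHUP only; note os.Interrupt is no longer listed.
	sighupCh := make(chan os.Signal, 4)
	signal.Notify(sighupCh, syscall.SIGHUP)

	for {
		select {
		case <-shutdownCh:
			fmt.Println("shutting down")
			return
		case <-sighupCh:
			fmt.Println("reloading configuration")
		}
	}
}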

View file

@ -92,9 +92,34 @@ func handleLogical(core *vault.Core, dataOnly bool, prepareRequestCallback Prepa
if !ok {
return
}
if (op == logical.ReadOperation || op == logical.ListOperation) && resp == nil {
respondError(w, http.StatusNotFound, nil)
return
switch {
case op == logical.ReadOperation:
if resp == nil {
respondError(w, http.StatusNotFound, nil)
return
}
// Basically: if we have empty "keys" or no keys at all, 404. This
// provides consistency with GET.
case op == logical.ListOperation:
if resp == nil || len(resp.Data) == 0 {
respondError(w, http.StatusNotFound, nil)
return
}
keysInt, ok := resp.Data["keys"]
if !ok || keysInt == nil {
respondError(w, http.StatusNotFound, nil)
return
}
keys, ok := keysInt.([]string)
if !ok {
respondError(w, http.StatusInternalServerError, nil)
return
}
if len(keys) == 0 {
respondError(w, http.StatusNotFound, nil)
return
}
}
// Build the proper response

View file

@ -3,17 +3,13 @@ package meta
import (
"bufio"
"crypto/tls"
"crypto/x509"
"encoding/pem"
"flag"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"path/filepath"
"github.com/hashicorp/errwrap"
"github.com/hashicorp/go-rootcerts"
"github.com/hashicorp/vault/api"
"github.com/hashicorp/vault/command/token"
"github.com/mitchellh/cli"
@ -74,20 +70,14 @@ func (m *Meta) Client() (*api.Client, error) {
// existing TLS config
tlsConfig := config.HttpClient.Transport.(*http.Transport).TLSClientConfig
var certPool *x509.CertPool
var err error
if m.flagCACert != "" {
certPool, err = api.LoadCACert(m.flagCACert)
} else if m.flagCAPath != "" {
certPool, err = api.LoadCAPath(m.flagCAPath)
rootConfig := &rootcerts.Config{
CAFile: m.flagCACert,
CAPath: m.flagCAPath,
}
if err != nil {
return nil, errwrap.Wrapf("Error setting up CA path: {{err}}", err)
if err := rootcerts.ConfigureTLS(tlsConfig, rootConfig); err != nil {
return nil, err
}
if certPool != nil {
tlsConfig.RootCAs = certPool
}
if m.flagInsecure {
tlsConfig.InsecureSkipVerify = true
}
@ -175,73 +165,6 @@ func (m *Meta) FlagSet(n string, fs FlagSetFlags) *flag.FlagSet {
return f
}
func (m *Meta) loadCACert(path string) (*x509.CertPool, error) {
certs, err := m.loadCertFromPEM(path)
if err != nil {
return nil, fmt.Errorf("Error loading %s: %s", path, err)
}
result := x509.NewCertPool()
for _, cert := range certs {
result.AddCert(cert)
}
return result, nil
}
func (m *Meta) loadCAPath(path string) (*x509.CertPool, error) {
result := x509.NewCertPool()
fn := func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if info.IsDir() {
return nil
}
certs, err := m.loadCertFromPEM(path)
if err != nil {
return fmt.Errorf("Error loading %s: %s", path, err)
}
for _, cert := range certs {
result.AddCert(cert)
}
return nil
}
return result, filepath.Walk(path, fn)
}
func (m *Meta) loadCertFromPEM(path string) ([]*x509.Certificate, error) {
pemCerts, err := ioutil.ReadFile(path)
if err != nil {
return nil, err
}
certs := make([]*x509.Certificate, 0, 5)
for len(pemCerts) > 0 {
var block *pem.Block
block, pemCerts = pem.Decode(pemCerts)
if block == nil {
break
}
if block.Type != "CERTIFICATE" || len(block.Headers) != 0 {
continue
}
cert, err := x509.ParseCertificate(block.Bytes)
if err != nil {
return nil, err
}
certs = append(certs, cert)
}
return certs, nil
}
// GeneralOptionsUsage returns the usage documentation for commonly
// available options
func GeneralOptionsUsage() string {
@ -255,7 +178,7 @@ func GeneralOptionsUsage() string {
-ca-path=path Path to a directory of PEM encoded CA cert files
to verify the Vault server SSL certificate. If both
-ca-cert and -ca-path are specified, -ca-path is used.
-ca-cert and -ca-path are specified, -ca-cert is used.
Overrides the VAULT_CAPATH environment variable if set.
-client-cert=path Path to a PEM encoded client certificate for TLS

View file

@ -3,6 +3,7 @@ package vault
import (
"fmt"
"strings"
"sync"
"time"
"github.com/hashicorp/vault/logical"
@ -845,6 +846,14 @@ func (b *SystemBackend) handleMountTuneWrite(
return handleError(err)
}
var lock *sync.RWMutex
switch {
case strings.HasPrefix(path, "auth/"):
lock = &b.Core.authLock
default:
lock = &b.Core.mountsLock
}
// Timing configuration parameters
{
var newDefault, newMax *time.Duration
@ -877,8 +886,9 @@ func (b *SystemBackend) handleMountTuneWrite(
}
if newDefault != nil || newMax != nil {
b.Core.mountsLock.Lock()
defer b.Core.mountsLock.Unlock()
lock.Lock()
defer lock.Unlock()
if err := b.tuneMountTTLs(path, &mountEntry.Config, newDefault, newMax); err != nil {
b.Backend.Logger().Printf("[ERR] sys: tune of path '%s' failed: %v", path, err)
return handleError(err)
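To show what the auth-aware lock selection above enables, a hedged sketch of tuning an auth backend's TTLs through the mount-tune endpoint; the `sys/mounts/auth/github/tune` path and the `default_lease_ttl`/`max_lease_ttl` field names are assumptions inferred from the `auth/` prefix check in this hunk rather than taken from the HTTP API documentation.

package main

import (
	"log"
	"os"

	"github.com/hashicorp/vault/api"
)

func main() {
	config := api.DefaultConfig()
	if err := config.ReadEnvironment(); err != nil {
		log.Fatal(err)
	}
	client, err := api.NewClient(config)
	if err != nil {
		log.Fatal(err)
	}
	client.SetToken(os.Getenv("VAULT_TOKEN"))

	// Tune TTLs on an already-enabled auth backend. With GH-1371 the new
	// values are persisted to the auth table (guarded by authLock) instead
	// of being lost on the next restart.
	_, err = client.Logical().Write("sys/mounts/auth/github/tune", map[string]interface{}{
		"default_lease_ttl": "12h",
		"max_lease_ttl":     "24h",
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("tuned TTLs on auth/github")
}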

View file

@ -1,8 +1,8 @@
package vault
import (
"errors"
"fmt"
"strings"
"time"
)
@ -51,6 +51,9 @@ func (b *SystemBackend) tuneMountTTLs(path string, meConfig *MountConfig, newDef
}
}
origMax := meConfig.MaxLeaseTTL
origDefault := meConfig.DefaultLeaseTTL
if newMax != nil {
meConfig.MaxLeaseTTL = *newMax
}
@ -59,8 +62,17 @@ func (b *SystemBackend) tuneMountTTLs(path string, meConfig *MountConfig, newDef
}
// Update the mount table
if err := b.Core.persistMounts(b.Core.mounts); err != nil {
return errors.New("failed to update mount table")
var err error
switch {
case strings.HasPrefix(path, "auth/"):
err = b.Core.persistAuth(b.Core.auth)
default:
err = b.Core.persistMounts(b.Core.mounts)
}
if err != nil {
meConfig.MaxLeaseTTL = origMax
meConfig.DefaultLeaseTTL = origDefault
return fmt.Errorf("failed to update mount table, rolling back TTL changes")
}
b.Core.logger.Printf("[INFO] core: tuned '%s'", path)

View file

@ -196,12 +196,16 @@ func NewTokenStore(c *Core, config *logical.BackendConfig) (*TokenStore, error)
},
&framework.Path{
Pattern: "lookup" + framework.OptionalParamRegex("token"),
Pattern: "lookup" + framework.OptionalParamRegex("urltoken"),
Fields: map[string]*framework.FieldSchema{
"urltoken": &framework.FieldSchema{
Type: framework.TypeString,
Description: "Token to lookup (GET/POST URL parameter)",
},
"token": &framework.FieldSchema{
Type: framework.TypeString,
Description: "Token to lookup",
Description: "Token to lookup (POST request body)",
},
},
@ -215,12 +219,16 @@ func NewTokenStore(c *Core, config *logical.BackendConfig) (*TokenStore, error)
},
&framework.Path{
Pattern: "lookup-accessor" + framework.OptionalParamRegex("accessor"),
Pattern: "lookup-accessor" + framework.OptionalParamRegex("urlaccessor"),
Fields: map[string]*framework.FieldSchema{
"urlaccessor": &framework.FieldSchema{
Type: framework.TypeString,
Description: "Accessor of the token to look up (URL parameter)",
},
"accessor": &framework.FieldSchema{
Type: framework.TypeString,
Description: "Accessor of the token to lookup",
Description: "Accessor of the token to look up (request body)",
},
},
@ -238,12 +246,12 @@ func NewTokenStore(c *Core, config *logical.BackendConfig) (*TokenStore, error)
Fields: map[string]*framework.FieldSchema{
"token": &framework.FieldSchema{
Type: framework.TypeString,
Description: "Token to lookup",
Description: "Token to look up (unused)",
},
},
Callbacks: map[logical.Operation]framework.OperationFunc{
logical.ReadOperation: t.handleLookup,
logical.ReadOperation: t.handleLookupSelf,
},
HelpSynopsis: strings.TrimSpace(tokenLookupHelp),
@ -251,12 +259,16 @@ func NewTokenStore(c *Core, config *logical.BackendConfig) (*TokenStore, error)
},
&framework.Path{
Pattern: "revoke-accessor" + framework.OptionalParamRegex("accessor"),
Pattern: "revoke-accessor" + framework.OptionalParamRegex("urlaccessor"),
Fields: map[string]*framework.FieldSchema{
"urlaccessor": &framework.FieldSchema{
Type: framework.TypeString,
Description: "Accessor of the token (in URL)",
},
"accessor": &framework.FieldSchema{
Type: framework.TypeString,
Description: "Accessor of the token",
Description: "Accessor of the token (request body)",
},
},
@ -280,12 +292,16 @@ func NewTokenStore(c *Core, config *logical.BackendConfig) (*TokenStore, error)
},
&framework.Path{
Pattern: "revoke" + framework.OptionalParamRegex("token"),
Pattern: "revoke" + framework.OptionalParamRegex("urltoken"),
Fields: map[string]*framework.FieldSchema{
"urltoken": &framework.FieldSchema{
Type: framework.TypeString,
Description: "Token to revoke (in URL)",
},
"token": &framework.FieldSchema{
Type: framework.TypeString,
Description: "Token to revoke",
Description: "Token to revoke (request body)",
},
},
@ -298,12 +314,16 @@ func NewTokenStore(c *Core, config *logical.BackendConfig) (*TokenStore, error)
},
&framework.Path{
Pattern: "revoke-orphan" + framework.OptionalParamRegex("token"),
Pattern: "revoke-orphan" + framework.OptionalParamRegex("urltoken"),
Fields: map[string]*framework.FieldSchema{
"urltoken": &framework.FieldSchema{
Type: framework.TypeString,
Description: "Token to revoke (in URL)",
},
"token": &framework.FieldSchema{
Type: framework.TypeString,
Description: "Token to revoke",
Description: "Token to revoke (request body)",
},
},
@ -321,7 +341,7 @@ func NewTokenStore(c *Core, config *logical.BackendConfig) (*TokenStore, error)
Fields: map[string]*framework.FieldSchema{
"token": &framework.FieldSchema{
Type: framework.TypeString,
Description: "Token to renew",
Description: "Token to renew (unused)",
},
"increment": &framework.FieldSchema{
Type: framework.TypeDurationSecond,
@ -339,12 +359,16 @@ func NewTokenStore(c *Core, config *logical.BackendConfig) (*TokenStore, error)
},
&framework.Path{
Pattern: "renew" + framework.OptionalParamRegex("token"),
Pattern: "renew" + framework.OptionalParamRegex("urltoken"),
Fields: map[string]*framework.FieldSchema{
"urltoken": &framework.FieldSchema{
Type: framework.TypeString,
Description: "Token to renew (in URL)",
},
"token": &framework.FieldSchema{
Type: framework.TypeString,
Description: "Token to renew",
Description: "Token to renew (request body)",
},
"increment": &framework.FieldSchema{
Type: framework.TypeDurationSecond,
@ -728,7 +752,10 @@ func (ts *TokenStore) lookupByAccessor(accessor string) (string, error) {
func (ts *TokenStore) handleUpdateLookupAccessor(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
accessor := data.Get("accessor").(string)
if accessor == "" {
return nil, &StatusBadRequest{Err: "missing accessor"}
accessor = data.Get("urlaccessor").(string)
if accessor == "" {
return nil, &StatusBadRequest{Err: "missing accessor"}
}
}
tokenID, err := ts.lookupByAccessor(accessor)
@ -773,7 +800,10 @@ func (ts *TokenStore) handleUpdateLookupAccessor(req *logical.Request, data *fra
func (ts *TokenStore) handleUpdateRevokeAccessor(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
accessor := data.Get("accessor").(string)
if accessor == "" {
return nil, &StatusBadRequest{Err: "missing accessor"}
accessor = data.Get("urlaccessor").(string)
if accessor == "" {
return nil, &StatusBadRequest{Err: "missing accessor"}
}
}
tokenID, err := ts.lookupByAccessor(accessor)
@ -1043,7 +1073,10 @@ func (ts *TokenStore) handleRevokeTree(
req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
id := data.Get("token").(string)
if id == "" {
return logical.ErrorResponse("missing token ID"), logical.ErrInvalidRequest
id = data.Get("urltoken").(string)
if id == "" {
return logical.ErrorResponse("missing token ID"), logical.ErrInvalidRequest
}
}
// Revoke the token and its children
@ -1061,7 +1094,10 @@ func (ts *TokenStore) handleRevokeOrphan(
// Parse the id
id := data.Get("token").(string)
if id == "" {
return logical.ErrorResponse("missing token ID"), logical.ErrInvalidRequest
id = data.Get("urltoken").(string)
if id == "" {
return logical.ErrorResponse("missing token ID"), logical.ErrInvalidRequest
}
}
parent, err := ts.Lookup(req.ClientToken)
@ -1087,11 +1123,20 @@ func (ts *TokenStore) handleRevokeOrphan(
return nil, nil
}
func (ts *TokenStore) handleLookupSelf(
req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
data.Raw["token"] = req.ClientToken
return ts.handleLookup(req, data)
}
// handleLookup handles the auth/token/lookup/id path for querying information about
// a particular token. This can be used to see which policies are applicable.
func (ts *TokenStore) handleLookup(
req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
id := data.Get("token").(string)
if id == "" {
id = data.Get("urltoken").(string)
}
if id == "" {
id = req.ClientToken
}
@ -1162,7 +1207,10 @@ func (ts *TokenStore) handleRenew(
req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
id := data.Get("token").(string)
if id == "" {
return logical.ErrorResponse("missing token ID"), logical.ErrInvalidRequest
id = data.Get("urltoken").(string)
if id == "" {
return logical.ErrorResponse("missing token ID"), logical.ErrInvalidRequest
}
}
incrementRaw := data.Get("increment").(int)
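
The pattern repeated throughout this file is a body-first fallback: prefer the request-body field ("token"/"accessor") and only then read the URL-captured "urltoken"/"urlaccessor" field. A standalone sketch of that fallback, with the framework types reduced to a plain map and names invented for illustration:

package main

import (
	"errors"
	"fmt"
)

// fieldData stands in for framework.FieldData: raw values keyed by field name.
type fieldData map[string]interface{}

func (d fieldData) getString(k string) string {
	if v, ok := d[k].(string); ok {
		return v
	}
	return ""
}

// tokenFromRequest prefers the POST body field ("token") and falls back to the
// URL parameter field ("urltoken"), mirroring the lookup/revoke/renew handlers.
func tokenFromRequest(d fieldData) (string, error) {
	id := d.getString("token")
	if id == "" {
		id = d.getString("urltoken")
		if id == "" {
			return "", errors.New("missing token ID")
		}
	}
	return id, nil
}

func main() {
	fmt.Println(tokenFromRequest(fieldData{"token": "abc"}))       // body wins
	fmt.Println(tokenFromRequest(fieldData{"urltoken": "from-url"})) // URL fallback
	fmt.Println(tokenFromRequest(fieldData{}))                      // error
}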

View file

@ -979,6 +979,7 @@ func TestTokenStore_HandleRequest_Lookup(t *testing.T) {
testCoreMakeToken(t, c, root, "client", "3600s", []string{"foo"})
// Test via GET
req = logical.TestRequest(t, logical.ReadOperation, "lookup/client")
resp, err = ts.HandleRequest(req)
if err != nil {
@ -1016,6 +1017,47 @@ func TestTokenStore_HandleRequest_Lookup(t *testing.T) {
t.Fatalf("bad:\n%#v\nexp:\n%#v\n", resp.Data, exp)
}
// Test via POST
req = logical.TestRequest(t, logical.UpdateOperation, "lookup")
req.Data = map[string]interface{}{
"token": "client",
}
resp, err = ts.HandleRequest(req)
if err != nil {
t.Fatalf("err: %v %v", err, resp)
}
if resp == nil {
t.Fatalf("bad: %#v", resp)
}
exp = map[string]interface{}{
"id": "client",
"accessor": resp.Data["accessor"],
"policies": []string{"default", "foo"},
"path": "auth/token/create",
"meta": map[string]string(nil),
"display_name": "token",
"orphan": false,
"num_uses": 0,
"creation_ttl": int64(3600),
"ttl": int64(3600),
"role": "",
}
if resp.Data["creation_time"].(int64) == 0 {
t.Fatalf("creation time was zero")
}
delete(resp.Data, "creation_time")
// Depending on timing of the test this may have ticked down, so accept 3599
if resp.Data["ttl"].(int64) == 3599 {
resp.Data["ttl"] = int64(3600)
}
if !reflect.DeepEqual(resp.Data, exp) {
t.Fatalf("bad:\n%#v\nexp:\n%#v\n", resp.Data, exp)
}
// Test last_renewal_time functionality
req = logical.TestRequest(t, logical.UpdateOperation, "renew/client")
resp, err = ts.HandleRequest(req)
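
As a usage-level illustration of what this test exercises, here is a hedged sketch of looking up a token over HTTP. It assumes Vault's usual /v1/auth/token/lookup path and X-Vault-Token header; the address and token values are placeholders and a real call needs a running server:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// Placeholder token to look up; with the fix, the response describes this
	// token rather than the token used to authenticate the request.
	body, _ := json.Marshal(map[string]string{"token": "client"})
	req, err := http.NewRequest("POST", "http://127.0.0.1:8200/v1/auth/token/lookup", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	req.Header.Set("X-Vault-Token", "root-token-placeholder")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var out map[string]interface{}
	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Println(out["data"])
}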

View file

@ -55,7 +55,33 @@ type ContainerListResponse struct {
type Blob struct {
Name string `xml:"Name"`
Properties BlobProperties `xml:"Properties"`
// TODO (ahmetalpbalkan) Metadata
Metadata BlobMetadata `xml:"Metadata"`
}
// BlobMetadata contains various metadata properties of the blob
type BlobMetadata map[string]string
type blobMetadataEntries struct {
Entries []blobMetadataEntry `xml:",any"`
}
type blobMetadataEntry struct {
XMLName xml.Name
Value string `xml:",chardata"`
}
// UnmarshalXML converts the xml:Metadata into Metadata map
func (bm *BlobMetadata) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
var entries blobMetadataEntries
if err := d.DecodeElement(&entries, &start); err != nil {
return err
}
for _, entry := range entries.Entries {
if *bm == nil {
*bm = make(BlobMetadata)
}
(*bm)[strings.ToLower(entry.XMLName.Local)] = entry.Value
}
return nil
}
// BlobProperties contains various properties of a blob
@ -457,7 +483,7 @@ func (b BlobStorageClient) GetBlobURL(container, name string) string {
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179440.aspx
func (b BlobStorageClient) GetBlob(container, name string) (io.ReadCloser, error) {
resp, err := b.getBlobRange(container, name, "")
resp, err := b.getBlobRange(container, name, "", nil)
if err != nil {
return nil, err
}
@ -472,8 +498,8 @@ func (b BlobStorageClient) GetBlob(container, name string) (io.ReadCloser, error
// string must be in a format like "0-", "10-100" as defined in HTTP 1.1 spec.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179440.aspx
func (b BlobStorageClient) GetBlobRange(container, name, bytesRange string) (io.ReadCloser, error) {
resp, err := b.getBlobRange(container, name, bytesRange)
func (b BlobStorageClient) GetBlobRange(container, name, bytesRange string, extraHeaders map[string]string) (io.ReadCloser, error) {
resp, err := b.getBlobRange(container, name, bytesRange, extraHeaders)
if err != nil {
return nil, err
}
@ -484,7 +510,7 @@ func (b BlobStorageClient) GetBlobRange(container, name, bytesRange string) (io.
return resp.body, nil
}
func (b BlobStorageClient) getBlobRange(container, name, bytesRange string) (*storageResponse, error) {
func (b BlobStorageClient) getBlobRange(container, name, bytesRange string, extraHeaders map[string]string) (*storageResponse, error) {
verb := "GET"
uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{})
@ -492,6 +518,11 @@ func (b BlobStorageClient) getBlobRange(container, name, bytesRange string) (*st
if bytesRange != "" {
headers["Range"] = fmt.Sprintf("bytes=%s", bytesRange)
}
for k, v := range extraHeaders {
headers[k] = v
}
resp, err := b.client.exec(verb, uri, headers, nil)
if err != nil {
return nil, err
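
The Metadata field added above works because BlobMetadata implements a custom UnmarshalXML that collects arbitrary child elements into a lowercase-keyed map. A self-contained sketch of that same technique, with the type names and sample XML invented for illustration:

package main

import (
	"encoding/xml"
	"fmt"
	"strings"
)

// metadata collects arbitrary <Metadata> child elements into a map.
type metadata map[string]string

type metadataEntries struct {
	Entries []metadataEntry `xml:",any"`
}

type metadataEntry struct {
	XMLName xml.Name
	Value   string `xml:",chardata"`
}

// UnmarshalXML decodes every child element and stores it under its
// lowercased local name, the same approach as BlobMetadata above.
func (m *metadata) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
	var entries metadataEntries
	if err := d.DecodeElement(&entries, &start); err != nil {
		return err
	}
	for _, e := range entries.Entries {
		if *m == nil {
			*m = make(metadata)
		}
		(*m)[strings.ToLower(e.XMLName.Local)] = e.Value
	}
	return nil
}

type blob struct {
	Name     string   `xml:"Name"`
	Metadata metadata `xml:"Metadata"`
}

func main() {
	src := `<Blob><Name>hello.txt</Name><Metadata><Owner>alice</Owner><Env>dev</Env></Metadata></Blob>`
	var b blob
	if err := xml.Unmarshal([]byte(src), &b); err != nil {
		panic(err)
	}
	fmt.Println(b.Name, b.Metadata["owner"], b.Metadata["env"]) // hello.txt alice dev
}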

View file

@ -4,6 +4,7 @@ package storage
import (
"bytes"
"encoding/base64"
"encoding/json"
"encoding/xml"
"errors"
"fmt"
@ -54,6 +55,11 @@ type storageResponse struct {
body io.ReadCloser
}
type odataResponse struct {
storageResponse
odata odataErrorMessage
}
// AzureStorageServiceError contains fields of the error response from
// Azure Storage Service REST API. See https://msdn.microsoft.com/en-us/library/azure/dd179382.aspx
// Some fields might be specific to certain calls.
@ -68,6 +74,20 @@ type AzureStorageServiceError struct {
RequestID string
}
type odataErrorMessageMessage struct {
Lang string `json:"lang"`
Value string `json:"value"`
}
type odataErrorMessageInternal struct {
Code string `json:"code"`
Message odataErrorMessageMessage `json:"message"`
}
type odataErrorMessage struct {
Err odataErrorMessageInternal `json:"odata.error"`
}
// UnexpectedStatusCodeError is returned when a storage service responds with neither an error
// nor with an HTTP status code indicating success.
type UnexpectedStatusCodeError struct {
@ -166,6 +186,12 @@ func (c Client) GetQueueService() QueueServiceClient {
return QueueServiceClient{c}
}
// GetTableService returns a TableServiceClient which can operate on the table
// service of the storage account.
func (c Client) GetTableService() TableServiceClient {
return TableServiceClient{c}
}
// GetFileService returns a FileServiceClient which can operate on the file
// service of the storage account.
func (c Client) GetFileService() FileServiceClient {
@ -228,6 +254,22 @@ func (c Client) buildCanonicalizedHeader(headers map[string]string) string {
return ch
}
func (c Client) buildCanonicalizedResourceTable(uri string) (string, error) {
errMsg := "buildCanonicalizedResourceTable error: %s"
u, err := url.Parse(uri)
if err != nil {
return "", fmt.Errorf(errMsg, err.Error())
}
cr := "/" + c.accountName
if len(u.Path) > 0 {
cr += u.Path
}
return cr, nil
}
func (c Client) buildCanonicalizedResource(uri string) (string, error) {
errMsg := "buildCanonicalizedResource error: %s"
u, err := url.Parse(uri)
@ -236,6 +278,7 @@ func (c Client) buildCanonicalizedResource(uri string) (string, error) {
}
cr := "/" + c.accountName
if len(u.Path) > 0 {
cr += u.Path
}
@ -266,6 +309,7 @@ func (c Client) buildCanonicalizedResource(uri string) (string, error) {
}
}
}
return cr, nil
}
@ -364,6 +408,70 @@ func (c Client) exec(verb, url string, headers map[string]string, body io.Reader
body: resp.Body}, nil
}
func (c Client) execInternalJSON(verb, url string, headers map[string]string, body io.Reader) (*odataResponse, error) {
req, err := http.NewRequest(verb, url, body)
for k, v := range headers {
req.Header.Add(k, v)
}
httpClient := c.HTTPClient
if httpClient == nil {
httpClient = http.DefaultClient
}
resp, err := httpClient.Do(req)
if err != nil {
return nil, err
}
respToRet := &odataResponse{}
respToRet.body = resp.Body
respToRet.statusCode = resp.StatusCode
respToRet.headers = resp.Header
statusCode := resp.StatusCode
if statusCode >= 400 && statusCode <= 505 {
var respBody []byte
respBody, err = readResponseBody(resp)
if err != nil {
return nil, err
}
if len(respBody) == 0 {
// no error in response body
err = fmt.Errorf("storage: service returned without a response body (%d)", resp.StatusCode)
return respToRet, err
}
// try unmarshal as odata.error json
err = json.Unmarshal(respBody, &respToRet.odata)
return respToRet, err
}
return respToRet, nil
}
func (c Client) createSharedKeyLite(url string, headers map[string]string) (string, error) {
can, err := c.buildCanonicalizedResourceTable(url)
if err != nil {
return "", err
}
strToSign := headers["x-ms-date"] + "\n" + can
hmac := c.computeHmac256(strToSign)
return fmt.Sprintf("SharedKeyLite %s:%s", c.accountName, hmac), nil
}
func (c Client) execTable(verb, url string, headers map[string]string, body io.Reader) (*odataResponse, error) {
var err error
headers["Authorization"], err = c.createSharedKeyLite(url, headers)
if err != nil {
return nil, err
}
return c.execInternalJSON(verb, url, headers, body)
}
func readResponseBody(resp *http.Response) ([]byte, error) {
defer resp.Body.Close()
out, err := ioutil.ReadAll(resp.Body)
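
createSharedKeyLite signs x-ms-date plus the canonicalized resource with the account key. computeHmac256 is not shown in this diff; the sketch below assumes it is an HMAC-SHA256 over the string to sign, base64-encoded and keyed with the base64-decoded account key, which is how Shared Key Lite is normally computed for the table service. Account name, key, and URL are made up:

package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/base64"
	"fmt"
	"net/url"
)

// canonicalizedResourceTable mirrors buildCanonicalizedResourceTable above:
// "/" + account name + the URL path.
func canonicalizedResourceTable(account, rawURL string) (string, error) {
	u, err := url.Parse(rawURL)
	if err != nil {
		return "", err
	}
	return "/" + account + u.Path, nil
}

// sharedKeyLite builds the Authorization header value for the table service
// under the stated assumptions about computeHmac256.
func sharedKeyLite(account, base64Key, rawURL, xMsDate string) (string, error) {
	can, err := canonicalizedResourceTable(account, rawURL)
	if err != nil {
		return "", err
	}
	key, err := base64.StdEncoding.DecodeString(base64Key)
	if err != nil {
		return "", err
	}
	mac := hmac.New(sha256.New, key)
	mac.Write([]byte(xMsDate + "\n" + can))
	sig := base64.StdEncoding.EncodeToString(mac.Sum(nil))
	return fmt.Sprintf("SharedKeyLite %s:%s", account, sig), nil
}

func main() {
	fakeKey := base64.StdEncoding.EncodeToString([]byte("fake-key"))
	hdr, err := sharedKeyLite("myaccount", fakeKey,
		"https://myaccount.table.core.windows.net/Tables", "Mon, 02 May 2016 15:04:05 GMT")
	if err != nil {
		panic(err)
	}
	fmt.Println(hdr)
}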

View file

@ -0,0 +1,129 @@
package storage
import (
"bytes"
"encoding/json"
"fmt"
"net/http"
"net/url"
)
// TableServiceClient contains operations for Microsoft Azure Table Storage
// Service.
type TableServiceClient struct {
client Client
}
// AzureTable is the typedef of the Azure Table name
type AzureTable string
const (
tablesURIPath = "/Tables"
)
type createTableRequest struct {
TableName string `json:"TableName"`
}
func pathForTable(table AzureTable) string { return fmt.Sprintf("%s", table) }
func (c *TableServiceClient) getStandardHeaders() map[string]string {
return map[string]string{
"x-ms-version": "2015-02-21",
"x-ms-date": currentTimeRfc1123Formatted(),
"Accept": "application/json;odata=nometadata",
"Accept-Charset": "UTF-8",
"Content-Type": "application/json",
}
}
// QueryTables returns the tables created in the
// *TableServiceClient storage account.
func (c *TableServiceClient) QueryTables() ([]AzureTable, error) {
uri := c.client.getEndpoint(tableServiceName, tablesURIPath, url.Values{})
headers := c.getStandardHeaders()
headers["Content-Length"] = "0"
resp, err := c.client.execTable("GET", uri, headers, nil)
if err != nil {
return nil, err
}
defer resp.body.Close()
if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
return nil, err
}
buf := new(bytes.Buffer)
buf.ReadFrom(resp.body)
var respArray queryTablesResponse
if err := json.Unmarshal(buf.Bytes(), &respArray); err != nil {
return nil, err
}
s := make([]AzureTable, len(respArray.TableName))
for i, elem := range respArray.TableName {
s[i] = AzureTable(elem.TableName)
}
return s, nil
}
// CreateTable creates the table given the specific
// name. This function fails if the name is not compliant
// with the specification or the table already exists.
func (c *TableServiceClient) CreateTable(table AzureTable) error {
uri := c.client.getEndpoint(tableServiceName, tablesURIPath, url.Values{})
headers := c.getStandardHeaders()
req := createTableRequest{TableName: string(table)}
buf := new(bytes.Buffer)
if err := json.NewEncoder(buf).Encode(req); err != nil {
return err
}
headers["Content-Length"] = fmt.Sprintf("%d", buf.Len())
resp, err := c.client.execTable("POST", uri, headers, buf)
if err != nil {
return err
}
defer resp.body.Close()
if err := checkRespCode(resp.statusCode, []int{http.StatusCreated}); err != nil {
return err
}
return nil
}
// DeleteTable deletes the table given the specific
// name. This function fails if the table is not present.
// Be advised: DeleteTable deletes all the entries
// that may be present.
func (c *TableServiceClient) DeleteTable(table AzureTable) error {
uri := c.client.getEndpoint(tableServiceName, tablesURIPath, url.Values{})
uri += fmt.Sprintf("('%s')", string(table))
headers := c.getStandardHeaders()
headers["Content-Length"] = "0"
resp, err := c.client.execTable("DELETE", uri, headers, nil)
if err != nil {
return err
}
defer resp.body.Close()
if err := checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil {
return err
}
return nil
}
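
A hedged usage sketch of the new table client. It assumes the storage.Client is built with the package's basic-client constructor (NewBasicClient, not shown in this diff) and uses placeholder credentials and a placeholder table name:

package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/storage"
)

func main() {
	// Placeholder account name and base64 key; NewBasicClient is assumed to be
	// the usual constructor for storage.Client in this package version.
	cli, err := storage.NewBasicClient("myaccount", "bXkta2V5")
	if err != nil {
		panic(err)
	}
	tsvc := cli.GetTableService()

	table := storage.AzureTable("exampletable")
	if err := tsvc.CreateTable(table); err != nil {
		panic(err)
	}

	tables, err := tsvc.QueryTables()
	if err != nil {
		panic(err)
	}
	fmt.Println("tables:", tables)

	if err := tsvc.DeleteTable(table); err != nil {
		panic(err)
	}
}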

View file

@ -0,0 +1,351 @@
package storage
import (
"bytes"
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"reflect"
)
const (
partitionKeyNode = "PartitionKey"
rowKeyNode = "RowKey"
tag = "table"
tagIgnore = "-"
continuationTokenPartitionKeyHeader = "X-Ms-Continuation-Nextpartitionkey"
continuationTokenRowHeader = "X-Ms-Continuation-Nextrowkey"
maxTopParameter = 1000
)
type queryTablesResponse struct {
TableName []struct {
TableName string `json:"TableName"`
} `json:"value"`
}
const (
tableOperationTypeInsert = iota
tableOperationTypeUpdate = iota
tableOperationTypeMerge = iota
tableOperationTypeInsertOrReplace = iota
tableOperationTypeInsertOrMerge = iota
)
type tableOperation int
// TableEntity interface specifies
// the functions needed to support
// marshaling and unmarshaling into
// Azure Tables. The struct must only contain
// simple types because Azure Tables do not
// support hierarchy.
type TableEntity interface {
PartitionKey() string
RowKey() string
SetPartitionKey(string) error
SetRowKey(string) error
}
// ContinuationToken is an opaque (i.e. not useful to inspect)
// struct that Get... methods can return if there are more
// entries to be returned than the ones already
// returned. Just pass it to the same function to continue
// receiving the remaining entries.
type ContinuationToken struct {
NextPartitionKey string
NextRowKey string
}
type getTableEntriesResponse struct {
Elements []map[string]interface{} `json:"value"`
}
// QueryTableEntities queries the specified table and returns the unmarshaled
// entities of type retType.
// The top parameter limits the number of returned entries to at most top. The
// maximum top allowed by the Azure API is 1000. If there are more than top entries
// to be returned, the function returns a non-nil *ContinuationToken. You can call the
// same function again passing the received ContinuationToken as previousContToken
// parameter in order to get the following entries. The query parameter
// is the odata query. To retrieve all the entries pass the empty string.
// The function returns a TableEntity slice, the *ContinuationToken
// if there are more entries to be returned and an error in case something went
// wrong.
//
// Example:
// entities, cToken, err = tSvc.QueryTableEntities("table", cToken, reflect.TypeOf(entity), 20, "")
func (c *TableServiceClient) QueryTableEntities(tableName AzureTable, previousContToken *ContinuationToken, retType reflect.Type, top int, query string) ([]TableEntity, *ContinuationToken, error) {
if top > maxTopParameter {
return nil, nil, fmt.Errorf("top accepts at maximum %d elements. Requested %d instead", maxTopParameter, top)
}
uri := c.client.getEndpoint(tableServiceName, pathForTable(tableName), url.Values{})
uri += fmt.Sprintf("?$top=%d", top)
if query != "" {
uri += fmt.Sprintf("&$filter=%s", url.QueryEscape(query))
}
if previousContToken != nil {
uri += fmt.Sprintf("&NextPartitionKey=%s&NextRowKey=%s", previousContToken.NextPartitionKey, previousContToken.NextRowKey)
}
headers := c.getStandardHeaders()
headers["Content-Length"] = "0"
resp, err := c.client.execTable("GET", uri, headers, nil)
contToken := extractContinuationTokenFromHeaders(resp.headers)
if err != nil {
return nil, contToken, err
}
defer resp.body.Close()
if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
return nil, contToken, err
}
retEntries, err := deserializeEntity(retType, resp.body)
if err != nil {
return nil, contToken, err
}
return retEntries, contToken, nil
}
// InsertEntity inserts an entity in the specified table.
// The function fails if there is an entity with the same
// PartitionKey and RowKey in the table.
func (c *TableServiceClient) InsertEntity(table AzureTable, entity TableEntity) error {
var err error
if sc, err := c.execTable(table, entity, false, "POST"); err != nil {
return checkRespCode(sc, []int{http.StatusCreated})
}
return err
}
func (c *TableServiceClient) execTable(table AzureTable, entity TableEntity, specifyKeysInURL bool, method string) (int, error) {
uri := c.client.getEndpoint(tableServiceName, pathForTable(table), url.Values{})
if specifyKeysInURL {
uri += fmt.Sprintf("(PartitionKey='%s',RowKey='%s')", url.QueryEscape(entity.PartitionKey()), url.QueryEscape(entity.RowKey()))
}
headers := c.getStandardHeaders()
var buf bytes.Buffer
if err := injectPartitionAndRowKeys(entity, &buf); err != nil {
return 0, err
}
headers["Content-Length"] = fmt.Sprintf("%d", buf.Len())
var err error
var resp *odataResponse
resp, err = c.client.execTable(method, uri, headers, &buf)
if err != nil {
return 0, err
}
defer resp.body.Close()
return resp.statusCode, nil
}
// UpdateEntity updates the contents of an entity with the
// one passed as parameter. The function fails if there is no entity
// with the same PartitionKey and RowKey in the table.
func (c *TableServiceClient) UpdateEntity(table AzureTable, entity TableEntity) error {
var err error
if sc, err := c.execTable(table, entity, true, "PUT"); err != nil {
return checkRespCode(sc, []int{http.StatusNoContent})
}
return err
}
// MergeEntity merges the contents of an entity with the
// one passed as parameter.
// The function fails if there is no entity
// with the same PartitionKey and RowKey in the table.
func (c *TableServiceClient) MergeEntity(table AzureTable, entity TableEntity) error {
var err error
if sc, err := c.execTable(table, entity, true, "MERGE"); err != nil {
return checkRespCode(sc, []int{http.StatusNoContent})
}
return err
}
// DeleteEntityWithoutCheck deletes the entity matching by
// PartitionKey and RowKey. There is no check on IfMatch
// parameter so the entity is always deleted.
// The function fails if there is no entity
// with the same PartitionKey and RowKey in the table.
func (c *TableServiceClient) DeleteEntityWithoutCheck(table AzureTable, entity TableEntity) error {
return c.DeleteEntity(table, entity, "*")
}
// DeleteEntity deletes the entity matching by
// PartitionKey, RowKey and ifMatch field.
// The function fails if there is no entity
// with the same PartitionKey and RowKey in the table or
// the ifMatch is different.
func (c *TableServiceClient) DeleteEntity(table AzureTable, entity TableEntity, ifMatch string) error {
uri := c.client.getEndpoint(tableServiceName, pathForTable(table), url.Values{})
uri += fmt.Sprintf("(PartitionKey='%s',RowKey='%s')", url.QueryEscape(entity.PartitionKey()), url.QueryEscape(entity.RowKey()))
headers := c.getStandardHeaders()
headers["Content-Length"] = "0"
headers["If-Match"] = ifMatch
resp, err := c.client.execTable("DELETE", uri, headers, nil)
if err != nil {
return err
}
defer resp.body.Close()
if err := checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil {
return err
}
return nil
}
// InsertOrReplaceEntity inserts an entity in the specified table
// or replaces the existing one.
func (c *TableServiceClient) InsertOrReplaceEntity(table AzureTable, entity TableEntity) error {
var err error
if sc, err := c.execTable(table, entity, true, "PUT"); err != nil {
return checkRespCode(sc, []int{http.StatusNoContent})
}
return err
}
// InsertOrMergeEntity inserts an entity in the specified table
// or merges the existing one.
func (c *TableServiceClient) InsertOrMergeEntity(table AzureTable, entity TableEntity) error {
var err error
if sc, err := c.execTable(table, entity, true, "MERGE"); err != nil {
return checkRespCode(sc, []int{http.StatusNoContent})
}
return err
}
func injectPartitionAndRowKeys(entity TableEntity, buf *bytes.Buffer) error {
if err := json.NewEncoder(buf).Encode(entity); err != nil {
return err
}
dec := make(map[string]interface{})
if err := json.NewDecoder(buf).Decode(&dec); err != nil {
return err
}
// Inject PartitionKey and RowKey
dec[partitionKeyNode] = entity.PartitionKey()
dec[rowKeyNode] = entity.RowKey()
// Remove tagged fields
// The tag is defined in the const section
// This is useful to avoid storing the PartitionKey and RowKey twice.
numFields := reflect.ValueOf(entity).Elem().NumField()
for i := 0; i < numFields; i++ {
f := reflect.ValueOf(entity).Elem().Type().Field(i)
if f.Tag.Get(tag) == tagIgnore {
// we must look for its JSON name in the dictionary
// as the user can rename it using a tag
jsonName := f.Name
if f.Tag.Get("json") != "" {
jsonName = f.Tag.Get("json")
}
delete(dec, jsonName)
}
}
buf.Reset()
if err := json.NewEncoder(buf).Encode(&dec); err != nil {
return err
}
return nil
}
func deserializeEntity(retType reflect.Type, reader io.Reader) ([]TableEntity, error) {
buf := new(bytes.Buffer)
var ret getTableEntriesResponse
if err := json.NewDecoder(reader).Decode(&ret); err != nil {
return nil, err
}
tEntries := make([]TableEntity, len(ret.Elements))
for i, entry := range ret.Elements {
buf.Reset()
if err := json.NewEncoder(buf).Encode(entry); err != nil {
return nil, err
}
dec := make(map[string]interface{})
if err := json.NewDecoder(buf).Decode(&dec); err != nil {
return nil, err
}
var pKey, rKey string
// strip pk and rk
for key, val := range dec {
switch key {
case partitionKeyNode:
pKey = val.(string)
case rowKeyNode:
rKey = val.(string)
}
}
delete(dec, partitionKeyNode)
delete(dec, rowKeyNode)
buf.Reset()
if err := json.NewEncoder(buf).Encode(dec); err != nil {
return nil, err
}
// Create an empty retType instance
tEntries[i] = reflect.New(retType.Elem()).Interface().(TableEntity)
// Populate it with the values
if err := json.NewDecoder(buf).Decode(&tEntries[i]); err != nil {
return nil, err
}
// Reset PartitionKey and RowKey
tEntries[i].SetPartitionKey(pKey)
tEntries[i].SetRowKey(rKey)
}
return tEntries, nil
}
func extractContinuationTokenFromHeaders(h http.Header) *ContinuationToken {
ct := ContinuationToken{h.Get(continuationTokenPartitionKeyHeader), h.Get(continuationTokenRowHeader)}
if ct.NextPartitionKey != "" && ct.NextRowKey != "" {
return &ct
}
return nil
}
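
To use these helpers, a type has to satisfy TableEntity and tag its key fields with `table:"-"` so injectPartitionAndRowKeys strips the duplicated keys from the JSON body. A sketch of such an entity, with the struct and field names invented:

package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/storage"
)

// customer satisfies storage.TableEntity. The key fields carry the `table:"-"`
// tag so they are removed from the serialized body and injected only as
// PartitionKey/RowKey by the library.
type customer struct {
	PKey  string `json:"pkey" table:"-"`
	RKey  string `json:"rkey" table:"-"`
	Email string `json:"email"`
}

func (c *customer) PartitionKey() string           { return c.PKey }
func (c *customer) RowKey() string                 { return c.RKey }
func (c *customer) SetPartitionKey(p string) error { c.PKey = p; return nil }
func (c *customer) SetRowKey(r string) error       { c.RKey = r; return nil }

func main() {
	var e storage.TableEntity = &customer{PKey: "emea", RKey: "42", Email: "a@example.com"}
	fmt.Println(e.PartitionKey(), e.RowKey())
}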

View file

@ -221,6 +221,13 @@ func (r *Request) Sign() error {
//
// Send will sign the request prior to sending. All Send Handlers will
// be executed in the order they were set.
//
// Canceling a request is non-deterministic. If a request has been canceled,
// then the transport will choose, randomly, one of the state channels during
// reads or getting the connection.
//
// readLoop() and getConn(req *Request, cm connectMethod)
// https://github.com/golang/go/blob/master/src/net/http/transport.go
func (r *Request) Send() error {
for {
if aws.BoolValue(r.Retryable) {

View file

@ -5,4 +5,4 @@ package aws
const SDKName = "aws-sdk-go"
// SDKVersion is the version of this SDK
const SDKVersion = "1.1.20"
const SDKVersion = "1.1.22"

View file

@ -120,7 +120,8 @@ func buildStruct(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag)
name = locName
}
fmt.Fprintf(buf, "%q:", name)
writeString(name, buf)
buf.WriteString(`:`)
err := buildAny(member, buf, field.Tag)
if err != nil {
@ -167,7 +168,9 @@ func buildMap(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) err
buf.WriteByte(',')
}
fmt.Fprintf(buf, "%q:", k)
writeString(k.String(), buf)
buf.WriteString(`:`)
buildAny(value.MapIndex(k), buf, "")
}
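
Context for the writeString change above: fmt's %q produces Go string-literal escapes rather than JSON escapes, so a control byte serializes as \x01, which JSON parsers reject. A small standard-library demonstration of the difference:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	s := "tab:\tctrl:\x01"

	// Go string-literal quoting: the control byte becomes \x01.
	goQuoted := fmt.Sprintf("%q", s)

	// JSON quoting: the same byte becomes \u0001.
	jsonQuoted, _ := json.Marshal(s)

	fmt.Println(goQuoted)           // "tab:\tctrl:\x01"
	fmt.Println(string(jsonQuoted)) // "tab:\tctrl:\u0001"

	// Feeding the Go-quoted form back through a JSON decoder fails,
	// because \x is not a valid JSON escape.
	var back string
	err := json.Unmarshal([]byte(goQuoted), &back)
	fmt.Println("unmarshal of Go-quoted output:", err)
}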

View file

@ -1777,6 +1777,11 @@ func (c *EC2) CreateVpcRequest(input *CreateVpcInput) (req *request.Request, out
// which includes only a default DNS server that we provide (AmazonProvidedDNS).
// For more information about DHCP options, see DHCP Options Sets (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_DHCP_Options.html)
// in the Amazon Virtual Private Cloud User Guide.
//
// You can specify the instance tenancy value for the VPC when you create it.
// You can't change this value for the VPC after you create it. For more information,
// see Dedicated Instances (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/dedicated-instance.html.html)
// in the Amazon Virtual Private Cloud User Guide.
func (c *EC2) CreateVpc(input *CreateVpcInput) (*CreateVpcOutput, error) {
req, out := c.CreateVpcRequest(input)
err := req.Send()
@ -1887,6 +1892,9 @@ func (c *EC2) CreateVpnConnectionRequest(input *CreateVpnConnectionInput) (req *
// create a new VPN connection, you must reconfigure your customer gateway with
// the new information returned from this call.
//
// This is an idempotent operation. If you perform the operation more than
// once, Amazon EC2 doesn't return an error.
//
// For more information about VPN connections, see Adding a Hardware Virtual
// Private Gateway to Your VPC (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_VPN.html)
// in the Amazon Virtual Private Cloud User Guide.
@ -3130,7 +3138,8 @@ func (c *EC2) DescribeIdFormatRequest(input *DescribeIdFormatInput) (req *reques
// request only returns information about resource types whose ID formats can
// be modified; it does not return information about other resource types.
//
// The following resource types support longer IDs: instance | reservation.
// The following resource types support longer IDs: instance | reservation
// | snapshot | volume.
//
// These settings apply to the IAM user who makes the request; they do not
// apply to the entire AWS account. By default, an IAM user defaults to the
@ -3318,7 +3327,8 @@ func (c *EC2) DescribeInstanceStatusRequest(input *DescribeInstanceStatusInput)
return
}
// Describes the status of one or more instances.
// Describes the status of one or more instances. By default, only running instances
// are described, unless specified otherwise.
//
// Instance status includes the following components:
//
@ -3328,7 +3338,7 @@ func (c *EC2) DescribeInstanceStatusRequest(input *DescribeInstanceStatusInput)
// and Troubleshooting Instances with Failed Status Checks (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/TroubleshootingInstances.html)
// in the Amazon Elastic Compute Cloud User Guide.
//
// Scheduled events - Amazon EC2 can schedule events (such as reboot, stop,
// Scheduled events - Amazon EC2 can schedule events (such as reboot, stop,
// or terminate) for your instances related to hardware issues, software updates,
// or system maintenance. For more information, see Scheduled Events for Your
// Instances (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/monitoring-instances-status-check_sched.html)
@ -5559,7 +5569,7 @@ func (c *EC2) ModifyIdFormatRequest(input *ModifyIdFormatInput) (req *request.Re
// Modifies the ID format for the specified resource on a per-region basis.
// You can specify that resources should receive longer IDs (17-character IDs)
// when they are created. The following resource types support longer IDs: instance
// | reservation.
// | reservation | snapshot | volume.
//
// This setting applies to the IAM user who makes the request; it does not
// apply to the entire AWS account. By default, an IAM user defaults to the
@ -5964,6 +5974,49 @@ func (c *EC2) ModifyVpcEndpoint(input *ModifyVpcEndpointInput) (*ModifyVpcEndpoi
return out, err
}
const opModifyVpcPeeringConnectionOptions = "ModifyVpcPeeringConnectionOptions"
// ModifyVpcPeeringConnectionOptionsRequest generates a request for the ModifyVpcPeeringConnectionOptions operation.
func (c *EC2) ModifyVpcPeeringConnectionOptionsRequest(input *ModifyVpcPeeringConnectionOptionsInput) (req *request.Request, output *ModifyVpcPeeringConnectionOptionsOutput) {
op := &request.Operation{
Name: opModifyVpcPeeringConnectionOptions,
HTTPMethod: "POST",
HTTPPath: "/",
}
if input == nil {
input = &ModifyVpcPeeringConnectionOptionsInput{}
}
req = c.newRequest(op, input, output)
output = &ModifyVpcPeeringConnectionOptionsOutput{}
req.Data = output
return
}
// Modifies the VPC peering connection options on one side of a VPC peering
// connection. You can do the following:
//
// Enable/disable communication over the peering connection between an EC2-Classic
// instance that's linked to your VPC (using ClassicLink) and instances in the
// peer VPC.
//
// Enable/disable communication over the peering connection between instances
// in your VPC and an EC2-Classic instance that's linked to the peer VPC.
//
// If the peered VPCs are in different accounts, each owner must initiate
// a separate request to enable or disable communication in either direction,
// depending on whether their VPC was the requester or accepter for the VPC
// peering connection. If the peered VPCs are in the same account, you can modify
// the requester and accepter options in the same request. To confirm which
// VPC is the accepter and requester for a VPC peering connection, use the DescribeVpcPeeringConnections
// command.
func (c *EC2) ModifyVpcPeeringConnectionOptions(input *ModifyVpcPeeringConnectionOptionsInput) (*ModifyVpcPeeringConnectionOptionsOutput, error) {
req, out := c.ModifyVpcPeeringConnectionOptionsRequest(input)
err := req.Send()
return out, err
}
const opMonitorInstances = "MonitorInstances"
// MonitorInstancesRequest generates a request for the MonitorInstances operation.
@ -6091,7 +6144,11 @@ func (c *EC2) PurchaseScheduledInstancesRequest(input *PurchaseScheduledInstance
// Scheduled Instances enable you to purchase Amazon EC2 compute capacity by
// the hour for a one-year term. Before you can purchase a Scheduled Instance,
// you must call DescribeScheduledInstanceAvailability to check for available
// schedules and obtain a purchase token.
// schedules and obtain a purchase token. After you purchase a Scheduled Instance,
// you must call RunScheduledInstances during each scheduled time period.
//
// After you purchase a Scheduled Instance, you can't cancel, modify, or resell
// your purchase.
func (c *EC2) PurchaseScheduledInstances(input *PurchaseScheduledInstancesInput) (*PurchaseScheduledInstancesOutput, error) {
req, out := c.PurchaseScheduledInstancesRequest(input)
err := req.Send()
@ -6125,8 +6182,8 @@ func (c *EC2) RebootInstancesRequest(input *RebootInstancesInput) (req *request.
// succeeds if the instances are valid and belong to you. Requests to reboot
// terminated instances are ignored.
//
// If a Linux/Unix instance does not cleanly shut down within four minutes,
// Amazon EC2 performs a hard reboot.
// If an instance does not cleanly shut down within four minutes, Amazon EC2
// performs a hard reboot.
//
// For more information about troubleshooting, see Getting Console Output and
// Rebooting Instances (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-console.html)
@ -6887,7 +6944,9 @@ func (c *EC2) RunScheduledInstancesRequest(input *RunScheduledInstancesInput) (r
// You must launch a Scheduled Instance during its scheduled time period. You
// can't stop or reboot a Scheduled Instance, but you can terminate it as needed.
// If you terminate a Scheduled Instance before the current scheduled time period
// ends, you can launch it again after a few minutes.
// ends, you can launch it again after a few minutes. For more information,
// see Scheduled Instances (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-scheduled-instances.html)
// in the Amazon Elastic Compute Cloud User Guide.
func (c *EC2) RunScheduledInstances(input *RunScheduledInstancesInput) (*RunScheduledInstancesOutput, error) {
req, out := c.RunScheduledInstancesRequest(input)
err := req.Send()
@ -6959,32 +7018,29 @@ func (c *EC2) StopInstancesRequest(input *StopInstancesInput) (req *request.Requ
return
}
// Stops an Amazon EBS-backed instance. Each time you transition an instance
// from stopped to started, Amazon EC2 charges a full instance hour, even if
// transitions happen multiple times within a single hour.
// Stops an Amazon EBS-backed instance.
//
// You can't start or stop Spot instances.
// We don't charge hourly usage for a stopped instance, or data transfer fees;
// however, your root partition Amazon EBS volume remains, continues to persist
// your data, and you are charged for Amazon EBS volume usage. Each time you
// transition an instance from stopped to started, Amazon EC2 charges a full
// instance hour, even if transitions happen multiple times within a single
// hour.
//
// Instances that use Amazon EBS volumes as their root devices can be quickly
// stopped and started. When an instance is stopped, the compute resources are
// released and you are not billed for hourly instance usage. However, your
// root partition Amazon EBS volume remains, continues to persist your data,
// and you are charged for Amazon EBS volume usage. You can restart your instance
// at any time.
// You can't start or stop Spot instances, and you can't stop instance store-backed
// instances.
//
// Before stopping an instance, make sure it is in a state from which it can
// be restarted. Stopping an instance does not preserve data stored in RAM.
// When you stop an instance, we shut it down. You can restart your instance
// at any time. Before stopping an instance, make sure it is in a state from
// which it can be restarted. Stopping an instance does not preserve data stored
// in RAM.
//
// Performing this operation on an instance that uses an instance store as
// its root device returns an error.
//
// You can stop, start, and terminate EBS-backed instances. You can only terminate
// instance store-backed instances. What happens to an instance differs if you
// stop it or terminate it. For example, when you stop an instance, the root
// device and any other devices attached to the instance persist. When you terminate
// an instance, the root device and any other devices attached during the instance
// launch are automatically deleted. For more information about the differences
// between stopping and terminating instances, see Instance Lifecycle (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-lifecycle.html)
// Stopping an instance is different to rebooting or terminating it. For example,
// when you stop an instance, the root device and any other devices attached
// to the instance persist. When you terminate an instance, the root device
// and any other devices attached during the instance launch are automatically
// deleted. For more information about the differences between rebooting, stopping,
// and terminating instances, see Instance Lifecycle (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-lifecycle.html)
// in the Amazon Elastic Compute Cloud User Guide.
//
// For more information about troubleshooting, see Troubleshooting Stopping
@ -9450,11 +9506,10 @@ type CreateImageInput struct {
// at-signs (@), or underscores(_)
Name *string `locationName:"name" type:"string" required:"true"`
// By default, this parameter is set to false, which means Amazon EC2 attempts
// to shut down the instance cleanly before image creation and then reboots
// the instance. When the parameter is set to true, Amazon EC2 doesn't shut
// down the instance before creating the image. When this option is used, file
// system integrity on the created image can't be guaranteed.
// By default, Amazon EC2 attempts to shut down and reboot the instance before
// creating the image. If the 'No Reboot' option is set, Amazon EC2 doesn't
// shut down the instance before creating the image. When this option is used,
// file system integrity on the created image can't be guaranteed.
NoReboot *bool `locationName:"noReboot" type:"boolean"`
}
@ -10755,11 +10810,11 @@ type CreateVpcInput struct {
// it is UnauthorizedOperation.
DryRun *bool `locationName:"dryRun" type:"boolean"`
// The supported tenancy options for instances launched into the VPC. A value
// of default means that instances can be launched with any tenancy; a value
// of dedicated means all instances launched into the VPC are launched as dedicated
// tenancy instances regardless of the tenancy assigned to the instance at launch.
// Dedicated tenancy instances run on single-tenant hardware.
// The tenancy options for instances launched into the VPC. For default, instances
// are launched with shared tenancy by default. You can launch instances with
// any tenancy into a shared tenancy VPC. For dedicated, instances are launched
// as dedicated tenancy instances by default. You can only launch instances
// with a tenancy of dedicated or host into a dedicated tenancy VPC.
//
// Important: The host value cannot be used with this parameter. Use the default
// or dedicated values only.
@ -13282,7 +13337,8 @@ type DescribeImportImageTasksInput struct {
// it is UnauthorizedOperation.
DryRun *bool `type:"boolean"`
// One or more filters.
// Filter tasks using the task-state filter and one of the following values:
// active, completed, deleting, deleted.
Filters []*Filter `locationNameList:"Filter" type:"list"`
// A list of import image task IDs.
@ -21159,6 +21215,78 @@ func (s ModifyVpcEndpointOutput) GoString() string {
return s.String()
}
type ModifyVpcPeeringConnectionOptionsInput struct {
_ struct{} `type:"structure"`
// The VPC peering connection options for the accepter VPC.
AccepterPeeringConnectionOptions *PeeringConnectionOptionsRequest `type:"structure"`
// Checks whether you have the required permissions for the operation, without
// actually making the request, and provides an error response. If you have
// the required permissions, the error response is DryRunOperation. Otherwise,
// it is UnauthorizedOperation.
DryRun *bool `type:"boolean"`
// The VPC peering connection options for the requester VPC.
RequesterPeeringConnectionOptions *PeeringConnectionOptionsRequest `type:"structure"`
// The ID of the VPC peering connection.
VpcPeeringConnectionId *string `type:"string" required:"true"`
}
// String returns the string representation
func (s ModifyVpcPeeringConnectionOptionsInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ModifyVpcPeeringConnectionOptionsInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *ModifyVpcPeeringConnectionOptionsInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "ModifyVpcPeeringConnectionOptionsInput"}
if s.VpcPeeringConnectionId == nil {
invalidParams.Add(request.NewErrParamRequired("VpcPeeringConnectionId"))
}
if s.AccepterPeeringConnectionOptions != nil {
if err := s.AccepterPeeringConnectionOptions.Validate(); err != nil {
invalidParams.AddNested("AccepterPeeringConnectionOptions", err.(request.ErrInvalidParams))
}
}
if s.RequesterPeeringConnectionOptions != nil {
if err := s.RequesterPeeringConnectionOptions.Validate(); err != nil {
invalidParams.AddNested("RequesterPeeringConnectionOptions", err.(request.ErrInvalidParams))
}
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
type ModifyVpcPeeringConnectionOptionsOutput struct {
_ struct{} `type:"structure"`
// Information about the VPC peering connection options for the accepter VPC.
AccepterPeeringConnectionOptions *PeeringConnectionOptions `locationName:"accepterPeeringConnectionOptions" type:"structure"`
// Information about the VPC peering connection options for the requester VPC.
RequesterPeeringConnectionOptions *PeeringConnectionOptions `locationName:"requesterPeeringConnectionOptions" type:"structure"`
}
// String returns the string representation
func (s ModifyVpcPeeringConnectionOptionsOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ModifyVpcPeeringConnectionOptionsOutput) GoString() string {
return s.String()
}
// Contains the parameters for MonitorInstances.
type MonitorInstancesInput struct {
_ struct{} `type:"structure"`
@ -21706,6 +21834,68 @@ func (s NewDhcpConfiguration) GoString() string {
return s.String()
}
// Describes the VPC peering connection options.
type PeeringConnectionOptions struct {
_ struct{} `type:"structure"`
// If true, enables outbound communication from an EC2-Classic instance that's
// linked to a local VPC via ClassicLink to instances in a peer VPC.
AllowEgressFromLocalClassicLinkToRemoteVpc *bool `locationName:"allowEgressFromLocalClassicLinkToRemoteVpc" type:"boolean"`
// If true, enables outbound communication from instances in a local VPC to
// an EC2-Classic instance that's linked to a peer VPC via ClassicLink.
AllowEgressFromLocalVpcToRemoteClassicLink *bool `locationName:"allowEgressFromLocalVpcToRemoteClassicLink" type:"boolean"`
}
// String returns the string representation
func (s PeeringConnectionOptions) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s PeeringConnectionOptions) GoString() string {
return s.String()
}
// The VPC peering connection options.
type PeeringConnectionOptionsRequest struct {
_ struct{} `type:"structure"`
// If true, enables outbound communication from an EC2-Classic instance that's
// linked to a local VPC via ClassicLink to instances in a peer VPC.
AllowEgressFromLocalClassicLinkToRemoteVpc *bool `type:"boolean" required:"true"`
// If true, enables outbound communication from instances in a local VPC to
// an EC2-Classic instance that's linked to a peer VPC via ClassicLink.
AllowEgressFromLocalVpcToRemoteClassicLink *bool `type:"boolean" required:"true"`
}
// String returns the string representation
func (s PeeringConnectionOptionsRequest) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s PeeringConnectionOptionsRequest) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *PeeringConnectionOptionsRequest) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "PeeringConnectionOptionsRequest"}
if s.AllowEgressFromLocalClassicLinkToRemoteVpc == nil {
invalidParams.Add(request.NewErrParamRequired("AllowEgressFromLocalClassicLinkToRemoteVpc"))
}
if s.AllowEgressFromLocalVpcToRemoteClassicLink == nil {
invalidParams.Add(request.NewErrParamRequired("AllowEgressFromLocalVpcToRemoteClassicLink"))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// Describes the placement for the instance.
type Placement struct {
_ struct{} `type:"structure"`
@ -25698,29 +25888,28 @@ type StateReason struct {
// The message for the state change.
//
// Server.SpotInstanceTermination: A Spot instance was terminated due to
// an increase in the market price.
// Server.SpotInstanceTermination: A Spot instance was terminated due to an
// increase in the market price.
//
// Server.InternalError: An internal error occurred during instance launch,
// Server.InternalError: An internal error occurred during instance launch,
// resulting in termination.
//
// Server.InsufficientInstanceCapacity: There was insufficient instance capacity
// Server.InsufficientInstanceCapacity: There was insufficient instance capacity
// to satisfy the launch request.
//
// Client.InternalError: A client error caused the instance to terminate
// on launch.
// Client.InternalError: A client error caused the instance to terminate on
// launch.
//
// Client.InstanceInitiatedShutdown: The instance was shut down using the
// shutdown -h command from the instance.
// Client.InstanceInitiatedShutdown: The instance was shut down using the shutdown
// -h command from the instance.
//
// Client.UserInitiatedShutdown: The instance was shut down using the Amazon
// Client.UserInitiatedShutdown: The instance was shut down using the Amazon
// EC2 API.
//
// Client.VolumeLimitExceeded: The limit on the number of EBS volumes or
// total storage was exceeded. Decrease usage or request an increase in your
// limits.
// Client.VolumeLimitExceeded: The limit on the number of EBS volumes or total
// storage was exceeded. Decrease usage or request an increase in your limits.
//
// Client.InvalidSnapshot.NotFound: The specified snapshot was not found.
// Client.InvalidSnapshot.NotFound: The specified snapshot was not found.
Message *string `locationName:"message" type:"string"`
}
@ -26627,13 +26816,15 @@ func (s VpcEndpoint) GoString() string {
type VpcPeeringConnection struct {
_ struct{} `type:"structure"`
// The information of the peer VPC.
// Information about the peer VPC. CIDR block information is not returned when
// creating a VPC peering connection, or when describing a VPC peering connection
// that's in the initiating-request or pending-acceptance state.
AccepterVpcInfo *VpcPeeringConnectionVpcInfo `locationName:"accepterVpcInfo" type:"structure"`
// The time that an unaccepted VPC peering connection will expire.
ExpirationTime *time.Time `locationName:"expirationTime" type:"timestamp" timestampFormat:"iso8601"`
// The information of the requester VPC.
// Information about the requester VPC.
RequesterVpcInfo *VpcPeeringConnectionVpcInfo `locationName:"requesterVpcInfo" type:"structure"`
// The status of the VPC peering connection.
@ -26656,6 +26847,29 @@ func (s VpcPeeringConnection) GoString() string {
return s.String()
}
// Describes the VPC peering connection options.
type VpcPeeringConnectionOptionsDescription struct {
_ struct{} `type:"structure"`
// Indicates whether a local ClassicLink connection can communicate with the
// peer VPC over the VPC peering connection.
AllowEgressFromLocalClassicLinkToRemoteVpc *bool `locationName:"allowEgressFromLocalClassicLinkToRemoteVpc" type:"boolean"`
// Indicates whether a local VPC can communicate with a ClassicLink connection
// in the peer VPC over the VPC peering connection.
AllowEgressFromLocalVpcToRemoteClassicLink *bool `locationName:"allowEgressFromLocalVpcToRemoteClassicLink" type:"boolean"`
}
// String returns the string representation
func (s VpcPeeringConnectionOptionsDescription) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s VpcPeeringConnectionOptionsDescription) GoString() string {
return s.String()
}
// Describes the status of a VPC peering connection.
type VpcPeeringConnectionStateReason struct {
_ struct{} `type:"structure"`
@ -26687,6 +26901,10 @@ type VpcPeeringConnectionVpcInfo struct {
// The AWS account ID of the VPC owner.
OwnerId *string `locationName:"ownerId" type:"string"`
// Information about the VPC peering connection options for the accepter or
// requester VPC.
PeeringOptions *VpcPeeringConnectionOptionsDescription `locationName:"peeringOptions" type:"structure"`
// The ID of the VPC.
VpcId *string `locationName:"vpcId" type:"string"`
}
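
A hedged usage sketch of the new ModifyVpcPeeringConnectionOptions call, based only on the input and output types shown above; the session construction, region, and IDs are placeholders, and credentials are assumed to come from the environment:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	// Region is a placeholder; credentials come from the usual SDK chain.
	svc := ec2.New(session.New(aws.NewConfig().WithRegion("us-east-1")))

	input := &ec2.ModifyVpcPeeringConnectionOptionsInput{
		VpcPeeringConnectionId: aws.String("pcx-11112222"), // placeholder ID
		RequesterPeeringConnectionOptions: &ec2.PeeringConnectionOptionsRequest{
			// Both fields are required by Validate above.
			AllowEgressFromLocalClassicLinkToRemoteVpc: aws.Bool(false),
			AllowEgressFromLocalVpcToRemoteClassicLink: aws.Bool(true),
		},
	}

	out, err := svc.ModifyVpcPeeringConnectionOptions(input)
	if err != nil {
		fmt.Println("modify failed:", err)
		return
	}
	fmt.Println(out.RequesterPeeringConnectionOptions)
}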

View file

@ -39,7 +39,7 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o
// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
// in the IAM User Guide.
//
// Important: You cannot call AssumeRole by using AWS root account credentials;
// Important: You cannot call AssumeRole by using AWS root account credentials;
// access is denied. You must use IAM user credentials or temporary security
// credentials to call AssumeRole.
//
@ -78,8 +78,8 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o
// policy of the role that is being assumed. If you pass a policy to this operation,
// the temporary security credentials that are returned by the operation have
// the permissions that are allowed by both the access policy of the role that
// is being assumed, and the policy that you pass. This gives you a way to further
// restrict the permissions for the resulting temporary security credentials.
// is being assumed, and the policy that you pass. This gives you a way to
// further restrict the permissions for the resulting temporary security credentials.
// You cannot use the passed policy to grant permissions that are in excess
// of those allowed by the access policy of the role that is being assumed.
// For more information, see Permissions for AssumeRole, AssumeRoleWithSAML,
@ -174,8 +174,8 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re
// policy of the role that is being assumed. If you pass a policy to this operation,
// the temporary security credentials that are returned by the operation have
// the permissions that are allowed by both the access policy of the role that
// is being assumed, and the policy that you pass. This gives you a way to further
// restrict the permissions for the resulting temporary security credentials.
// is being assumed, and the policy that you pass. This gives you a way to
// further restrict the permissions for the resulting temporary security credentials.
// You cannot use the passed policy to grant permissions that are in excess
// of those allowed by the access policy of the role that is being assumed.
// For more information, see Permissions for AssumeRole, AssumeRoleWithSAML,
@ -192,12 +192,24 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re
// The identity of the caller is validated by using keys in the metadata document
// that is uploaded for the SAML provider entity for your identity provider.
//
// For more information, see the following resources:
// Calling AssumeRoleWithSAML can result in an entry in your AWS CloudTrail
// logs. The entry includes the value in the NameID element of the SAML assertion.
// We recommend that you use a NameIDType that is not associated with any personally
// identifiable information (PII). For example, you could instead use the Persistent
// Identifier (urn:oasis:names:tc:SAML:2.0:nameid-format:persistent).
//
// About SAML 2.0-based Federation (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html)
// in the IAM User Guide. Creating SAML Identity Providers (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html)
// in the IAM User Guide. Configuring a Relying Party and Claims (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html)
// in the IAM User Guide. Creating a Role for SAML 2.0 Federation (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html)
// For more information, see the following resources:
//
// About SAML 2.0-based Federation (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html)
// in the IAM User Guide.
//
// Creating SAML Identity Providers (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html)
// in the IAM User Guide.
//
// Configuring a Relying Party and Claims (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html)
// in the IAM User Guide.
//
// Creating a Role for SAML 2.0 Federation (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html)
// in the IAM User Guide.
func (c *STS) AssumeRoleWithSAML(input *AssumeRoleWithSAMLInput) (*AssumeRoleWithSAMLOutput, error) {
req, out := c.AssumeRoleWithSAMLRequest(input)
@ -270,8 +282,8 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI
// policy of the role that is being assumed. If you pass a policy to this operation,
// the temporary security credentials that are returned by the operation have
// the permissions that are allowed by both the access policy of the role that
// is being assumed, and the policy that you pass. This gives you a way to further
// restrict the permissions for the resulting temporary security credentials.
// is being assumed, and the policy that you pass. This gives you a way to
// further restrict the permissions for the resulting temporary security credentials.
// You cannot use the passed policy to grant permissions that are in excess
// of those allowed by the access policy of the role that is being assumed.
// For more information, see Permissions for AssumeRole, AssumeRoleWithSAML,
@ -284,19 +296,30 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI
// the identity provider that is associated with the identity token. In other
// words, the identity provider must be specified in the role's trust policy.
//
// For more information about how to use web identity federation and the AssumeRoleWithWebIdentity
// Calling AssumeRoleWithWebIdentity can result in an entry in your AWS CloudTrail
// logs. The entry includes the Subject (http://openid.net/specs/openid-connect-core-1_0.html#Claims)
// of the provided Web Identity Token. We recommend that you avoid using any
// personally identifiable information (PII) in this field. For example, you
// could instead use a GUID or a pairwise identifier, as suggested in the OIDC
// specification (http://openid.net/specs/openid-connect-core-1_0.html#SubjectIDTypes).
//
// For more information about how to use web identity federation and the AssumeRoleWithWebIdentity
// API, see the following resources:
//
// Using Web Identity Federation APIs for Mobile Apps (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual)
// Using Web Identity Federation APIs for Mobile Apps (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual)
// and Federation Through a Web-based Identity Provider (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity).
// Web Identity Federation Playground (https://web-identity-federation-playground.s3.amazonaws.com/index.html).
//
// Web Identity Federation Playground (https://web-identity-federation-playground.s3.amazonaws.com/index.html).
// This interactive website lets you walk through the process of authenticating
// via Login with Amazon, Facebook, or Google, getting temporary security credentials,
// and then using those credentials to make a request to AWS. AWS SDK for iOS
// (http://aws.amazon.com/sdkforios/) and AWS SDK for Android (http://aws.amazon.com/sdkforandroid/).
// These toolkits contain sample apps that show how to invoke the identity providers,
// and then how to use the information from these providers to get and use temporary
// security credentials. Web Identity Federation with Mobile Applications (http://aws.amazon.com/articles/4617974389850313).
// and then using those credentials to make a request to AWS.
//
// AWS SDK for iOS (http://aws.amazon.com/sdkforios/) and AWS SDK for Android
// (http://aws.amazon.com/sdkforandroid/). These toolkits contain sample apps
// that show how to invoke the identity providers, and then how to use the information
// from these providers to get and use temporary security credentials.
//
// Web Identity Federation with Mobile Applications (http://aws.amazon.com/articles/4617974389850313).
// This article discusses web identity federation and shows an example of how
// to use web identity federation to get access to content in Amazon S3.
func (c *STS) AssumeRoleWithWebIdentity(input *AssumeRoleWithWebIdentityInput) (*AssumeRoleWithWebIdentityOutput, error) {
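For reference, a minimal caller-side sketch of this operation using the vendored aws-sdk-go STS client; the role ARN, session name, token value, and region/session setup are illustrative assumptions, not part of this diff:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	// The web identity token itself is what authenticates this call.
	svc := sts.New(session.New(aws.NewConfig().WithRegion("us-east-1")))

	out, err := svc.AssumeRoleWithWebIdentity(&sts.AssumeRoleWithWebIdentityInput{
		RoleArn:          aws.String("arn:aws:iam::123456789012:role/web-app"), // placeholder
		RoleSessionName:  aws.String("user-guid"),                              // avoid PII, per the note above
		WebIdentityToken: aws.String("<token from the identity provider>"),     // placeholder
		DurationSeconds:  aws.Int64(900),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.Credentials.AccessKeyId))
}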
@ -335,20 +358,28 @@ func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessag
//
// Only certain AWS actions return an encoded authorization message. The documentation
// for an individual action indicates whether that action returns an encoded
// message in addition to returning an HTTP code. The message is encoded because
// the details of the authorization status can constitute privileged information
// that the user who requested the action should not see. To decode an authorization
// status message, a user must be granted permissions via an IAM policy to request
// the DecodeAuthorizationMessage (sts:DecodeAuthorizationMessage) action.
// message in addition to returning an HTTP code.
//
// The message is encoded because the details of the authorization status
// can constitute privileged information that the user who requested the action
// should not see. To decode an authorization status message, a user must be
// granted permissions via an IAM policy to request the DecodeAuthorizationMessage
// (sts:DecodeAuthorizationMessage) action.
//
// The decoded message includes the following type of information:
//
// Whether the request was denied due to an explicit deny or due to the absence
// Whether the request was denied due to an explicit deny or due to the absence
// of an explicit allow. For more information, see Determining Whether a Request
// is Allowed or Denied (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow)
// in the IAM User Guide. The principal who made the request. The requested
// action. The requested resource. The values of condition keys in the context
// of the user's request.
// in the IAM User Guide.
//
// The principal who made the request.
//
// The requested action.
//
// The requested resource.
//
// The values of condition keys in the context of the user's request.
func (c *STS) DecodeAuthorizationMessage(input *DecodeAuthorizationMessageInput) (*DecodeAuthorizationMessageOutput, error) {
req, out := c.DecodeAuthorizationMessageRequest(input)
err := req.Send()
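A hedged sketch of decoding such a message with the vendored client; it assumes the caller has the sts:DecodeAuthorizationMessage permission described above and reads the encoded message from its first command-line argument:

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	// Requires an IAM policy that allows sts:DecodeAuthorizationMessage.
	svc := sts.New(session.New())

	out, err := svc.DecodeAuthorizationMessage(&sts.DecodeAuthorizationMessageInput{
		EncodedMessage: aws.String(os.Args[1]), // encoded message copied from the failed request
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.DecodedMessage)) // XML document with the decoded details
}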
@ -449,17 +480,20 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re
// The permissions for the temporary security credentials returned by GetFederationToken
// are determined by a combination of the following:
//
// The policy or policies that are attached to the IAM user whose credentials
// are used to call GetFederationToken. The policy that is passed as a parameter
// in the call. The passed policy is attached to the temporary security credentials
// that result from the GetFederationToken API call--that is, to the federated
// user. When the federated user makes an AWS request, AWS evaluates the policy
// attached to the federated user in combination with the policy or policies
// attached to the IAM user whose credentials were used to call GetFederationToken.
// AWS allows the federated user's request only when both the federated user
// and the IAM user are explicitly allowed to perform the requested action.
// The passed policy cannot grant more permissions than those that are defined
// in the IAM user policy.
// The policy or policies that are attached to the IAM user whose credentials
// are used to call GetFederationToken.
//
// The policy that is passed as a parameter in the call.
//
// The passed policy is attached to the temporary security credentials that
// result from the GetFederationToken API call--that is, to the federated user.
// When the federated user makes an AWS request, AWS evaluates the policy attached
// to the federated user in combination with the policy or policies attached
// to the IAM user whose credentials were used to call GetFederationToken. AWS
// allows the federated user's request only when both the federated user and
// the IAM user are explicitly allowed to perform the requested action. The
// passed policy cannot grant more permissions than those that are defined in
// the IAM user policy.
//
// A typical use case is that the permissions of the IAM user whose credentials
// are used to call GetFederationToken are designed to allow access to all the
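A minimal sketch of passing a scoped-down policy to GetFederationToken with the vendored client; the federated user name and policy document are illustrative:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	// The federated user's effective permissions are the intersection of the
	// calling IAM user's policy and the policy passed here.
	policy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Action":"s3:GetObject","Resource":"*"}]}`

	svc := sts.New(session.New())
	out, err := svc.GetFederationToken(&sts.GetFederationTokenInput{
		Name:            aws.String("bob"), // referenceable in resource-based policies
		Policy:          aws.String(policy),
		DurationSeconds: aws.Int64(3600),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.Credentials.SessionToken))
}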
@ -573,6 +607,10 @@ type AssumeRoleInput struct {
// created it. For more information about the external ID, see How to Use an
// External ID When Granting Access to Your AWS Resources to a Third Party (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html)
// in the IAM User Guide.
//
// The format for this parameter, as described by its regex pattern, is a string
// of characters consisting of upper- and lower-case alphanumeric characters
// with no spaces. You can also include any of the following characters: =,.@:\/-
ExternalId *string `min:"2" type:"string"`
// An IAM policy in JSON format.
@ -588,7 +626,13 @@ type AssumeRoleInput struct {
// and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html)
// in the IAM User Guide.
//
// The policy plain text must be 2048 bytes or shorter. However, an internal
// The format for this parameter, as described by its regex pattern, is a string
// of characters up to 2048 characters in length. The characters can be any
// ASCII character from the space character to the end of the valid character
// list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A),
// and carriage return (\u000D) characters.
//
// The policy plain text must be 2048 bytes or shorter. However, an internal
// conversion compresses it into a packed binary format with a separate limit.
// The PackedPolicySize response element indicates by percentage how close to
// the upper size limit the policy is, with 100% equaling the maximum allowed
@ -607,6 +651,10 @@ type AssumeRoleInput struct {
// of the assumed role principal. This means that subsequent cross-account API
// requests using the temporary security credentials will expose the role session
// name to the external account in their CloudTrail logs.
//
// The format for this parameter, as described by its regex pattern, is a string
// of characters consisting of upper- and lower-case alphanumeric characters
// with no spaces. You can also include any of the following characters: =,.@-
RoleSessionName *string `min:"2" type:"string" required:"true"`
// The identification number of the MFA device that is associated with the user
@ -614,12 +662,19 @@ type AssumeRoleInput struct {
// of the role being assumed includes a condition that requires MFA authentication.
// The value is either the serial number for a hardware device (such as GAHT12345678)
// or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user).
//
// The format for this parameter, as described by its regex pattern, is a string
// of characters consisting of upper- and lower-case alphanumeric characters
// with no spaces. You can also include any of the following characters: =,.@-
SerialNumber *string `min:"9" type:"string"`
// The value provided by the MFA device, if the trust policy of the role being
// assumed requires MFA (that is, if the policy includes a condition that tests
// for MFA). If the role being assumed requires MFA and if the TokenCode value
// is missing or expired, the AssumeRole call returns an "access denied" error.
//
// The format for this parameter, as described by its regex pattern, is a sequence
// of six numeric digits.
TokenCode *string `min:"6" type:"string"`
}
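A minimal sketch of populating these AssumeRoleInput fields with the vendored client; all identifiers are placeholders, and the ExternalId/MFA fields are only needed when the role's trust policy requires them:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	svc := sts.New(session.New())

	out, err := svc.AssumeRole(&sts.AssumeRoleInput{
		RoleArn:         aws.String("arn:aws:iam::123456789012:role/third-party"), // placeholder
		RoleSessionName: aws.String("audit-session"),
		ExternalId:      aws.String("example-external-id"),                // only if the trust policy requires it
		SerialNumber:    aws.String("arn:aws:iam::123456789012:mfa/user"), // only if MFA is required
		TokenCode:       aws.String("123456"),                             // six numeric digits from the MFA device
		DurationSeconds: aws.Int64(900),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.Credentials.AccessKeyId))
}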
@ -685,7 +740,7 @@ type AssumeRoleOutput struct {
// The temporary security credentials, which include an access key ID, a secret
// access key, and a security (or session) token.
//
// Note: The size of the security token that STS APIs return is not fixed.
// Note: The size of the security token that STS APIs return is not fixed.
// We strongly recommend that you make no assumptions about the maximum size.
// As of this writing, the typical size is less than 4096 bytes, but that can
// vary. Also, future updates to AWS might require larger sizes.
@ -716,8 +771,8 @@ type AssumeRoleWithSAMLInput struct {
// response's SessionNotOnOrAfter value. The actual expiration time is whichever
// value is shorter.
//
// The maximum duration for a session is 1 hour, and the minimum duration is
// 15 minutes, even if values outside this range are specified.
// The maximum duration for a session is 1 hour, and the minimum duration
// is 15 minutes, even if values outside this range are specified.
DurationSeconds *int64 `min:"900" type:"integer"`
// An IAM policy in JSON format.
@ -725,15 +780,21 @@ type AssumeRoleWithSAMLInput struct {
// The policy parameter is optional. If you pass a policy, the temporary security
// credentials that are returned by the operation have the permissions that
// are allowed by both the access policy of the role that is being assumed,
// and the policy that you pass. This gives you a way to further restrict the
// permissions for the resulting temporary security credentials. You cannot
// and the policy that you pass. This gives you a way to further restrict
// the permissions for the resulting temporary security credentials. You cannot
// use the passed policy to grant permissions that are in excess of those allowed
// by the access policy of the role that is being assumed. For more information,
// Permissions for AssumeRole, AssumeRoleWithSAML, and AssumeRoleWithWebIdentity
// (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html)
// in the IAM User Guide.
//
// The policy plain text must be 2048 bytes or shorter. However, an internal
// The format for this parameter, as described by its regex pattern, is a string
// of characters up to 2048 characters in length. The characters can be any
// ASCII character from the space character to the end of the valid character
// list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A),
// and carriage return (\u000D) characters.
//
// The policy plain text must be 2048 bytes or shorter. However, an internal
// conversion compresses it into a packed binary format with a separate limit.
// The PackedPolicySize response element indicates by percentage how close to
// the upper size limit the policy is, with 100% equaling the maximum allowed
@ -815,7 +876,7 @@ type AssumeRoleWithSAMLOutput struct {
// The temporary security credentials, which include an access key ID, a secret
// access key, and a security (or session) token.
//
// Note: The size of the security token that STS APIs return is not fixed.
// Note: The size of the security token that STS APIs return is not fixed.
// We strongly recommend that you make no assumptions about the maximum size.
// As of this writing, the typical size is less than 4096 bytes, but that can
// vary. Also, future updates to AWS might require larger sizes.
@ -877,14 +938,20 @@ type AssumeRoleWithWebIdentityInput struct {
// The policy parameter is optional. If you pass a policy, the temporary security
// credentials that are returned by the operation have the permissions that
// are allowed by both the access policy of the role that is being assumed,
// and the policy that you pass. This gives you a way to further restrict the
// permissions for the resulting temporary security credentials. You cannot
// and the policy that you pass. This gives you a way to further restrict
// the permissions for the resulting temporary security credentials. You cannot
// use the passed policy to grant permissions that are in excess of those allowed
// by the access policy of the role that is being assumed. For more information,
// see Permissions for AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html)
// in the IAM User Guide.
//
// The policy plain text must be 2048 bytes or shorter. However, an internal
// The format for this parameter, as described by its regex pattern, is a string
// of characters up to 2048 characters in length. The characters can be any
// ASCII character from the space character to the end of the valid character
// list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A),
// and carriage return (\u000D) characters.
//
// The policy plain text must be 2048 bytes or shorter. However, an internal
// conversion compresses it into a packed binary format with a separate limit.
// The PackedPolicySize response element indicates by percentage how close to
// the upper size limit the policy is, with 100% equaling the maximum allowed
@ -908,6 +975,10 @@ type AssumeRoleWithWebIdentityInput struct {
// That way, the temporary security credentials that your application will use
// are associated with that user. This session name is included as part of the
// ARN and assumed role ID in the AssumedRoleUser response element.
//
// The format for this parameter, as described by its regex pattern, is a string
// of characters consisting of upper- and lower-case alphanumeric characters
// with no spaces. You can also include any of the following characters: =,.@-
RoleSessionName *string `min:"2" type:"string" required:"true"`
// The OAuth 2.0 access token or OpenID Connect ID token that is provided by
@ -984,7 +1055,7 @@ type AssumeRoleWithWebIdentityOutput struct {
// The temporary security credentials, which include an access key ID, a secret
// access key, and a security token.
//
// Note: The size of the security token that STS APIs return is not fixed.
// Note: The size of the security token that STS APIs return is not fixed.
// We strongly recommend that you make no assumptions about the maximum size.
// As of this writing, the typical size is less than 4096 bytes, but that can
// vary. Also, future updates to AWS might require larger sizes.
@ -1113,8 +1184,7 @@ func (s *DecodeAuthorizationMessageInput) Validate() error {
type DecodeAuthorizationMessageOutput struct {
_ struct{} `type:"structure"`
// An XML document that contains the decoded message. For more information,
// see DecodeAuthorizationMessage.
// An XML document that contains the decoded message.
DecodedMessage *string `type:"string"`
}
@ -1212,6 +1282,10 @@ type GetFederationTokenInput struct {
// temporary security credentials (such as Bob). For example, you can reference
// the federated user name in a resource-based policy, such as in an Amazon
// S3 bucket policy.
//
// The format for this parameter, as described by its regex pattern, is a string
// of characters consisting of upper- and lower-case alphanumeric characters
// with no spaces. You can also include any of the following characters: =,.@-
Name *string `min:"2" type:"string" required:"true"`
// An IAM policy in JSON format that is passed with the GetFederationToken call
@ -1229,12 +1303,20 @@ type GetFederationTokenInput struct {
// credentials are used to access a resource that has a resource-based policy
// that specifically allows the federated user to access the resource.
//
// The policy plain text must be 2048 bytes or shorter. However, an internal
// The format for this parameter, as described by its regex pattern, is a string
// of characters up to 2048 characters in length. The characters can be any
// ASCII character from the space character to the end of the valid character
// list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A),
// and carriage return (\u000D) characters.
//
// The policy plain text must be 2048 bytes or shorter. However, an internal
// conversion compresses it into a packed binary format with a separate limit.
// The PackedPolicySize response element indicates by percentage how close to
// the upper size limit the policy is, with 100% equaling the maximum allowed
// size. For more information about how permissions work, see Permissions for
// GetFederationToken (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getfederationtoken.html).
// size.
//
// For more information about how permissions work, see Permissions for GetFederationToken
// (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getfederationtoken.html).
Policy *string `min:"1" type:"string"`
}
@ -1278,7 +1360,7 @@ type GetFederationTokenOutput struct {
// The temporary security credentials, which include an access key ID, a secret
// access key, and a security (or session) token.
//
// Note: The size of the security token that STS APIs return is not fixed.
// Note: The size of the security token that STS APIs return is not fixed.
// We strongly recommend that you make no assumptions about the maximum size.
// As of this writing, the typical size is less than 4096 bytes, but that can
// vary. Also, future updates to AWS might require larger sizes.
@ -1324,6 +1406,10 @@ type GetSessionTokenInput struct {
// Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user).
// You can find the device for an IAM user by going to the AWS Management Console
// and viewing the user's security credentials.
//
// The format for this parameter, as described by its regex pattern, is a string
// of characters consisting of upper- and lower-case alphanumeric characters
// with no spaces. You can also include any of the following characters: =,.@-
SerialNumber *string `min:"9" type:"string"`
// The value provided by the MFA device, if MFA is required. If any policy requires
@ -1331,6 +1417,9 @@ type GetSessionTokenInput struct {
// is required, and the user does not provide a code when requesting a set of
// temporary security credentials, the user will receive an "access denied"
// response when requesting resources that require MFA authentication.
//
// The format for this parameter, as described by its regex pattern, is a sequence
// of six numeric digits.
TokenCode *string `min:"6" type:"string"`
}
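A minimal sketch of an MFA-protected GetSessionToken call with the vendored client; the device ARN and token code are placeholders:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	svc := sts.New(session.New())

	out, err := svc.GetSessionToken(&sts.GetSessionTokenInput{
		DurationSeconds: aws.Int64(3600),
		SerialNumber:    aws.String("arn:aws:iam::123456789012:mfa/user"), // placeholder device ARN
		TokenCode:       aws.String("123456"),                             // six digits from the device
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.Credentials.AccessKeyId))
}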
@ -1371,7 +1460,7 @@ type GetSessionTokenOutput struct {
// The temporary security credentials, which include an access key ID, a secret
// access key, and a security (or session) token.
//
// Note: The size of the security token that STS APIs return is not fixed.
// Note: The size of the security token that STS APIs return is not fixed.
// We strongly recommend that you make no assumptions about the maximum size.
// As of this writing, the typical size is less than 4096 bytes, but that can
// vary. Also, future updates to AWS might require larger sizes.

View file

@ -17,24 +17,25 @@ import (
// This guide provides descriptions of the STS API. For more detailed information
// about using this service, go to Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html).
//
// As an alternative to using the API, you can use one of the AWS SDKs, which
// As an alternative to using the API, you can use one of the AWS SDKs, which
// consist of libraries and sample code for various programming languages and
// platforms (Java, Ruby, .NET, iOS, Android, etc.). The SDKs provide a convenient
// way to create programmatic access to STS. For example, the SDKs take care
// of cryptographically signing requests, managing errors, and retrying requests
// automatically. For information about the AWS SDKs, including how to download
// and install them, see the Tools for Amazon Web Services page (http://aws.amazon.com/tools/).
//
// For information about setting up signatures and authorization through the
// API, go to Signing AWS API Requests (http://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html"
// target="_blank) in the AWS General Reference. For general information about
// the Query API, go to Making Query Requests (http://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html"
// target="_blank) in Using IAM. For information about using security tokens
// with other AWS products, go to AWS Services That Work with IAM (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_aws-services-that-work-with-iam.html)
// API, go to Signing AWS API Requests (http://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html)
// in the AWS General Reference. For general information about the Query API,
// go to Making Query Requests (http://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html)
// in Using IAM. For information about using security tokens with other AWS
// products, go to AWS Services That Work with IAM (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_aws-services-that-work-with-iam.html)
// in the IAM User Guide.
//
// If you're new to AWS and need additional technical information about a specific
// AWS product, you can find the product's technical documentation at http://aws.amazon.com/documentation/
// (http://aws.amazon.com/documentation/" target="_blank).
// (http://aws.amazon.com/documentation/).
//
// Endpoints
//

View file

@ -1,4 +1,6 @@
[![Build Status](https://travis-ci.org/cloudfoundry-incubator/candiedyaml.svg)](https://travis-ci.org/cloudfoundry-incubator/candiedyaml)
[![GoDoc](https://godoc.org/github.com/cloudfoundry-incubator/candiedyaml?status.svg)](https://godoc.org/github.com/cloudfoundry-incubator/candiedyaml)
candiedyaml
===========

View file

@ -1,6 +1,7 @@
language: go
sudo: false
sudo: required
dist: trusty
cache:
directories:

View file

@ -67,3 +67,4 @@ nikandfor <nikandfor@gmail.com>
Anthony Woods <awoods@raintank.io>
Alexander Inozemtsev <alexander.inozemtsev@gmail.com>
Rob McColl <rob@robmccoll.com>; <rmccoll@ionicsecurity.com>
Viktor Tönköl <viktor.toenkoel@motionlogic.de>

View file

@ -72,6 +72,9 @@ type ClusterConfig struct {
Discovery DiscoveryConfig
// If not zero, gocql attempt to reconnect known DOWN nodes in every ReconnectSleep.
ReconnectInterval time.Duration
// The maximum amount of time to wait for schema agreement in a cluster after
// receiving a schema change frame. (default: 60s)
MaxWaitSchemaAgreement time.Duration
@ -126,6 +129,7 @@ func NewCluster(hosts ...string) *ClusterConfig {
PageSize: 5000,
DefaultTimestamp: true,
MaxWaitSchemaAgreement: 60 * time.Second,
ReconnectInterval: 60 * time.Second,
}
return cfg
}
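A minimal sketch of how an application might set the new ReconnectInterval (and the existing MaxWaitSchemaAgreement) on a gocql ClusterConfig; the contact points and keyspace are placeholders:

package main

import (
	"log"
	"time"

	"github.com/gocql/gocql"
)

func main() {
	cluster := gocql.NewCluster("10.0.0.1", "10.0.0.2") // placeholder contact points
	cluster.Keyspace = "example"                        // placeholder keyspace
	// A non-zero ReconnectInterval makes the session periodically retry hosts
	// it has marked DOWN; NewCluster defaults it to 60s as shown above.
	cluster.ReconnectInterval = 30 * time.Second
	// Upper bound on waiting for schema agreement after a schema change frame.
	cluster.MaxWaitSchemaAgreement = 2 * time.Minute

	session, err := cluster.CreateSession()
	if err != nil {
		log.Fatal(err)
	}
	defer session.Close()
}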

View file

@ -9,7 +9,6 @@ import (
"crypto/tls"
"errors"
"fmt"
"golang.org/x/net/context"
"io"
"io/ioutil"
"log"
@ -20,6 +19,8 @@ import (
"sync/atomic"
"time"
"golang.org/x/net/context"
"github.com/gocql/gocql/internal/lru"
"github.com/gocql/gocql/internal/streams"
@ -127,7 +128,7 @@ type Conn struct {
timeout time.Duration
cfg *ConnConfig
headerBuf []byte
headerBuf [maxFrameHeaderSize]byte
streams *streams.IDGenerator
mu sync.RWMutex
@ -175,17 +176,6 @@ func Connect(host *HostInfo, addr string, cfg *ConnConfig,
return nil, err
}
// going to default to proto 2
if cfg.ProtoVersion < protoVersion1 || cfg.ProtoVersion > protoVersion4 {
log.Printf("unsupported protocol version: %d using 2\n", cfg.ProtoVersion)
cfg.ProtoVersion = 2
}
headerSize := 8
if cfg.ProtoVersion > protoVersion2 {
headerSize = 9
}
c := &Conn{
conn: conn,
r: bufio.NewReader(conn),
@ -197,7 +187,6 @@ func Connect(host *HostInfo, addr string, cfg *ConnConfig,
errorHandler: errorHandler,
compressor: cfg.Compressor,
auth: cfg.Authenticator,
headerBuf: make([]byte, headerSize),
quit: make(chan struct{}),
session: session,
streams: streams.New(cfg.ProtoVersion),
@ -208,33 +197,49 @@ func Connect(host *HostInfo, addr string, cfg *ConnConfig,
c.setKeepalive(cfg.Keepalive)
}
var (
ctx context.Context
cancel func()
)
if c.timeout > 0 {
ctx, cancel = context.WithTimeout(context.Background(), c.timeout)
} else {
ctx, cancel = context.WithCancel(context.Background())
}
defer cancel()
frameTicker := make(chan struct{}, 1)
startupErr := make(chan error, 1)
startupErr := make(chan error)
go func() {
for range frameTicker {
err := c.recv()
startupErr <- err
if err != nil {
select {
case startupErr <- err:
case <-ctx.Done():
}
return
}
}
}()
err = c.startup(frameTicker)
close(frameTicker)
if err != nil {
conn.Close()
return nil, err
}
go func() {
defer close(frameTicker)
err := c.startup(ctx, frameTicker)
select {
case startupErr <- err:
case <-ctx.Done():
}
}()
select {
case err := <-startupErr:
if err != nil {
log.Println(err)
c.Close()
return nil, err
}
case <-time.After(c.timeout):
case <-ctx.Done():
c.Close()
return nil, errors.New("gocql: no response to connection startup within timeout")
}
@ -275,7 +280,7 @@ func (c *Conn) Read(p []byte) (n int, err error) {
return
}
func (c *Conn) startup(frameTicker chan struct{}) error {
func (c *Conn) startup(ctx context.Context, frameTicker chan struct{}) error {
m := map[string]string{
"CQL_VERSION": c.cfg.CQLVersion,
}
@ -284,8 +289,13 @@ func (c *Conn) startup(frameTicker chan struct{}) error {
m["COMPRESSION"] = c.compressor.Name()
}
frameTicker <- struct{}{}
framer, err := c.exec(context.Background(), &writeStartupFrame{opts: m}, nil)
select {
case frameTicker <- struct{}{}:
case <-ctx.Done():
return ctx.Err()
}
framer, err := c.exec(ctx, &writeStartupFrame{opts: m}, nil)
if err != nil {
return err
}
@ -301,13 +311,13 @@ func (c *Conn) startup(frameTicker chan struct{}) error {
case *readyFrame:
return nil
case *authenticateFrame:
return c.authenticateHandshake(v, frameTicker)
return c.authenticateHandshake(ctx, v, frameTicker)
default:
return NewErrProtocol("Unknown type of response to startup frame: %s", v)
}
}
func (c *Conn) authenticateHandshake(authFrame *authenticateFrame, frameTicker chan struct{}) error {
func (c *Conn) authenticateHandshake(ctx context.Context, authFrame *authenticateFrame, frameTicker chan struct{}) error {
if c.auth == nil {
return fmt.Errorf("authentication required (using %q)", authFrame.class)
}
@ -320,8 +330,13 @@ func (c *Conn) authenticateHandshake(authFrame *authenticateFrame, frameTicker c
req := &writeAuthResponseFrame{data: resp}
for {
frameTicker <- struct{}{}
framer, err := c.exec(context.Background(), req, nil)
select {
case frameTicker <- struct{}{}:
case <-ctx.Done():
return ctx.Err()
}
framer, err := c.exec(ctx, req, nil)
if err != nil {
return err
}
@ -425,7 +440,7 @@ func (c *Conn) recv() error {
}
// we're just reading headers over and over and copying bodies
head, err := readHeader(c.r, c.headerBuf)
head, err := readHeader(c.r, c.headerBuf[:])
if err != nil {
return err
}

View file

@ -89,6 +89,22 @@ func (c *controlConn) heartBeat() {
}
}
func hostInfo(addr string, defaultPort int) (*HostInfo, error) {
var port int
host, portStr, err := net.SplitHostPort(addr)
if err != nil {
host = addr
port = defaultPort
} else {
port, err = strconv.Atoi(portStr)
if err != nil {
return nil, err
}
}
return &HostInfo{peer: host, port: port}, nil
}
func (c *controlConn) shuffleDial(endpoints []string) (conn *Conn, err error) {
perm := randr.Perm(len(endpoints))
shuffled := make([]string, len(endpoints))
@ -101,24 +117,19 @@ func (c *controlConn) shuffleDial(endpoints []string) (conn *Conn, err error) {
// node.
for _, addr := range shuffled {
if addr == "" {
return nil, fmt.Errorf("control: invalid address: %q", addr)
return nil, fmt.Errorf("invalid address: %q", addr)
}
port := c.session.cfg.Port
addr = JoinHostPort(addr, port)
host, portStr, err := net.SplitHostPort(addr)
var host *HostInfo
host, err = hostInfo(addr, port)
if err != nil {
host = addr
port = c.session.cfg.Port
err = nil
} else {
port, err = strconv.Atoi(portStr)
if err != nil {
return nil, err
}
return nil, fmt.Errorf("invalid address: %q: %v", addr, err)
}
hostInfo, _ := c.session.ring.addHostIfMissing(&HostInfo{peer: host, port: port})
hostInfo, _ := c.session.ring.addHostIfMissing(host)
conn, err = c.session.connect(addr, c, hostInfo)
if err == nil {
return conn, err
@ -127,7 +138,11 @@ func (c *controlConn) shuffleDial(endpoints []string) (conn *Conn, err error) {
log.Printf("gocql: unable to dial control conn %v: %v\n", addr, err)
}
return
if err != nil {
return nil, err
}
return conn, nil
}
func (c *controlConn) connect(endpoints []string) error {
@ -137,9 +152,7 @@ func (c *controlConn) connect(endpoints []string) error {
conn, err := c.shuffleDial(endpoints)
if err != nil {
return fmt.Errorf("control: unable to connect: %v", err)
} else if conn == nil {
return errors.New("control: unable to connect to initial endpoints")
return fmt.Errorf("control: unable to connect to initial hosts: %v", err)
}
if err := c.setupConn(conn); err != nil {

View file

@ -14,6 +14,7 @@ const (
errReadTimeout = 0x1200
errReadFailure = 0x1300
errFunctionFailure = 0x1400
errWriteFailure = 0x1500
errSyntax = 0x2000
errUnauthorized = 0x2100
errInvalid = 0x2200
@ -70,6 +71,15 @@ type RequestErrWriteTimeout struct {
WriteType string
}
type RequestErrWriteFailure struct {
errorFrame
Consistency Consistency
Received int
BlockFor int
NumFailures int
WriteType string
}
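A hedged sketch of inspecting the new write-failure error from calling code, assuming the decoded *RequestErrWriteFailure value is returned to the caller like the other request errors; the query is illustrative:

package example

import (
	"log"

	"github.com/gocql/gocql"
)

func insertRow(session *gocql.Session) {
	err := session.Query(`INSERT INTO example.kv (k, v) VALUES (?, ?)`, "key", "value").Exec()
	if err == nil {
		return
	}
	// A frame with error code errWriteFailure is decoded into *RequestErrWriteFailure.
	if wf, ok := err.(*gocql.RequestErrWriteFailure); ok {
		log.Printf("write failure: %d/%d acks, %d replica failures, type=%s",
			wf.Received, wf.BlockFor, wf.NumFailures, wf.WriteType)
		return
	}
	log.Print(err)
}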
type RequestErrReadTimeout struct {
errorFrame
Consistency Consistency

View file

@ -107,6 +107,17 @@ func (s *Session) handleEvent(framer *framer) {
}
func (s *Session) handleSchemaEvent(frames []frame) {
if s.schemaDescriber == nil {
return
}
for _, frame := range frames {
switch f := frame.(type) {
case *schemaChangeKeyspace:
s.schemaDescriber.clearSchema(f.keyspace)
case *schemaChangeTable:
s.schemaDescriber.clearSchema(f.keyspace)
}
}
}
func (s *Session) handleNodeEvent(frames []frame) {
@ -233,6 +244,9 @@ func (s *Session) handleRemovedNode(ip net.IP, port int) {
}
func (s *Session) handleNodeUp(ip net.IP, port int, waitForBinary bool) {
if gocqlDebug {
log.Printf("gocql: Session.handleNodeUp: %s:%d\n", ip.String(), port)
}
addr := ip.String()
host := s.ring.getHost(addr)
if host != nil {
@ -264,6 +278,9 @@ func (s *Session) handleNodeUp(ip net.IP, port int, waitForBinary bool) {
}
func (s *Session) handleNodeDown(ip net.IP, port int) {
if gocqlDebug {
log.Printf("gocql: Session.handleNodeDown: %s:%d\n", ip.String(), port)
}
addr := ip.String()
host := s.ring.getHost(addr)
if host == nil {

View file

@ -231,6 +231,8 @@ var (
ErrFrameTooBig = errors.New("frame length is bigger than the maximum allowed")
)
const maxFrameHeaderSize = 9
func writeInt(p []byte, n int32) {
p[0] = byte(n >> 24)
p[1] = byte(n >> 16)
@ -339,23 +341,34 @@ type frame interface {
}
func readHeader(r io.Reader, p []byte) (head frameHeader, err error) {
_, err = io.ReadFull(r, p)
_, err = io.ReadFull(r, p[:1])
if err != nil {
return
return frameHeader{}, err
}
version := p[0] & protoVersionMask
if version < protoVersion1 || version > protoVersion4 {
err = fmt.Errorf("gocql: invalid version: %d", version)
return
return frameHeader{}, fmt.Errorf("gocql: unsupported response version: %d", version)
}
headSize := 9
if version < protoVersion3 {
headSize = 8
}
_, err = io.ReadFull(r, p[1:headSize])
if err != nil {
return frameHeader{}, err
}
p = p[:headSize]
head.version = protoVersion(p[0])
head.flags = p[1]
if version > protoVersion2 {
if len(p) < 9 {
if len(p) != 9 {
return frameHeader{}, fmt.Errorf("not enough bytes to read header require 9 got: %d", len(p))
}
@ -363,7 +376,7 @@ func readHeader(r io.Reader, p []byte) (head frameHeader, err error) {
head.op = frameOp(p[4])
head.length = int(readInt(p[5:]))
} else {
if len(p) < 8 {
if len(p) != 8 {
return frameHeader{}, fmt.Errorf("not enough bytes to read header require 8 got: %d", len(p))
}
@ -372,7 +385,7 @@ func readHeader(r io.Reader, p []byte) (head frameHeader, err error) {
head.length = int(readInt(p[4:]))
}
return
return head, nil
}
// explicitly enables tracing for the framers outgoing requests
@ -401,9 +414,9 @@ func (f *framer) readFrame(head *frameHeader) error {
}
// assume the underlying reader takes care of timeouts and retries
_, err := io.ReadFull(f.r, f.rbuf)
n, err := io.ReadFull(f.r, f.rbuf)
if err != nil {
return err
return fmt.Errorf("unable to read frame body: read %d/%d bytes: %v", n, head.length, err)
}
if head.flags&flagCompress == flagCompress {
@ -544,6 +557,16 @@ func (f *framer) parseErrorFrame() frame {
res.BlockFor = f.readInt()
res.DataPresent = f.readByte() != 0
return res
case errWriteFailure:
res := &RequestErrWriteFailure{
errorFrame: errD,
}
res.Consistency = f.readConsistency()
res.Received = f.readInt()
res.BlockFor = f.readInt()
res.NumFailures = f.readInt()
res.WriteType = f.readString()
return res
case errFunctionFailure:
res := RequestErrFunctionFailure{
errorFrame: errD,
@ -552,8 +575,12 @@ func (f *framer) parseErrorFrame() frame {
res.Function = f.readString()
res.ArgTypes = f.readStringList()
return res
case errInvalid, errBootstrapping, errConfig, errCredentials, errOverloaded,
errProtocol, errServer, errSyntax, errTruncate, errUnauthorized:
// TODO(zariel): we should have some distinct types for these errors
return errD
default:
return &errD
panic(fmt.Errorf("unknown error code: 0x%x", errD.code))
}
}

View file

@ -105,8 +105,6 @@ func (s *schemaDescriber) getSchema(keyspaceName string) (*KeyspaceMetadata, err
s.mu.Lock()
defer s.mu.Unlock()
// TODO handle schema change events
metadata, found := s.cache[keyspaceName]
if !found {
// refresh the cache for this keyspace
@ -121,6 +119,14 @@ func (s *schemaDescriber) getSchema(keyspaceName string) (*KeyspaceMetadata, err
return metadata, nil
}
// clears the already cached keyspace metadata
func (s *schemaDescriber) clearSchema(keyspaceName string) {
s.mu.Lock()
defer s.mu.Unlock()
delete(s.cache, keyspaceName)
}
// forcibly updates the current KeyspaceMetadata held by the schema describer
// for a given named keyspace.
func (s *schemaDescriber) refreshSchema(keyspaceName string) error {

View file

@ -9,8 +9,8 @@ import (
"encoding/binary"
"errors"
"fmt"
"golang.org/x/net/context"
"io"
"log"
"net"
"strconv"
"strings"
@ -19,6 +19,8 @@ import (
"time"
"unicode"
"golang.org/x/net/context"
"github.com/gocql/gocql/internal/lru"
)
@ -175,6 +177,10 @@ func NewSession(cfg ClusterConfig) (*Session, error) {
}
}
if cfg.ReconnectInterval > 0 {
go s.reconnectDownedHosts(cfg.ReconnectInterval)
}
// TODO(zariel): we probably dont need this any more as we verify that we
// can connect to one of the endpoints supplied by using the control conn.
// See if there are any connections in the pool
@ -188,6 +194,30 @@ func NewSession(cfg ClusterConfig) (*Session, error) {
return s, nil
}
func (s *Session) reconnectDownedHosts(intv time.Duration) {
for !s.Closed() {
time.Sleep(intv)
hosts := s.ring.allHosts()
// Print session.ring for debug.
if gocqlDebug {
buf := bytes.NewBufferString("Session.ring:")
for _, h := range hosts {
buf.WriteString("[" + h.Peer() + ":" + h.State().String() + "]")
}
log.Println(buf.String())
}
for _, h := range hosts {
if h.IsUp() {
continue
}
s.handleNodeUp(net.ParseIP(h.Peer()), h.Port(), true)
}
}
}
// SetConsistency sets the default consistency level for this session. This
// setting can also be changed on a per-query basis and the default value
// is Quorum.

View file

@ -5,3 +5,103 @@ $ go get github.com/golang/snappy
Unless otherwise noted, the Snappy-Go source files are distributed
under the BSD-style license found in the LICENSE file.
Benchmarks.
The golang/snappy benchmarks include compressing (Z) and decompressing (U) ten
or so files, the same set used by the C++ Snappy code (github.com/google/snappy
and note the "google", not "golang"). On an "Intel(R) Core(TM) i7-3770 CPU @
3.40GHz", Go's GOARCH=amd64 numbers as of 2016-04-29:
"go test -test.bench=."
_UFlat0-8 2.23GB/s ± 1% html
_UFlat1-8 1.43GB/s ± 0% urls
_UFlat2-8 23.7GB/s ± 1% jpg
_UFlat3-8 1.93GB/s ± 0% jpg_200
_UFlat4-8 13.9GB/s ± 2% pdf
_UFlat5-8 2.00GB/s ± 0% html4
_UFlat6-8 829MB/s ± 0% txt1
_UFlat7-8 799MB/s ± 0% txt2
_UFlat8-8 871MB/s ± 0% txt3
_UFlat9-8 730MB/s ± 0% txt4
_UFlat10-8 2.87GB/s ± 0% pb
_UFlat11-8 1.07GB/s ± 0% gaviota
_ZFlat0-8 1.04GB/s ± 0% html
_ZFlat1-8 536MB/s ± 0% urls
_ZFlat2-8 16.3GB/s ± 2% jpg
_ZFlat3-8 762MB/s ± 0% jpg_200
_ZFlat4-8 9.48GB/s ± 1% pdf
_ZFlat5-8 990MB/s ± 0% html4
_ZFlat6-8 381MB/s ± 0% txt1
_ZFlat7-8 353MB/s ± 0% txt2
_ZFlat8-8 398MB/s ± 0% txt3
_ZFlat9-8 329MB/s ± 0% txt4
_ZFlat10-8 1.35GB/s ± 1% pb
_ZFlat11-8 608MB/s ± 0% gaviota
"go test -test.bench=. -tags=noasm"
_UFlat0-8 637MB/s ± 0% html
_UFlat1-8 506MB/s ± 0% urls
_UFlat2-8 23.0GB/s ± 5% jpg
_UFlat3-8 1.17GB/s ± 0% jpg_200
_UFlat4-8 4.44GB/s ± 1% pdf
_UFlat5-8 623MB/s ± 0% html4
_UFlat6-8 300MB/s ± 1% txt1
_UFlat7-8 293MB/s ± 0% txt2
_UFlat8-8 316MB/s ± 0% txt3
_UFlat9-8 285MB/s ± 0% txt4
_UFlat10-8 768MB/s ± 0% pb
_UFlat11-8 406MB/s ± 1% gaviota
_ZFlat0-8 411MB/s ± 1% html
_ZFlat1-8 250MB/s ± 1% urls
_ZFlat2-8 12.7GB/s ± 1% jpg
_ZFlat3-8 157MB/s ± 0% jpg_200
_ZFlat4-8 2.95GB/s ± 0% pdf
_ZFlat5-8 406MB/s ± 0% html4
_ZFlat6-8 182MB/s ± 0% txt1
_ZFlat7-8 173MB/s ± 1% txt2
_ZFlat8-8 191MB/s ± 0% txt3
_ZFlat9-8 166MB/s ± 0% txt4
_ZFlat10-8 480MB/s ± 0% pb
_ZFlat11-8 272MB/s ± 0% gaviota
For comparison (Go's encoded output is byte-for-byte identical to C++'s), here
are the numbers from C++ Snappy's
make CXXFLAGS="-O2 -DNDEBUG -g" clean snappy_unittest.log && cat snappy_unittest.log
BM_UFlat/0 2.4GB/s html
BM_UFlat/1 1.4GB/s urls
BM_UFlat/2 21.8GB/s jpg
BM_UFlat/3 1.5GB/s jpg_200
BM_UFlat/4 13.3GB/s pdf
BM_UFlat/5 2.1GB/s html4
BM_UFlat/6 1.0GB/s txt1
BM_UFlat/7 959.4MB/s txt2
BM_UFlat/8 1.0GB/s txt3
BM_UFlat/9 864.5MB/s txt4
BM_UFlat/10 2.9GB/s pb
BM_UFlat/11 1.2GB/s gaviota
BM_ZFlat/0 944.3MB/s html (22.31 %)
BM_ZFlat/1 501.6MB/s urls (47.78 %)
BM_ZFlat/2 14.3GB/s jpg (99.95 %)
BM_ZFlat/3 538.3MB/s jpg_200 (73.00 %)
BM_ZFlat/4 8.3GB/s pdf (83.30 %)
BM_ZFlat/5 903.5MB/s html4 (22.52 %)
BM_ZFlat/6 336.0MB/s txt1 (57.88 %)
BM_ZFlat/7 312.3MB/s txt2 (61.91 %)
BM_ZFlat/8 353.1MB/s txt3 (54.99 %)
BM_ZFlat/9 289.9MB/s txt4 (66.26 %)
BM_ZFlat/10 1.2GB/s pb (19.68 %)
BM_ZFlat/11 527.4MB/s gaviota (37.72 %)

View file

@ -8,10 +8,17 @@
#include "textflag.h"
// TODO: figure out why the XXX lines compile with Go 1.4 and Go tip but not
// Go 1.6.
// The XXX lines assemble on Go 1.4, 1.5 and 1.7, but not 1.6, due to a
// Go toolchain regression. See https://github.com/golang/go/issues/15426 and
// https://github.com/golang/snappy/issues/29
//
// This is https://github.com/golang/snappy/issues/29
// As a workaround, the package was built with a known good assembler, and
// those instructions were disassembled by "objdump -d" to yield the
// 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15
// style comments, in AT&T asm syntax. Note that rsp here is a physical
// register, not Go/asm's SP pseudo-register (see https://golang.org/doc/asm).
// The instructions were then encoded as "BYTE $0x.." sequences, which assemble
// fine on Go 1.6.
// The asm code generally follows the pure Go code in encode_other.go, except
// where marked with a "!!!".
@ -21,19 +28,23 @@
// func emitLiteral(dst, lit []byte) int
//
// All local variables fit into registers. The register allocation:
// - AX return value
// - AX len(lit)
// - BX n
// - CX len(lit)
// - SI &lit[0]
// - DX return value
// - DI &dst[i]
// - R10 &lit[0]
//
// The 24 bytes of stack space is to call runtime·memmove.
//
// The unusual register allocation of local variables, such as R10 for the
// source pointer, matches the allocation used at the call site in encodeBlock,
// which makes it easier to manually inline this function.
TEXT ·emitLiteral(SB), NOSPLIT, $24-56
MOVQ dst_base+0(FP), DI
MOVQ lit_base+24(FP), SI
MOVQ lit_len+32(FP), CX
MOVQ CX, AX
MOVL CX, BX
MOVQ lit_base+24(FP), R10
MOVQ lit_len+32(FP), AX
MOVQ AX, DX
MOVL AX, BX
SUBL $1, BX
CMPL BX, $60
@ -45,32 +56,32 @@ threeBytes:
MOVB $0xf4, 0(DI)
MOVW BX, 1(DI)
ADDQ $3, DI
ADDQ $3, AX
JMP emitLiteralEnd
ADDQ $3, DX
JMP memmove
twoBytes:
MOVB $0xf0, 0(DI)
MOVB BX, 1(DI)
ADDQ $2, DI
ADDQ $2, AX
JMP emitLiteralEnd
ADDQ $2, DX
JMP memmove
oneByte:
SHLB $2, BX
MOVB BX, 0(DI)
ADDQ $1, DI
ADDQ $1, AX
ADDQ $1, DX
emitLiteralEnd:
MOVQ AX, ret+48(FP)
memmove:
MOVQ DX, ret+48(FP)
// copy(dst[i:], lit)
//
// This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push
// DI, SI and CX as arguments.
// DI, R10 and AX as arguments.
MOVQ DI, 0(SP)
MOVQ SI, 8(SP)
MOVQ CX, 16(SP)
MOVQ R10, 8(SP)
MOVQ AX, 16(SP)
CALL runtime·memmove(SB)
RET
@ -79,55 +90,59 @@ emitLiteralEnd:
// func emitCopy(dst []byte, offset, length int) int
//
// All local variables fit into registers. The register allocation:
// - BX offset
// - CX length
// - AX length
// - SI &dst[0]
// - DI &dst[i]
// - R11 offset
//
// The unusual register allocation of local variables, such as R11 for the
// offset, matches the allocation used at the call site in encodeBlock, which
// makes it easier to manually inline this function.
TEXT ·emitCopy(SB), NOSPLIT, $0-48
MOVQ dst_base+0(FP), DI
MOVQ DI, SI
MOVQ offset+24(FP), BX
MOVQ length+32(FP), CX
MOVQ offset+24(FP), R11
MOVQ length+32(FP), AX
loop0:
// for length >= 68 { etc }
CMPL CX, $68
CMPL AX, $68
JLT step1
// Emit a length 64 copy, encoded as 3 bytes.
MOVB $0xfe, 0(DI)
MOVW BX, 1(DI)
MOVW R11, 1(DI)
ADDQ $3, DI
SUBL $64, CX
SUBL $64, AX
JMP loop0
step1:
// if length > 64 { etc }
CMPL CX, $64
CMPL AX, $64
JLE step2
// Emit a length 60 copy, encoded as 3 bytes.
MOVB $0xee, 0(DI)
MOVW BX, 1(DI)
MOVW R11, 1(DI)
ADDQ $3, DI
SUBL $60, CX
SUBL $60, AX
step2:
// if length >= 12 || offset >= 2048 { goto step3 }
CMPL CX, $12
CMPL AX, $12
JGE step3
CMPL BX, $2048
CMPL R11, $2048
JGE step3
// Emit the remaining copy, encoded as 2 bytes.
MOVB BX, 1(DI)
SHRL $8, BX
SHLB $5, BX
SUBB $4, CX
SHLB $2, CX
ORB CX, BX
ORB $1, BX
MOVB BX, 0(DI)
MOVB R11, 1(DI)
SHRL $8, R11
SHLB $5, R11
SUBB $4, AX
SHLB $2, AX
ORB AX, R11
ORB $1, R11
MOVB R11, 0(DI)
ADDQ $2, DI
// Return the number of bytes written.
@ -137,11 +152,11 @@ step2:
step3:
// Emit the remaining copy, encoded as 3 bytes.
SUBL $1, CX
SHLB $2, CX
ORB $2, CX
MOVB CX, 0(DI)
MOVW BX, 1(DI)
SUBL $1, AX
SHLB $2, AX
ORB $2, AX
MOVB AX, 0(DI)
MOVW R11, 1(DI)
ADDQ $3, DI
// Return the number of bytes written.
@ -154,33 +169,37 @@ step3:
// func extendMatch(src []byte, i, j int) int
//
// All local variables fit into registers. The register allocation:
// - CX &src[0]
// - DX &src[len(src)]
// - SI &src[i]
// - DI &src[j]
// - R9 &src[len(src) - 8]
// - DX &src[0]
// - SI &src[j]
// - R13 &src[len(src) - 8]
// - R14 &src[len(src)]
// - R15 &src[i]
//
// The unusual register allocation of local variables, such as R15 for a source
// pointer, matches the allocation used at the call site in encodeBlock, which
// makes it easier to manually inline this function.
TEXT ·extendMatch(SB), NOSPLIT, $0-48
MOVQ src_base+0(FP), CX
MOVQ src_len+8(FP), DX
MOVQ i+24(FP), SI
MOVQ j+32(FP), DI
ADDQ CX, DX
ADDQ CX, SI
ADDQ CX, DI
MOVQ DX, R9
SUBQ $8, R9
MOVQ src_base+0(FP), DX
MOVQ src_len+8(FP), R14
MOVQ i+24(FP), R15
MOVQ j+32(FP), SI
ADDQ DX, R14
ADDQ DX, R15
ADDQ DX, SI
MOVQ R14, R13
SUBQ $8, R13
cmp8:
// As long as we are 8 or more bytes before the end of src, we can load and
// compare 8 bytes at a time. If those 8 bytes are equal, repeat.
CMPQ DI, R9
CMPQ SI, R13
JA cmp1
MOVQ (SI), AX
MOVQ (DI), BX
MOVQ (R15), AX
MOVQ (SI), BX
CMPQ AX, BX
JNE bsf
ADDQ $8, R15
ADDQ $8, SI
ADDQ $8, DI
JMP cmp8
bsf:
@ -191,29 +210,29 @@ bsf:
XORQ AX, BX
BSFQ BX, BX
SHRQ $3, BX
ADDQ BX, DI
ADDQ BX, SI
// Convert from &src[ret] to ret.
SUBQ CX, DI
MOVQ DI, ret+40(FP)
SUBQ DX, SI
MOVQ SI, ret+40(FP)
RET
cmp1:
// In src's tail, compare 1 byte at a time.
CMPQ DI, DX
CMPQ SI, R14
JAE extendMatchEnd
MOVB (SI), AX
MOVB (DI), BX
MOVB (R15), AX
MOVB (SI), BX
CMPB AX, BX
JNE extendMatchEnd
ADDQ $1, R15
ADDQ $1, SI
ADDQ $1, DI
JMP cmp1
extendMatchEnd:
// Convert from &src[ret] to ret.
SUBQ CX, DI
MOVQ DI, ret+40(FP)
SUBQ DX, SI
MOVQ SI, ret+40(FP)
RET
// ----------------------------------------------------------------------------
@ -232,8 +251,8 @@ extendMatchEnd:
// - R10 . &src[nextEmit]
// - R11 96 prevHash, currHash, nextHash, offset
// - R12 104 &src[base], skip
// - R13 . &src[nextS]
// - R14 . len(src), bytesBetweenHashLookups, x
// - R13 . &src[nextS], &src[len(src) - 8]
// - R14 . len(src), bytesBetweenHashLookups, &src[len(src)], x
// - R15 112 candidate
//
// The second column (56, 64, etc) is the stack offset to spill the registers
@ -352,6 +371,7 @@ inner0:
// table[nextHash] = uint16(s)
MOVQ SI, AX
SUBQ DX, AX
// XXX: MOVW AX, table-32768(SP)(R11*2)
// XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2)
BYTE $0x66
@ -384,32 +404,63 @@ fourByteMatch:
CMPQ AX, $16
JLE emitLiteralFastPath
// d += emitLiteral(dst[d:], src[nextEmit:s])
// ----------------------------------------
// Begin inline of the emitLiteral call.
//
// Push args.
MOVQ DI, 0(SP)
MOVQ $0, 8(SP) // Unnecessary, as the callee ignores it, but conservative.
MOVQ $0, 16(SP) // Unnecessary, as the callee ignores it, but conservative.
MOVQ R10, 24(SP)
MOVQ AX, 32(SP)
MOVQ AX, 40(SP) // Unnecessary, as the callee ignores it, but conservative.
// d += emitLiteral(dst[d:], src[nextEmit:s])
MOVL AX, BX
SUBL $1, BX
CMPL BX, $60
JLT inlineEmitLiteralOneByte
CMPL BX, $256
JLT inlineEmitLiteralTwoBytes
inlineEmitLiteralThreeBytes:
MOVB $0xf4, 0(DI)
MOVW BX, 1(DI)
ADDQ $3, DI
JMP inlineEmitLiteralMemmove
inlineEmitLiteralTwoBytes:
MOVB $0xf0, 0(DI)
MOVB BX, 1(DI)
ADDQ $2, DI
JMP inlineEmitLiteralMemmove
inlineEmitLiteralOneByte:
SHLB $2, BX
MOVB BX, 0(DI)
ADDQ $1, DI
inlineEmitLiteralMemmove:
// Spill local variables (registers) onto the stack; call; unspill.
//
// copy(dst[i:], lit)
//
// This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push
// DI, R10 and AX as arguments.
MOVQ DI, 0(SP)
MOVQ R10, 8(SP)
MOVQ AX, 16(SP)
ADDQ AX, DI // Finish the "d +=" part of "d += emitLiteral(etc)".
MOVQ SI, 72(SP)
MOVQ DI, 80(SP)
MOVQ R15, 112(SP)
CALL ·emitLiteral(SB)
CALL runtime·memmove(SB)
MOVQ 56(SP), CX
MOVQ 64(SP), DX
MOVQ 72(SP), SI
MOVQ 80(SP), DI
MOVQ 88(SP), R9
MOVQ 112(SP), R15
// Finish the "d +=" part of "d += emitLiteral(etc)".
ADDQ 48(SP), DI
JMP inner1
inlineEmitLiteralEnd:
// End inline of the emitLiteral call.
// ----------------------------------------
emitLiteralFastPath:
// !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2".
MOVB AX, BX
@ -442,60 +493,129 @@ inner1:
SUBQ R15, R11
SUBQ DX, R11
// ----------------------------------------
// Begin inline of the extendMatch call.
//
// s = extendMatch(src, candidate+4, s+4)
//
// Push args.
MOVQ DX, 0(SP)
// !!! R14 = &src[len(src)]
MOVQ src_len+32(FP), R14
MOVQ R14, 8(SP)
MOVQ R14, 16(SP) // Unnecessary, as the callee ignores it, but conservative.
ADDQ DX, R14
// !!! R13 = &src[len(src) - 8]
MOVQ R14, R13
SUBQ $8, R13
// !!! R15 = &src[candidate + 4]
ADDQ $4, R15
MOVQ R15, 24(SP)
ADDQ DX, R15
// !!! s += 4
ADDQ $4, SI
SUBQ DX, SI
MOVQ SI, 32(SP)
// Spill local variables (registers) onto the stack; call; unspill.
inlineExtendMatchCmp8:
// As long as we are 8 or more bytes before the end of src, we can load and
// compare 8 bytes at a time. If those 8 bytes are equal, repeat.
CMPQ SI, R13
JA inlineExtendMatchCmp1
MOVQ (R15), AX
MOVQ (SI), BX
CMPQ AX, BX
JNE inlineExtendMatchBSF
ADDQ $8, R15
ADDQ $8, SI
JMP inlineExtendMatchCmp8
inlineExtendMatchBSF:
// If those 8 bytes were not equal, XOR the two 8 byte values, and return
// the index of the first byte that differs. The BSF instruction finds the
// least significant 1 bit, the amd64 architecture is little-endian, and
// the shift by 3 converts a bit index to a byte index.
XORQ AX, BX
BSFQ BX, BX
SHRQ $3, BX
ADDQ BX, SI
JMP inlineExtendMatchEnd
inlineExtendMatchCmp1:
// In src's tail, compare 1 byte at a time.
CMPQ SI, R14
JAE inlineExtendMatchEnd
MOVB (R15), AX
MOVB (SI), BX
CMPB AX, BX
JNE inlineExtendMatchEnd
ADDQ $1, R15
ADDQ $1, SI
JMP inlineExtendMatchCmp1
inlineExtendMatchEnd:
// End inline of the extendMatch call.
// ----------------------------------------
// ----------------------------------------
// Begin inline of the emitCopy call.
//
// We don't need to unspill CX or R9 as we are just about to call another
// function.
MOVQ DI, 80(SP)
MOVQ R11, 96(SP)
MOVQ R12, 104(SP)
CALL ·extendMatch(SB)
MOVQ 64(SP), DX
MOVQ 80(SP), DI
MOVQ 96(SP), R11
MOVQ 104(SP), R12
// Finish the "s =" part of "s = extendMatch(etc)", remembering that the SI
// register holds &src[s], not s.
MOVQ 40(SP), SI
ADDQ DX, SI
// d += emitCopy(dst[d:], base-candidate, s-base)
//
// Push args.
MOVQ DI, 0(SP)
MOVQ $0, 8(SP) // Unnecessary, as the callee ignores it, but conservative.
MOVQ $0, 16(SP) // Unnecessary, as the callee ignores it, but conservative.
MOVQ R11, 24(SP)
// !!! length := s - base
MOVQ SI, AX
SUBQ R12, AX
MOVQ AX, 32(SP)
// Spill local variables (registers) onto the stack; call; unspill.
MOVQ SI, 72(SP)
MOVQ DI, 80(SP)
CALL ·emitCopy(SB)
MOVQ 56(SP), CX
MOVQ 64(SP), DX
MOVQ 72(SP), SI
MOVQ 80(SP), DI
MOVQ 88(SP), R9
inlineEmitCopyLoop0:
// for length >= 68 { etc }
CMPL AX, $68
JLT inlineEmitCopyStep1
// Finish the "d +=" part of "d += emitCopy(etc)".
ADDQ 40(SP), DI
// Emit a length 64 copy, encoded as 3 bytes.
MOVB $0xfe, 0(DI)
MOVW R11, 1(DI)
ADDQ $3, DI
SUBL $64, AX
JMP inlineEmitCopyLoop0
inlineEmitCopyStep1:
// if length > 64 { etc }
CMPL AX, $64
JLE inlineEmitCopyStep2
// Emit a length 60 copy, encoded as 3 bytes.
MOVB $0xee, 0(DI)
MOVW R11, 1(DI)
ADDQ $3, DI
SUBL $60, AX
inlineEmitCopyStep2:
// if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 }
CMPL AX, $12
JGE inlineEmitCopyStep3
CMPL R11, $2048
JGE inlineEmitCopyStep3
// Emit the remaining copy, encoded as 2 bytes.
MOVB R11, 1(DI)
SHRL $8, R11
SHLB $5, R11
SUBB $4, AX
SHLB $2, AX
ORB AX, R11
ORB $1, R11
MOVB R11, 0(DI)
ADDQ $2, DI
JMP inlineEmitCopyEnd
inlineEmitCopyStep3:
// Emit the remaining copy, encoded as 3 bytes.
SUBL $1, AX
SHLB $2, AX
ORB $2, AX
MOVB AX, 0(DI)
MOVW R11, 1(DI)
ADDQ $3, DI
inlineEmitCopyEnd:
// End inline of the emitCopy call.
// ----------------------------------------
// nextEmit = s
MOVQ SI, R10
@ -522,6 +642,7 @@ inner1:
MOVQ SI, AX
SUBQ DX, AX
SUBQ $1, AX
// XXX: MOVW AX, table-32768(SP)(R11*2)
// XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2)
BYTE $0x66
@ -549,6 +670,7 @@ inner1:
// table[currHash] = uint16(s)
ADDQ $1, AX
// XXX: MOVW AX, table-32768(SP)(R11*2)
// XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2)
BYTE $0x66

12
vendor/github.com/hashicorp/go-rootcerts/.travis.yml generated vendored Normal file
View file

@ -0,0 +1,12 @@
sudo: false
language: go
go:
- 1.6
branches:
only:
- master
script: make test

363
vendor/github.com/hashicorp/go-rootcerts/LICENSE generated vendored Normal file
View file

@ -0,0 +1,363 @@
Mozilla Public License, version 2.0
1. Definitions
1.1. "Contributor"
means each individual or legal entity that creates, contributes to the
creation of, or owns Covered Software.
1.2. "Contributor Version"
means the combination of the Contributions of others (if any) used by a
Contributor and that particular Contributor's Contribution.
1.3. "Contribution"
means Covered Software of a particular Contributor.
1.4. "Covered Software"
means Source Code Form to which the initial Contributor has attached the
notice in Exhibit A, the Executable Form of such Source Code Form, and
Modifications of such Source Code Form, in each case including portions
thereof.
1.5. "Incompatible With Secondary Licenses"
means
a. that the initial Contributor has attached the notice described in
Exhibit B to the Covered Software; or
b. that the Covered Software was made available under the terms of
version 1.1 or earlier of the License, but not also under the terms of
a Secondary License.
1.6. "Executable Form"
means any form of the work other than Source Code Form.
1.7. "Larger Work"
means a work that combines Covered Software with other material, in a
separate file or files, that is not Covered Software.
1.8. "License"
means this document.
1.9. "Licensable"
means having the right to grant, to the maximum extent possible, whether
at the time of the initial grant or subsequently, any and all of the
rights conveyed by this License.
1.10. "Modifications"
means any of the following:
a. any file in Source Code Form that results from an addition to,
deletion from, or modification of the contents of Covered Software; or
b. any new file in Source Code Form that contains any Covered Software.
1.11. "Patent Claims" of a Contributor
means any patent claim(s), including without limitation, method,
process, and apparatus claims, in any patent Licensable by such
Contributor that would be infringed, but for the grant of the License,
by the making, using, selling, offering for sale, having made, import,
or transfer of either its Contributions or its Contributor Version.
1.12. "Secondary License"
means either the GNU General Public License, Version 2.0, the GNU Lesser
General Public License, Version 2.1, the GNU Affero General Public
License, Version 3.0, or any later versions of those licenses.
1.13. "Source Code Form"
means the form of the work preferred for making modifications.
1.14. "You" (or "Your")
means an individual or a legal entity exercising rights under this
License. For legal entities, "You" includes any entity that controls, is
controlled by, or is under common control with You. For purposes of this
definition, "control" means (a) the power, direct or indirect, to cause
the direction or management of such entity, whether by contract or
otherwise, or (b) ownership of more than fifty percent (50%) of the
outstanding shares or beneficial ownership of such entity.
2. License Grants and Conditions
2.1. Grants
Each Contributor hereby grants You a world-wide, royalty-free,
non-exclusive license:
a. under intellectual property rights (other than patent or trademark)
Licensable by such Contributor to use, reproduce, make available,
modify, display, perform, distribute, and otherwise exploit its
Contributions, either on an unmodified basis, with Modifications, or
as part of a Larger Work; and
b. under Patent Claims of such Contributor to make, use, sell, offer for
sale, have made, import, and otherwise transfer either its
Contributions or its Contributor Version.
2.2. Effective Date
The licenses granted in Section 2.1 with respect to any Contribution
become effective for each Contribution on the date the Contributor first
distributes such Contribution.
2.3. Limitations on Grant Scope
The licenses granted in this Section 2 are the only rights granted under
this License. No additional rights or licenses will be implied from the
distribution or licensing of Covered Software under this License.
Notwithstanding Section 2.1(b) above, no patent license is granted by a
Contributor:
a. for any code that a Contributor has removed from Covered Software; or
b. for infringements caused by: (i) Your and any other third party's
modifications of Covered Software, or (ii) the combination of its
Contributions with other software (except as part of its Contributor
Version); or
c. under Patent Claims infringed by Covered Software in the absence of
its Contributions.
This License does not grant any rights in the trademarks, service marks,
or logos of any Contributor (except as may be necessary to comply with
the notice requirements in Section 3.4).
2.4. Subsequent Licenses
No Contributor makes additional grants as a result of Your choice to
distribute the Covered Software under a subsequent version of this
License (see Section 10.2) or under the terms of a Secondary License (if
permitted under the terms of Section 3.3).
2.5. Representation
Each Contributor represents that the Contributor believes its
Contributions are its original creation(s) or it has sufficient rights to
grant the rights to its Contributions conveyed by this License.
2.6. Fair Use
This License is not intended to limit any rights You have under
applicable copyright doctrines of fair use, fair dealing, or other
equivalents.
2.7. Conditions
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
Section 2.1.
3. Responsibilities
3.1. Distribution of Source Form
All distribution of Covered Software in Source Code Form, including any
Modifications that You create or to which You contribute, must be under
the terms of this License. You must inform recipients that the Source
Code Form of the Covered Software is governed by the terms of this
License, and how they can obtain a copy of this License. You may not
attempt to alter or restrict the recipients' rights in the Source Code
Form.
3.2. Distribution of Executable Form
If You distribute Covered Software in Executable Form then:
a. such Covered Software must also be made available in Source Code Form,
as described in Section 3.1, and You must inform recipients of the
Executable Form how they can obtain a copy of such Source Code Form by
reasonable means in a timely manner, at a charge no more than the cost
of distribution to the recipient; and
b. You may distribute such Executable Form under the terms of this
License, or sublicense it under different terms, provided that the
license for the Executable Form does not attempt to limit or alter the
recipients' rights in the Source Code Form under this License.
3.3. Distribution of a Larger Work
You may create and distribute a Larger Work under terms of Your choice,
provided that You also comply with the requirements of this License for
the Covered Software. If the Larger Work is a combination of Covered
Software with a work governed by one or more Secondary Licenses, and the
Covered Software is not Incompatible With Secondary Licenses, this
License permits You to additionally distribute such Covered Software
under the terms of such Secondary License(s), so that the recipient of
the Larger Work may, at their option, further distribute the Covered
Software under the terms of either this License or such Secondary
License(s).
3.4. Notices
You may not remove or alter the substance of any license notices
(including copyright notices, patent notices, disclaimers of warranty, or
limitations of liability) contained within the Source Code Form of the
Covered Software, except that You may alter any license notices to the
extent required to remedy known factual inaccuracies.
3.5. Application of Additional Terms
You may choose to offer, and to charge a fee for, warranty, support,
indemnity or liability obligations to one or more recipients of Covered
Software. However, You may do so only on Your own behalf, and not on
behalf of any Contributor. You must make it absolutely clear that any
such warranty, support, indemnity, or liability obligation is offered by
You alone, and You hereby agree to indemnify every Contributor for any
liability incurred by such Contributor as a result of warranty, support,
indemnity or liability terms You offer. You may include additional
disclaimers of warranty and limitations of liability specific to any
jurisdiction.
4. Inability to Comply Due to Statute or Regulation
If it is impossible for You to comply with any of the terms of this License
with respect to some or all of the Covered Software due to statute,
judicial order, or regulation then You must: (a) comply with the terms of
this License to the maximum extent possible; and (b) describe the
limitations and the code they affect. Such description must be placed in a
text file included with all distributions of the Covered Software under
this License. Except to the extent prohibited by statute or regulation,
such description must be sufficiently detailed for a recipient of ordinary
skill to be able to understand it.
5. Termination
5.1. The rights granted under this License will terminate automatically if You
fail to comply with any of its terms. However, if You become compliant,
then the rights granted under this License from a particular Contributor
are reinstated (a) provisionally, unless and until such Contributor
explicitly and finally terminates Your grants, and (b) on an ongoing
basis, if such Contributor fails to notify You of the non-compliance by
some reasonable means prior to 60 days after You have come back into
compliance. Moreover, Your grants from a particular Contributor are
reinstated on an ongoing basis if such Contributor notifies You of the
non-compliance by some reasonable means, this is the first time You have
received notice of non-compliance with this License from such
Contributor, and You become compliant prior to 30 days after Your receipt
of the notice.
5.2. If You initiate litigation against any entity by asserting a patent
infringement claim (excluding declaratory judgment actions,
counter-claims, and cross-claims) alleging that a Contributor Version
directly or indirectly infringes any patent, then the rights granted to
You by any and all Contributors for the Covered Software under Section
2.1 of this License shall terminate.
5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
license agreements (excluding distributors and resellers) which have been
validly granted by You or Your distributors under this License prior to
termination shall survive termination.
6. Disclaimer of Warranty
Covered Software is provided under this License on an "as is" basis,
without warranty of any kind, either expressed, implied, or statutory,
including, without limitation, warranties that the Covered Software is free
of defects, merchantable, fit for a particular purpose or non-infringing.
The entire risk as to the quality and performance of the Covered Software
is with You. Should any Covered Software prove defective in any respect,
You (not any Contributor) assume the cost of any necessary servicing,
repair, or correction. This disclaimer of warranty constitutes an essential
part of this License. No use of any Covered Software is authorized under
this License except under this disclaimer.
7. Limitation of Liability
Under no circumstances and under no legal theory, whether tort (including
negligence), contract, or otherwise, shall any Contributor, or anyone who
distributes Covered Software as permitted above, be liable to You for any
direct, indirect, special, incidental, or consequential damages of any
character including, without limitation, damages for lost profits, loss of
goodwill, work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses, even if such party shall have been
informed of the possibility of such damages. This limitation of liability
shall not apply to liability for death or personal injury resulting from
such party's negligence to the extent applicable law prohibits such
limitation. Some jurisdictions do not allow the exclusion or limitation of
incidental or consequential damages, so this exclusion and limitation may
not apply to You.
8. Litigation
Any litigation relating to this License may be brought only in the courts
of a jurisdiction where the defendant maintains its principal place of
business and such litigation shall be governed by laws of that
jurisdiction, without reference to its conflict-of-law provisions. Nothing
in this Section shall prevent a party's ability to bring cross-claims or
counter-claims.
9. Miscellaneous
This License represents the complete agreement concerning the subject
matter hereof. If any provision of this License is held to be
unenforceable, such provision shall be reformed only to the extent
necessary to make it enforceable. Any law or regulation which provides that
the language of a contract shall be construed against the drafter shall not
be used to construe this License against a Contributor.
10. Versions of the License
10.1. New Versions
Mozilla Foundation is the license steward. Except as provided in Section
10.3, no one other than the license steward has the right to modify or
publish new versions of this License. Each version will be given a
distinguishing version number.
10.2. Effect of New Versions
You may distribute the Covered Software under the terms of the version
of the License under which You originally received the Covered Software,
or under the terms of any subsequent version published by the license
steward.
10.3. Modified Versions
If you create software not governed by this License, and you want to
create a new license for such software, you may create and use a
modified version of this License if you rename the license and remove
any references to the name of the license steward (except to note that
such modified license differs from this License).
10.4. Distributing Source Code Form that is Incompatible With Secondary
Licenses If You choose to distribute Source Code Form that is
Incompatible With Secondary Licenses under the terms of this version of
the License, the notice described in Exhibit B of this License must be
attached.
Exhibit A - Source Code Form License Notice
This Source Code Form is subject to the
terms of the Mozilla Public License, v.
2.0. If a copy of the MPL was not
distributed with this file, You can
obtain one at
http://mozilla.org/MPL/2.0/.
If it is not possible or desirable to put the notice in a particular file,
then You may include the notice in a location (such as a LICENSE file in a
relevant directory) where a recipient would be likely to look for such a
notice.
You may add additional accurate notices of copyright ownership.
Exhibit B - "Incompatible With Secondary Licenses" Notice
This Source Code Form is "Incompatible
With Secondary Licenses", as defined by
the Mozilla Public License, v. 2.0.
vendor/github.com/hashicorp/go-rootcerts/Makefile generated vendored Normal file
@ -0,0 +1,8 @@
TEST?=./...
test:
go test $(TEST) $(TESTARGS) -timeout=3s -parallel=4
go vet $(TEST)
go test $(TEST) -race
.PHONY: test
vendor/github.com/hashicorp/go-rootcerts/README.md generated vendored Normal file
@ -0,0 +1,43 @@
# rootcerts
Functions for loading root certificates for TLS connections.
-----
Go's standard library `crypto/tls` provides a common mechanism for configuring
TLS connections in `tls.Config`. The `RootCAs` field on this struct is a pool
of certificates for the client to use as a trust store when verifying server
certificates.
This library contains utility functions for loading certificates destined for
that field, as well as one other important thing:
When the `RootCAs` field is `nil`, the standard library attempts to load the
host's root CA set. This behavior is OS-specific, and the Darwin
implementation contains [a bug that prevents trusted certificates from the
System and Login keychains from being loaded][1]. This library contains
Darwin-specific behavior that works around that bug.
[1]: https://github.com/golang/go/issues/14514
## Example Usage
Here's a snippet demonstrating how this library is meant to be used:
```go
import (
	"crypto/tls"
	"net/http"
	"os"

	"github.com/hashicorp/go-cleanhttp"
	"github.com/hashicorp/go-rootcerts"
)

func httpClient() (*http.Client, error) {
tlsConfig := &tls.Config{}
err := rootcerts.ConfigureTLS(tlsConfig, &rootcerts.Config{
CAFile: os.Getenv("MYAPP_CAFILE"),
CAPath: os.Getenv("MYAPP_CAPATH"),
})
if err != nil {
return nil, err
}
c := cleanhttp.DefaultClient()
t := cleanhttp.DefaultTransport()
t.TLSClientConfig = tlsConfig
c.Transport = t
return c, nil
}
```
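If you only need the certificate pool itself rather than a wired-up `http.Client`, a minimal sketch (reusing the illustrative `MYAPP_*` environment variables from the snippet above) might call `LoadCACerts` directly:
```go
package main

import (
	"fmt"
	"os"

	"github.com/hashicorp/go-rootcerts"
)

func main() {
	// CAFile takes precedence over CAPath; with neither set, system roots are used.
	pool, err := rootcerts.LoadCACerts(&rootcerts.Config{
		CAFile: os.Getenv("MYAPP_CAFILE"),
		CAPath: os.Getenv("MYAPP_CAPATH"),
	})
	if err != nil {
		fmt.Fprintln(os.Stderr, "loading CA certificates:", err)
		os.Exit(1)
	}
	if pool == nil {
		// A nil pool means no CAFile/CAPath was set on a non-Darwin system;
		// leaving RootCAs nil lets Go's standard library load the system certs.
		fmt.Println("no explicit CAs configured; deferring to system roots")
		return
	}
	fmt.Printf("loaded %d CA certificates\n", len(pool.Subjects()))
}
```
A nil pool is not an error: leaving `RootCAs` unset is exactly how the standard library is told to use the operating system's trust store.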
vendor/github.com/hashicorp/go-rootcerts/doc.go generated vendored Normal file
@ -0,0 +1,9 @@
// Package rootcerts contains functions to aid in loading CA certificates for
// TLS connections.
//
// In addition, its default behavior on Darwin works around an open issue [1]
// in Go's crypto/x509 that prevents certificates from being loaded from the
// System or Login keychains.
//
// [1] https://github.com/golang/go/issues/14514
package rootcerts
vendor/github.com/hashicorp/go-rootcerts/rootcerts.go generated vendored Normal file
@ -0,0 +1,103 @@
package rootcerts
import (
"crypto/tls"
"crypto/x509"
"fmt"
"io/ioutil"
"os"
"path/filepath"
)
// Config determines where LoadCACerts will load certificates from. When both
// CAFile and CAPath are blank, this library's functions will either load
// system roots explicitly and return them, or set the CertPool to nil to allow
// Go's standard library to load system certs.
type Config struct {
// CAFile is a path to a PEM-encoded certificate file or bundle. Takes
// precedence over CAPath.
CAFile string
// CAPath is a path to a directory populated with PEM-encoded certificates.
CAPath string
}
// ConfigureTLS sets up the RootCAs on the provided tls.Config based on the
// Config specified.
func ConfigureTLS(t *tls.Config, c *Config) error {
if t == nil {
return nil
}
pool, err := LoadCACerts(c)
if err != nil {
return err
}
t.RootCAs = pool
return nil
}
// LoadCACerts loads a CertPool based on the Config specified.
func LoadCACerts(c *Config) (*x509.CertPool, error) {
if c == nil {
c = &Config{}
}
if c.CAFile != "" {
return LoadCAFile(c.CAFile)
}
if c.CAPath != "" {
return LoadCAPath(c.CAPath)
}
return LoadSystemCAs()
}
// LoadCAFile loads a single PEM-encoded file from the path specified.
func LoadCAFile(caFile string) (*x509.CertPool, error) {
pool := x509.NewCertPool()
pem, err := ioutil.ReadFile(caFile)
if err != nil {
return nil, fmt.Errorf("Error loading CA File: %s", err)
}
ok := pool.AppendCertsFromPEM(pem)
if !ok {
return nil, fmt.Errorf("Error loading CA File: Couldn't parse PEM in: %s", caFile)
}
return pool, nil
}
// LoadCAPath walks the provided path and loads all certificates encountered into
// a pool.
func LoadCAPath(caPath string) (*x509.CertPool, error) {
pool := x509.NewCertPool()
walkFn := func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if info.IsDir() {
return nil
}
pem, err := ioutil.ReadFile(path)
if err != nil {
return fmt.Errorf("Error loading file from CAPath: %s", err)
}
ok := pool.AppendCertsFromPEM(pem)
if !ok {
return fmt.Errorf("Error loading CA Path: Couldn't parse PEM in: %s", path)
}
return nil
}
err := filepath.Walk(caPath, walkFn)
if err != nil {
return nil, err
}
return pool, nil
}
@ -0,0 +1,12 @@
// +build !darwin
package rootcerts
import "crypto/x509"
// LoadSystemCAs does nothing on non-Darwin systems. We return nil so that
// default behavior of standard TLS config libraries is triggered, which is to
// load system certs.
func LoadSystemCAs() (*x509.CertPool, error) {
return nil, nil
}
@ -0,0 +1,48 @@
package rootcerts
import (
"crypto/x509"
"os/exec"
"path"
"github.com/mitchellh/go-homedir"
)
// LoadSystemCAs has special behavior on Darwin systems to work around an open
// issue in Go's crypto/x509 (https://github.com/golang/go/issues/14514) that
// prevents certificates from being loaded from the System or Login keychains.
func LoadSystemCAs() (*x509.CertPool, error) {
pool := x509.NewCertPool()
for _, keychain := range certKeychains() {
err := addCertsFromKeychain(pool, keychain)
if err != nil {
return nil, err
}
}
return pool, nil
}
func addCertsFromKeychain(pool *x509.CertPool, keychain string) error {
cmd := exec.Command("/usr/bin/security", "find-certificate", "-a", "-p", keychain)
data, err := cmd.Output()
if err != nil {
return err
}
pool.AppendCertsFromPEM(data)
return nil
}
func certKeychains() []string {
keychains := []string{
"/System/Library/Keychains/SystemRootCertificates.keychain",
"/Library/Keychains/System.keychain",
}
home, err := homedir.Dir()
if err == nil {
loginKeychain := path.Join(home, "Library", "Keychains", "login.keychain")
keychains = append(keychains, loginKeychain)
}
return keychains
}
@ -1,6 +1,6 @@
// Package pq is a pure Go Postgres driver for the database/sql package.
// +build darwin dragonfly freebsd linux nacl netbsd openbsd solaris
// +build darwin dragonfly freebsd linux nacl netbsd openbsd solaris rumprun
package pq
@ -161,8 +161,12 @@ func (t *handshakeTransport) readOnePacket() ([]byte, error) {
t.readSinceKex += uint64(len(p))
if debugHandshake {
msg, err := decode(p)
log.Printf("%s got %T %v (%v)", t.id(), msg, msg, err)
if p[0] == msgChannelData || p[0] == msgChannelExtendedData {
log.Printf("%s got data (packet %d bytes)", t.id(), len(p))
} else {
msg, err := decode(p)
log.Printf("%s got %T %v (%v)", t.id(), msg, msg, err)
}
}
if p[0] != msgKexInit {
return p, nil
@ -370,8 +374,8 @@ func (t *handshakeTransport) enterKeyExchangeLocked(otherInitPacket []byte) erro
if t.sessionID == nil {
t.sessionID = result.H
result.SessionID = result.H
}
result.SessionID = t.sessionID
t.conn.prepareKeyChange(algs, result)
if err = t.conn.writePacket([]byte{msgNewKeys}); err != nil {
@ -61,7 +61,7 @@ type Context interface {
//
// // Stream generates values with DoSomething and sends them to out
// // until DoSomething returns an error or ctx.Done is closed.
// func Stream(ctx context.Context, out <-chan Value) error {
// func Stream(ctx context.Context, out chan<- Value) error {
// for {
// v, err := DoSomething(ctx)
// if err != nil {
@ -62,7 +62,7 @@ func ParseSocketControlMessage(b []byte) ([]SocketControlMessage, error) {
func socketControlMessageHeaderAndData(b []byte) (*Cmsghdr, []byte, error) {
h := (*Cmsghdr)(unsafe.Pointer(&b[0]))
if h.Len < SizeofCmsghdr || int(h.Len) > len(b) {
if h.Len < SizeofCmsghdr || uint64(h.Len) > uint64(len(b)) {
return nil, nil, EINVAL
}
return h, b[cmsgAlignOf(SizeofCmsghdr):h.Len], nil
@ -18,6 +18,7 @@ package unix
//sysnb Getgid() (gid int)
//sysnb Getrlimit(resource int, rlim *Rlimit) (err error) = SYS_UGETRLIMIT
//sysnb Getuid() (uid int)
//sysnb InotifyInit() (fd int, err error)
//sys Ioperm(from int, num int, on int) (err error)
//sys Iopl(level int) (err error)
//sys Lchown(path string, uid int, gid int) (err error)
@ -97,3 +98,29 @@ func (msghdr *Msghdr) SetControllen(length int) {
func (cmsg *Cmsghdr) SetLen(length int) {
cmsg.Len = uint64(length)
}
//sysnb pipe(p *[2]_C_int) (err error)
func Pipe(p []int) (err error) {
if len(p) != 2 {
return EINVAL
}
var pp [2]_C_int
err = pipe(&pp)
p[0] = int(pp[0])
p[1] = int(pp[1])
return
}
//sysnb pipe2(p *[2]_C_int, flags int) (err error)
func Pipe2(p []int, flags int) (err error) {
if len(p) != 2 {
return EINVAL
}
var pp [2]_C_int
err = pipe2(&pp, flags)
p[0] = int(pp[0])
p[1] = int(pp[1])
return
}
@ -113,6 +113,9 @@ struct my_epoll_event {
// padding is not specified in linux/eventpoll.h but added to conform to the
// alignment requirements of EABI
int32_t padFd;
#endif
#ifdef __powerpc64__
int32_t _padFd;
#endif
int32_t fd;
int32_t pad;
@ -1298,6 +1298,17 @@ func Getuid() (uid int) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func InotifyInit() (fd int, err error) {
r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT, 0, 0, 0)
fd = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Ioperm(from int, num int, on int) (err error) {
_, _, e1 := Syscall(SYS_IOPERM, uintptr(from), uintptr(num), uintptr(on))
if e1 != 0 {
@ -1810,3 +1821,23 @@ func Utime(path string, buf *Utimbuf) (err error) {
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func pipe(p *[2]_C_int) (err error) {
_, _, e1 := RawSyscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func pipe2(p *[2]_C_int, flags int) (err error) {
_, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
@ -1298,6 +1298,17 @@ func Getuid() (uid int) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func InotifyInit() (fd int, err error) {
r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT, 0, 0, 0)
fd = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Ioperm(from int, num int, on int) (err error) {
_, _, e1 := Syscall(SYS_IOPERM, uintptr(from), uintptr(num), uintptr(on))
if e1 != 0 {
@ -1810,3 +1821,23 @@ func Utime(path string, buf *Utimbuf) (err error) {
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func pipe(p *[2]_C_int) (err error) {
_, _, e1 := RawSyscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func pipe2(p *[2]_C_int, flags int) (err error) {
_, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
@ -589,9 +589,10 @@ type Ustat_t struct {
}
type EpollEvent struct {
Events uint32
Fd int32
Pad int32
Events uint32
X_padFd int32
Fd int32
Pad int32
}
const (
@ -589,9 +589,10 @@ type Ustat_t struct {
}
type EpollEvent struct {
Events uint32
Fd int32
Pad int32
Events uint32
X_padFd int32
Fd int32
Pad int32
}
const (
@ -228,7 +228,7 @@ of the header should be "X-Vault-Token" and the value should be the token.
</dd>
<dt>Method</dt>
<dd>GET</dd>
<dd>POST</dd>
<dt>URL</dt>
<dd>`/auth/token/lookup`</dd>
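As a rough, self-contained illustration of the POST form of this endpoint, the sketch below assumes Vault's default local listen address, the usual `/v1` API prefix, and that the token to inspect is sent in the JSON body under a `token` field (that field name is an assumption, not taken from this page):
```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io/ioutil"
	"net/http"
	"os"
)

func main() {
	// Hypothetical payload: the token whose properties should be looked up.
	body, err := json.Marshal(map[string]string{"token": os.Getenv("LOOKUP_TOKEN")})
	if err != nil {
		panic(err)
	}
	req, err := http.NewRequest("POST",
		"http://127.0.0.1:8200/v1/auth/token/lookup", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	// The request itself is authenticated with the caller's own token.
	req.Header.Set("X-Vault-Token", os.Getenv("VAULT_TOKEN"))

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	out, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(out))
}
```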
@ -599,8 +599,9 @@ of the header should be "X-Vault-Token" and the value should be the token.
If set, tokens created against this role will <i>not</i> have a maximum
lifetime. Instead, they will have a fixed TTL that is refreshed with
each renewal. So long as they continue to be renewed, they will never
expire. The parameter is an integer duration of seconds or a duration
string (e.g. `"72h"`).
expire. The parameter is an integer duration of seconds. Tokens issued
against this role track updates to this value; the new period takes effect
upon the next renewal.
</li>
<li>
<span class="param">path_suffix</span>
@ -59,3 +59,6 @@ These libraries are provided by the community.
* [HVAC](https://github.com/ianunruh/hvac)
* `pip install hvac`
### Scala
* [scala-vault](https://github.com/janstenpickle/scala-vault)
@ -43,7 +43,8 @@
m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
})(window,document,'script','//www.google-analytics.com/analytics.js','ga');
ga('create', 'UA-62364009-1', 'auto');
ga('create', 'UA-62364009-1', 'vaultproject.io');
ga('require', 'linkid');
ga('send', 'pageview');
</script>